@ai-sdk/openai 2.0.0-alpha.9 → 2.0.0-beta.10

package/dist/index.mjs CHANGED
@@ -17,7 +17,7 @@ import {
  parseProviderOptions,
  postJsonToApi
  } from "@ai-sdk/provider-utils";
- import { z as z3 } from "zod";
+ import { z as z5 } from "zod/v4";

  // src/convert-to-openai-chat-messages.ts
  import {
@@ -153,7 +153,7 @@ function convertToOpenAIChatMessages({
  type: "function",
  function: {
  name: part.toolName,
- arguments: JSON.stringify(part.args)
+ arguments: JSON.stringify(part.input)
  }
  });
  break;
@@ -169,10 +169,23 @@ function convertToOpenAIChatMessages({
  }
  case "tool": {
  for (const toolResponse of content) {
+ const output = toolResponse.output;
+ let contentValue;
+ switch (output.type) {
+ case "text":
+ case "error-text":
+ contentValue = output.value;
+ break;
+ case "content":
+ case "json":
+ case "error-json":
+ contentValue = JSON.stringify(output.value);
+ break;
+ }
  messages.push({
  role: "tool",
  tool_call_id: toolResponse.toolCallId,
- content: JSON.stringify(toolResponse.result)
+ content: contentValue
  });
  }
  break;
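
Note: tool results no longer arrive as an opaque `result` that is unconditionally JSON-stringified; they carry a typed `output` union, and only non-text variants are serialized. A minimal TypeScript sketch of the mapping the new switch above implements (the union type name is hypothetical; the variants are exactly the ones handled in the hunk):

// Hypothetical shape for illustration -- not an exported type of this package.
type ToolResultOutput =
  | { type: "text" | "error-text"; value: string }
  | { type: "json" | "error-json" | "content"; value: unknown };

// Mirrors convertToOpenAIChatMessages: text-like outputs pass through verbatim,
// structured outputs are JSON-stringified before becoming the tool message content.
function toToolMessageContent(output: ToolResultOutput): string {
  return output.type === "text" || output.type === "error-text"
    ? output.value
    : JSON.stringify(output.value);
}
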
@@ -217,7 +230,7 @@ function mapOpenAIFinishReason(finishReason) {
  }

  // src/openai-chat-options.ts
- import { z } from "zod";
+ import { z } from "zod/v4";
  var openaiProviderOptions = z.object({
  /**
  * Modify the likelihood of specified tokens appearing in the completion.
@@ -260,21 +273,34 @@ var openaiProviderOptions = z.object({
  /**
  * Metadata to associate with the request.
  */
- metadata: z.record(z.string()).optional(),
+ metadata: z.record(z.string().max(64), z.string().max(512)).optional(),
  /**
  * Parameters for prediction mode.
  */
- prediction: z.record(z.any()).optional(),
+ prediction: z.record(z.string(), z.any()).optional(),
  /**
  * Whether to use structured outputs.
  *
  * @default true
  */
- structuredOutputs: z.boolean().optional()
+ structuredOutputs: z.boolean().optional(),
+ /**
+ * Service tier for the request. Set to 'flex' for 50% cheaper processing
+ * at the cost of increased latency. Only available for o3 and o4-mini models.
+ *
+ * @default 'auto'
+ */
+ serviceTier: z.enum(["auto", "flex"]).optional(),
+ /**
+ * Whether to use strict JSON schema validation.
+ *
+ * @default false
+ */
+ strictJsonSchema: z.boolean().optional()
  });

  // src/openai-error.ts
- import { z as z2 } from "zod";
+ import { z as z2 } from "zod/v4";
  import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
  var openaiErrorDataSchema = z2.object({
  error: z2.object({
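
Example (illustrative): the two options added above are set per call through providerOptions.openai. The generateText call shape follows the AI SDK v5 beta API and should be treated as an assumption, not something this diff shows:

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = await generateText({
  model: openai("o4-mini"),
  prompt: "Summarize this quarter's incidents.",
  providerOptions: {
    openai: {
      serviceTier: "flex",    // cheaper, slower; warned and dropped for models other than o3 / o4-mini
      strictJsonSchema: true, // strict JSON schema validation now defaults to false
    },
  },
});
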
@@ -296,10 +322,81 @@ var openaiFailedResponseHandler = createJsonErrorResponseHandler({
  import {
  UnsupportedFunctionalityError as UnsupportedFunctionalityError2
  } from "@ai-sdk/provider";
+
+ // src/tool/file-search.ts
+ import { createProviderDefinedToolFactory } from "@ai-sdk/provider-utils";
+ import { z as z3 } from "zod/v4";
+ var fileSearchArgsSchema = z3.object({
+ /**
+ * List of vector store IDs to search through. If not provided, searches all available vector stores.
+ */
+ vectorStoreIds: z3.array(z3.string()).optional(),
+ /**
+ * Maximum number of search results to return. Defaults to 10.
+ */
+ maxResults: z3.number().optional(),
+ /**
+ * Type of search to perform. Defaults to 'auto'.
+ */
+ searchType: z3.enum(["auto", "keyword", "semantic"]).optional()
+ });
+ var fileSearch = createProviderDefinedToolFactory({
+ id: "openai.file_search",
+ name: "file_search",
+ inputSchema: z3.object({
+ query: z3.string()
+ })
+ });
+
+ // src/tool/web-search-preview.ts
+ import { createProviderDefinedToolFactory as createProviderDefinedToolFactory2 } from "@ai-sdk/provider-utils";
+ import { z as z4 } from "zod/v4";
+ var webSearchPreviewArgsSchema = z4.object({
+ /**
+ * Search context size to use for the web search.
+ * - high: Most comprehensive context, highest cost, slower response
+ * - medium: Balanced context, cost, and latency (default)
+ * - low: Least context, lowest cost, fastest response
+ */
+ searchContextSize: z4.enum(["low", "medium", "high"]).optional(),
+ /**
+ * User location information to provide geographically relevant search results.
+ */
+ userLocation: z4.object({
+ /**
+ * Type of location (always 'approximate')
+ */
+ type: z4.literal("approximate"),
+ /**
+ * Two-letter ISO country code (e.g., 'US', 'GB')
+ */
+ country: z4.string().optional(),
+ /**
+ * City name (free text, e.g., 'Minneapolis')
+ */
+ city: z4.string().optional(),
+ /**
+ * Region name (free text, e.g., 'Minnesota')
+ */
+ region: z4.string().optional(),
+ /**
+ * IANA timezone (e.g., 'America/Chicago')
+ */
+ timezone: z4.string().optional()
+ }).optional()
+ });
+ var webSearchPreview = createProviderDefinedToolFactory2({
+ id: "openai.web_search_preview",
+ name: "web_search_preview",
+ inputSchema: z4.object({})
+ });
+
+ // src/openai-prepare-tools.ts
  function prepareTools({
  tools,
  toolChoice,
- structuredOutputs
+ structuredOutputs,
+ strictJsonSchema
  }) {
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
@@ -308,18 +405,47 @@ function prepareTools({
  }
  const openaiTools2 = [];
  for (const tool of tools) {
- if (tool.type === "provider-defined") {
- toolWarnings.push({ type: "unsupported-tool", tool });
- } else {
- openaiTools2.push({
- type: "function",
- function: {
- name: tool.name,
- description: tool.description,
- parameters: tool.parameters,
- strict: structuredOutputs ? true : void 0
+ switch (tool.type) {
+ case "function":
+ openaiTools2.push({
+ type: "function",
+ function: {
+ name: tool.name,
+ description: tool.description,
+ parameters: tool.inputSchema,
+ strict: structuredOutputs ? strictJsonSchema : void 0
+ }
+ });
+ break;
+ case "provider-defined":
+ switch (tool.id) {
+ case "openai.file_search": {
+ const args = fileSearchArgsSchema.parse(tool.args);
+ openaiTools2.push({
+ type: "file_search",
+ vector_store_ids: args.vectorStoreIds,
+ max_results: args.maxResults,
+ search_type: args.searchType
+ });
+ break;
+ }
+ case "openai.web_search_preview": {
+ const args = webSearchPreviewArgsSchema.parse(tool.args);
+ openaiTools2.push({
+ type: "web_search_preview",
+ search_context_size: args.searchContextSize,
+ user_location: args.userLocation
+ });
+ break;
+ }
+ default:
+ toolWarnings.push({ type: "unsupported-tool", tool });
+ break;
  }
- });
+ break;
+ default:
+ toolWarnings.push({ type: "unsupported-tool", tool });
+ break;
  }
  }
  if (toolChoice == null) {
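
Example (illustrative): the provider-defined branches above consume tools built by the fileSearch and webSearchPreview factories (re-exported as openai.tools further down in this diff). The call-site shape and the vector store ID below are assumptions:

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = await generateText({
  model: openai("gpt-4o-mini"),
  prompt: "What do the onboarding docs say about laptops?",
  tools: {
    // becomes { type: "file_search", vector_store_ids: [...], max_results: 5, ... } in the request body
    file_search: openai.tools.fileSearch({ vectorStoreIds: ["vs_123"], maxResults: 5 }),
    // becomes { type: "web_search_preview", search_context_size: "low" }
    web_search_preview: openai.tools.webSearchPreview({ searchContextSize: "low" }),
  },
});
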
@@ -379,7 +505,7 @@ var OpenAIChatLanguageModel = class {
  toolChoice,
  providerOptions
  }) {
- var _a, _b, _c;
+ var _a, _b, _c, _d;
  const warnings = [];
  const openaiOptions = (_a = await parseProviderOptions({
  provider: "openai",
@@ -407,6 +533,7 @@ var OpenAIChatLanguageModel = class {
  }
  );
  warnings.push(...messageWarnings);
+ const strictJsonSchema = (_c = openaiOptions.strictJsonSchema) != null ? _c : false;
  const baseArgs = {
  // model id:
  model: this.modelId,
@@ -422,18 +549,15 @@ var OpenAIChatLanguageModel = class {
  top_p: topP,
  frequency_penalty: frequencyPenalty,
  presence_penalty: presencePenalty,
- response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? (
- // TODO convert into provider option
- structuredOutputs && responseFormat.schema != null ? {
- type: "json_schema",
- json_schema: {
- schema: responseFormat.schema,
- strict: true,
- name: (_c = responseFormat.name) != null ? _c : "response",
- description: responseFormat.description
- }
- } : { type: "json_object" }
- ) : void 0,
+ response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? structuredOutputs && responseFormat.schema != null ? {
+ type: "json_schema",
+ json_schema: {
+ schema: responseFormat.schema,
+ strict: strictJsonSchema,
+ name: (_d = responseFormat.name) != null ? _d : "response",
+ description: responseFormat.description
+ }
+ } : { type: "json_object" } : void 0,
  stop: stopSequences,
  seed,
  // openai specific settings:
@@ -443,6 +567,7 @@ var OpenAIChatLanguageModel = class {
  metadata: openaiOptions.metadata,
  prediction: openaiOptions.prediction,
  reasoning_effort: openaiOptions.reasoningEffort,
+ service_tier: openaiOptions.serviceTier,
  // messages:
  messages
  };
@@ -516,6 +641,14 @@ var OpenAIChatLanguageModel = class {
  });
  }
  }
+ if (openaiOptions.serviceTier === "flex" && !supportsFlexProcessing(this.modelId)) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "serviceTier",
+ details: "flex processing is only available for o3 and o4-mini models"
+ });
+ baseArgs.service_tier = void 0;
+ }
  const {
  tools: openaiTools2,
  toolChoice: openaiToolChoice,
@@ -523,7 +656,8 @@ var OpenAIChatLanguageModel = class {
  } = prepareTools({
  tools,
  toolChoice,
- structuredOutputs
+ structuredOutputs,
+ strictJsonSchema
  });
  return {
  args: {
@@ -564,10 +698,9 @@ var OpenAIChatLanguageModel = class {
  for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
  content.push({
  type: "tool-call",
- toolCallType: "function",
  toolCallId: (_b = toolCall.id) != null ? _b : generateId(),
  toolName: toolCall.function.name,
- args: toolCall.function.arguments
+ input: toolCall.function.arguments
  });
  }
  const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
@@ -633,6 +766,7 @@ var OpenAIChatLanguageModel = class {
  totalTokens: void 0
  };
  let isFirstChunk = true;
+ let isActiveText = false;
  const providerMetadata = { openai: {} };
  return {
  stream: response.pipeThrough(
@@ -642,6 +776,9 @@ var OpenAIChatLanguageModel = class {
  },
  transform(chunk, controller) {
  var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
+ if (options.includeRawChunks) {
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+ }
  if (!chunk.success) {
  finishReason = "error";
  controller.enqueue({ type: "error", error: chunk.error });
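
Example (illustrative): the new `raw` stream parts are emitted only when the caller opts in via includeRawChunks, as the guard above shows. A hedged consumer sketch, assuming the v5 beta streamText API:

import { streamText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = streamText({
  model: openai("gpt-4o"),
  prompt: "Hello!",
  includeRawChunks: true, // forwards every provider SSE chunk as { type: "raw", rawValue }
});

for await (const part of result.fullStream) {
  if (part.type === "raw") {
    console.log("provider chunk:", part.rawValue); // unparsed OpenAI chunk, useful for debugging/logging
  }
}
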
@@ -685,9 +822,14 @@ var OpenAIChatLanguageModel = class {
  }
  const delta = choice.delta;
  if (delta.content != null) {
+ if (!isActiveText) {
+ controller.enqueue({ type: "text-start", id: "0" });
+ isActiveText = true;
+ }
  controller.enqueue({
- type: "text",
- text: delta.content
+ type: "text-delta",
+ id: "0",
+ delta: delta.content
  });
  }
  if (delta.tool_calls != null) {
@@ -712,6 +854,11 @@ var OpenAIChatLanguageModel = class {
  message: `Expected 'function.name' to be a string.`
  });
  }
+ controller.enqueue({
+ type: "tool-input-start",
+ id: toolCallDelta.id,
+ toolName: toolCallDelta.function.name
+ });
  toolCalls[index] = {
  id: toolCallDelta.id,
  type: "function",
@@ -725,20 +872,21 @@ var OpenAIChatLanguageModel = class {
  if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
  if (toolCall2.function.arguments.length > 0) {
  controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall2.id,
- toolName: toolCall2.function.name,
- argsTextDelta: toolCall2.function.arguments
+ type: "tool-input-delta",
+ id: toolCall2.id,
+ delta: toolCall2.function.arguments
  });
  }
  if (isParsableJson(toolCall2.function.arguments)) {
+ controller.enqueue({
+ type: "tool-input-end",
+ id: toolCall2.id
+ });
  controller.enqueue({
  type: "tool-call",
- toolCallType: "function",
  toolCallId: (_q = toolCall2.id) != null ? _q : generateId(),
  toolName: toolCall2.function.name,
- args: toolCall2.function.arguments
+ input: toolCall2.function.arguments
  });
  toolCall2.hasFinished = true;
  }
@@ -753,19 +901,20 @@ var OpenAIChatLanguageModel = class {
  toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
  }
  controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.id,
- toolName: toolCall.function.name,
- argsTextDelta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
+ type: "tool-input-delta",
+ id: toolCall.id,
+ delta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
  });
  if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && isParsableJson(toolCall.function.arguments)) {
+ controller.enqueue({
+ type: "tool-input-end",
+ id: toolCall.id
+ });
  controller.enqueue({
  type: "tool-call",
- toolCallType: "function",
  toolCallId: (_x = toolCall.id) != null ? _x : generateId(),
  toolName: toolCall.function.name,
- args: toolCall.function.arguments
+ input: toolCall.function.arguments
  });
  toolCall.hasFinished = true;
  }
@@ -773,6 +922,9 @@ var OpenAIChatLanguageModel = class {
  }
  },
  flush(controller) {
+ if (isActiveText) {
+ controller.enqueue({ type: "text-end", id: "0" });
+ }
  controller.enqueue({
  type: "finish",
  finishReason,
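
Note: taken together, the streaming hunks above replace flat `text` and `tool-call-delta` parts with id-keyed lifecycle triples: text-start / text-delta / text-end and tool-input-start / tool-input-delta / tool-input-end, with the final tool-call part carrying `input` instead of `args`. A hedged consumer sketch (part type and field names are exactly those emitted above; the fullStream property is an assumed v5 beta API):

for await (const part of result.fullStream) {
  switch (part.type) {
    case "text-delta":
      process.stdout.write(part.delta); // deltas for the text block identified by part.id
      break;
    case "tool-input-delta":
      // incremental JSON for the tool call identified by part.id
      break;
    case "tool-call":
      console.log(part.toolName, part.input); // finalized call; `input` replaces `args`
      break;
  }
}
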
@@ -787,97 +939,97 @@ var OpenAIChatLanguageModel = class {
  };
  }
  };
- var openaiTokenUsageSchema = z3.object({
- prompt_tokens: z3.number().nullish(),
- completion_tokens: z3.number().nullish(),
- total_tokens: z3.number().nullish(),
- prompt_tokens_details: z3.object({
- cached_tokens: z3.number().nullish()
+ var openaiTokenUsageSchema = z5.object({
+ prompt_tokens: z5.number().nullish(),
+ completion_tokens: z5.number().nullish(),
+ total_tokens: z5.number().nullish(),
+ prompt_tokens_details: z5.object({
+ cached_tokens: z5.number().nullish()
  }).nullish(),
- completion_tokens_details: z3.object({
- reasoning_tokens: z3.number().nullish(),
- accepted_prediction_tokens: z3.number().nullish(),
- rejected_prediction_tokens: z3.number().nullish()
+ completion_tokens_details: z5.object({
+ reasoning_tokens: z5.number().nullish(),
+ accepted_prediction_tokens: z5.number().nullish(),
+ rejected_prediction_tokens: z5.number().nullish()
  }).nullish()
  }).nullish();
- var openaiChatResponseSchema = z3.object({
- id: z3.string().nullish(),
- created: z3.number().nullish(),
- model: z3.string().nullish(),
- choices: z3.array(
- z3.object({
- message: z3.object({
- role: z3.literal("assistant").nullish(),
- content: z3.string().nullish(),
- tool_calls: z3.array(
- z3.object({
- id: z3.string().nullish(),
- type: z3.literal("function"),
- function: z3.object({
- name: z3.string(),
- arguments: z3.string()
+ var openaiChatResponseSchema = z5.object({
+ id: z5.string().nullish(),
+ created: z5.number().nullish(),
+ model: z5.string().nullish(),
+ choices: z5.array(
+ z5.object({
+ message: z5.object({
+ role: z5.literal("assistant").nullish(),
+ content: z5.string().nullish(),
+ tool_calls: z5.array(
+ z5.object({
+ id: z5.string().nullish(),
+ type: z5.literal("function"),
+ function: z5.object({
+ name: z5.string(),
+ arguments: z5.string()
  })
  })
  ).nullish()
  }),
- index: z3.number(),
- logprobs: z3.object({
- content: z3.array(
- z3.object({
- token: z3.string(),
- logprob: z3.number(),
- top_logprobs: z3.array(
- z3.object({
- token: z3.string(),
- logprob: z3.number()
+ index: z5.number(),
+ logprobs: z5.object({
+ content: z5.array(
+ z5.object({
+ token: z5.string(),
+ logprob: z5.number(),
+ top_logprobs: z5.array(
+ z5.object({
+ token: z5.string(),
+ logprob: z5.number()
  })
  )
  })
  ).nullish()
  }).nullish(),
- finish_reason: z3.string().nullish()
+ finish_reason: z5.string().nullish()
  })
  ),
  usage: openaiTokenUsageSchema
  });
- var openaiChatChunkSchema = z3.union([
- z3.object({
- id: z3.string().nullish(),
- created: z3.number().nullish(),
- model: z3.string().nullish(),
- choices: z3.array(
- z3.object({
- delta: z3.object({
- role: z3.enum(["assistant"]).nullish(),
- content: z3.string().nullish(),
- tool_calls: z3.array(
- z3.object({
- index: z3.number(),
- id: z3.string().nullish(),
- type: z3.literal("function").nullish(),
- function: z3.object({
- name: z3.string().nullish(),
- arguments: z3.string().nullish()
+ var openaiChatChunkSchema = z5.union([
+ z5.object({
+ id: z5.string().nullish(),
+ created: z5.number().nullish(),
+ model: z5.string().nullish(),
+ choices: z5.array(
+ z5.object({
+ delta: z5.object({
+ role: z5.enum(["assistant"]).nullish(),
+ content: z5.string().nullish(),
+ tool_calls: z5.array(
+ z5.object({
+ index: z5.number(),
+ id: z5.string().nullish(),
+ type: z5.literal("function").nullish(),
+ function: z5.object({
+ name: z5.string().nullish(),
+ arguments: z5.string().nullish()
  })
  })
  ).nullish()
  }).nullish(),
- logprobs: z3.object({
- content: z3.array(
- z3.object({
- token: z3.string(),
- logprob: z3.number(),
- top_logprobs: z3.array(
- z3.object({
- token: z3.string(),
- logprob: z3.number()
+ logprobs: z5.object({
+ content: z5.array(
+ z5.object({
+ token: z5.string(),
+ logprob: z5.number(),
+ top_logprobs: z5.array(
+ z5.object({
+ token: z5.string(),
+ logprob: z5.number()
  })
  )
  })
  ).nullish()
  }).nullish(),
- finish_reason: z3.string().nullish(),
- index: z3.number()
+ finish_reason: z5.string().nullish(),
+ index: z5.number()
  })
  ),
  usage: openaiTokenUsageSchema
@@ -887,6 +1039,9 @@ var openaiChatChunkSchema = z3.union([
  function isReasoningModel(modelId) {
  return modelId.startsWith("o");
  }
+ function supportsFlexProcessing(modelId) {
+ return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+ }
  function getSystemMessageMode(modelId) {
  var _a, _b;
  if (!isReasoningModel(modelId)) {
@@ -935,7 +1090,7 @@ import {
  parseProviderOptions as parseProviderOptions2,
  postJsonToApi as postJsonToApi2
  } from "@ai-sdk/provider-utils";
- import { z as z5 } from "zod";
+ import { z as z7 } from "zod/v4";

  // src/convert-to-openai-completion-prompt.ts
  import {
@@ -1016,12 +1171,12 @@ ${user}:`]
  }

  // src/openai-completion-options.ts
- import { z as z4 } from "zod";
- var openaiCompletionProviderOptions = z4.object({
+ import { z as z6 } from "zod/v4";
+ var openaiCompletionProviderOptions = z6.object({
  /**
  Echo back the prompt in addition to the completion.
  */
- echo: z4.boolean().optional(),
+ echo: z6.boolean().optional(),
  /**
  Modify the likelihood of specified tokens appearing in the completion.

@@ -1036,16 +1191,16 @@ var openaiCompletionProviderOptions = z4.object({
  As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
  token from being generated.
  */
- logitBias: z4.record(z4.string(), z4.number()).optional(),
+ logitBias: z6.record(z6.string(), z6.number()).optional(),
  /**
  The suffix that comes after a completion of inserted text.
  */
- suffix: z4.string().optional(),
+ suffix: z6.string().optional(),
  /**
  A unique identifier representing your end-user, which can help OpenAI to
  monitor and detect abuse. Learn more.
  */
- user: z4.string().optional(),
+ user: z6.string().optional(),
  /**
  Return the log probabilities of the tokens. Including logprobs will increase
  the response size and can slow down response times. However, it can
@@ -1055,7 +1210,7 @@ var openaiCompletionProviderOptions = z4.object({
  Setting to a number will return the log probabilities of the top n
  tokens that were generated.
  */
- logprobs: z4.union([z4.boolean(), z4.number()]).optional()
+ logprobs: z6.union([z6.boolean(), z6.number()]).optional()
  });

  // src/openai-completion-language-model.ts
@@ -1227,6 +1382,9 @@ var OpenAICompletionLanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
+ if (options.includeRawChunks) {
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+ }
  if (!chunk.success) {
  finishReason = "error";
  controller.enqueue({ type: "error", error: chunk.error });
@@ -1244,6 +1402,7 @@ var OpenAICompletionLanguageModel = class {
  type: "response-metadata",
  ...getResponseMetadata(value)
  });
+ controller.enqueue({ type: "text-start", id: "0" });
  }
  if (value.usage != null) {
  usage.inputTokens = value.usage.prompt_tokens;
@@ -1257,14 +1416,18 @@ var OpenAICompletionLanguageModel = class {
  if ((choice == null ? void 0 : choice.logprobs) != null) {
  providerMetadata.openai.logprobs = choice.logprobs;
  }
- if ((choice == null ? void 0 : choice.text) != null) {
+ if ((choice == null ? void 0 : choice.text) != null && choice.text.length > 0) {
  controller.enqueue({
- type: "text",
- text: choice.text
+ type: "text-delta",
+ id: "0",
+ delta: choice.text
  });
  }
  },
  flush(controller) {
+ if (!isFirstChunk) {
+ controller.enqueue({ type: "text-end", id: "0" });
+ }
  controller.enqueue({
  type: "finish",
  finishReason,
@@ -1279,42 +1442,42 @@ var OpenAICompletionLanguageModel = class {
  };
  }
  };
- var usageSchema = z5.object({
- prompt_tokens: z5.number(),
- completion_tokens: z5.number(),
- total_tokens: z5.number()
+ var usageSchema = z7.object({
+ prompt_tokens: z7.number(),
+ completion_tokens: z7.number(),
+ total_tokens: z7.number()
  });
- var openaiCompletionResponseSchema = z5.object({
- id: z5.string().nullish(),
- created: z5.number().nullish(),
- model: z5.string().nullish(),
- choices: z5.array(
- z5.object({
- text: z5.string(),
- finish_reason: z5.string(),
- logprobs: z5.object({
- tokens: z5.array(z5.string()),
- token_logprobs: z5.array(z5.number()),
- top_logprobs: z5.array(z5.record(z5.string(), z5.number())).nullish()
+ var openaiCompletionResponseSchema = z7.object({
+ id: z7.string().nullish(),
+ created: z7.number().nullish(),
+ model: z7.string().nullish(),
+ choices: z7.array(
+ z7.object({
+ text: z7.string(),
+ finish_reason: z7.string(),
+ logprobs: z7.object({
+ tokens: z7.array(z7.string()),
+ token_logprobs: z7.array(z7.number()),
+ top_logprobs: z7.array(z7.record(z7.string(), z7.number())).nullish()
  }).nullish()
  })
  ),
  usage: usageSchema.nullish()
  });
- var openaiCompletionChunkSchema = z5.union([
- z5.object({
- id: z5.string().nullish(),
- created: z5.number().nullish(),
- model: z5.string().nullish(),
- choices: z5.array(
- z5.object({
- text: z5.string(),
- finish_reason: z5.string().nullish(),
- index: z5.number(),
- logprobs: z5.object({
- tokens: z5.array(z5.string()),
- token_logprobs: z5.array(z5.number()),
- top_logprobs: z5.array(z5.record(z5.string(), z5.number())).nullish()
+ var openaiCompletionChunkSchema = z7.union([
+ z7.object({
+ id: z7.string().nullish(),
+ created: z7.number().nullish(),
+ model: z7.string().nullish(),
+ choices: z7.array(
+ z7.object({
+ text: z7.string(),
+ finish_reason: z7.string().nullish(),
+ index: z7.number(),
+ logprobs: z7.object({
+ tokens: z7.array(z7.string()),
+ token_logprobs: z7.array(z7.number()),
+ top_logprobs: z7.array(z7.record(z7.string(), z7.number())).nullish()
  }).nullish()
  })
  ),
@@ -1333,21 +1496,21 @@ import {
  parseProviderOptions as parseProviderOptions3,
  postJsonToApi as postJsonToApi3
  } from "@ai-sdk/provider-utils";
- import { z as z7 } from "zod";
+ import { z as z9 } from "zod/v4";

  // src/openai-embedding-options.ts
- import { z as z6 } from "zod";
- var openaiEmbeddingProviderOptions = z6.object({
+ import { z as z8 } from "zod/v4";
+ var openaiEmbeddingProviderOptions = z8.object({
  /**
  The number of dimensions the resulting output embeddings should have.
  Only supported in text-embedding-3 and later models.
  */
- dimensions: z6.number().optional(),
+ dimensions: z8.number().optional(),
  /**
  A unique identifier representing your end-user, which can help OpenAI to
  monitor and detect abuse. Learn more.
  */
- user: z6.string().optional()
+ user: z8.string().optional()
  });

  // src/openai-embedding-model.ts
@@ -1413,9 +1576,9 @@ var OpenAIEmbeddingModel = class {
  };
  }
  };
- var openaiTextEmbeddingResponseSchema = z7.object({
- data: z7.array(z7.object({ embedding: z7.array(z7.number()) })),
- usage: z7.object({ prompt_tokens: z7.number() }).nullish()
+ var openaiTextEmbeddingResponseSchema = z9.object({
+ data: z9.array(z9.object({ embedding: z9.array(z9.number()) })),
+ usage: z9.object({ prompt_tokens: z9.number() }).nullish()
  });

  // src/openai-image-model.ts
@@ -1424,7 +1587,7 @@ import {
  createJsonResponseHandler as createJsonResponseHandler4,
  postJsonToApi as postJsonToApi4
  } from "@ai-sdk/provider-utils";
- import { z as z8 } from "zod";
+ import { z as z10 } from "zod/v4";

  // src/openai-image-settings.ts
  var modelMaxImagesPerCall = {
@@ -1512,31 +1675,16 @@ var OpenAIImageModel = class {
  };
  }
  };
- var openaiImageResponseSchema = z8.object({
- data: z8.array(
- z8.object({ b64_json: z8.string(), revised_prompt: z8.string().optional() })
+ var openaiImageResponseSchema = z10.object({
+ data: z10.array(
+ z10.object({ b64_json: z10.string(), revised_prompt: z10.string().optional() })
  )
  });

  // src/openai-tools.ts
- import { z as z9 } from "zod";
- var WebSearchPreviewParameters = z9.object({});
- function webSearchPreviewTool({
- searchContextSize,
- userLocation
- } = {}) {
- return {
- type: "provider-defined",
- id: "openai.web_search_preview",
- args: {
- searchContextSize,
- userLocation
- },
- parameters: WebSearchPreviewParameters
- };
- }
  var openaiTools = {
- webSearchPreview: webSearchPreviewTool
+ fileSearch,
+ webSearchPreview
  };

  // src/openai-transcription-model.ts
@@ -1547,33 +1695,33 @@ import {
  parseProviderOptions as parseProviderOptions4,
  postFormDataToApi
  } from "@ai-sdk/provider-utils";
- import { z as z11 } from "zod";
+ import { z as z12 } from "zod/v4";

  // src/openai-transcription-options.ts
- import { z as z10 } from "zod";
- var openAITranscriptionProviderOptions = z10.object({
+ import { z as z11 } from "zod/v4";
+ var openAITranscriptionProviderOptions = z11.object({
  /**
  * Additional information to include in the transcription response.
  */
- include: z10.array(z10.string()).optional(),
+ include: z11.array(z11.string()).optional(),
  /**
  * The language of the input audio in ISO-639-1 format.
  */
- language: z10.string().optional(),
+ language: z11.string().optional(),
  /**
  * An optional text to guide the model's style or continue a previous audio segment.
  */
- prompt: z10.string().optional(),
+ prompt: z11.string().optional(),
  /**
  * The sampling temperature, between 0 and 1.
  * @default 0
  */
- temperature: z10.number().min(0).max(1).default(0).optional(),
+ temperature: z11.number().min(0).max(1).default(0).optional(),
  /**
  * The timestamp granularities to populate for this transcription.
  * @default ['segment']
  */
- timestampGranularities: z10.array(z10.enum(["word", "segment"])).default(["segment"]).optional()
+ timestampGranularities: z11.array(z11.enum(["word", "segment"])).default(["segment"]).optional()
  });

  // src/openai-transcription-model.ts
@@ -1640,7 +1788,7 @@ var OpenAITranscriptionModel = class {
  constructor(modelId, config) {
  this.modelId = modelId;
  this.config = config;
- this.specificationVersion = "v1";
+ this.specificationVersion = "v2";
  }
  get provider() {
  return this.config.provider;
@@ -1721,38 +1869,44 @@ var OpenAITranscriptionModel = class {
  };
  }
  };
- var openaiTranscriptionResponseSchema = z11.object({
- text: z11.string(),
- language: z11.string().nullish(),
- duration: z11.number().nullish(),
- words: z11.array(
- z11.object({
- word: z11.string(),
- start: z11.number(),
- end: z11.number()
+ var openaiTranscriptionResponseSchema = z12.object({
+ text: z12.string(),
+ language: z12.string().nullish(),
+ duration: z12.number().nullish(),
+ words: z12.array(
+ z12.object({
+ word: z12.string(),
+ start: z12.number(),
+ end: z12.number()
  })
  ).nullish()
  });

  // src/responses/openai-responses-language-model.ts
+ import {
+ APICallError
+ } from "@ai-sdk/provider";
  import {
  combineHeaders as combineHeaders6,
  createEventSourceResponseHandler as createEventSourceResponseHandler3,
  createJsonResponseHandler as createJsonResponseHandler6,
  generateId as generateId2,
- parseProviderOptions as parseProviderOptions5,
+ parseProviderOptions as parseProviderOptions6,
  postJsonToApi as postJsonToApi5
  } from "@ai-sdk/provider-utils";
- import { z as z12 } from "zod";
+ import { z as z14 } from "zod/v4";

  // src/responses/convert-to-openai-responses-messages.ts
  import {
  UnsupportedFunctionalityError as UnsupportedFunctionalityError4
  } from "@ai-sdk/provider";
- function convertToOpenAIResponsesMessages({
+ import { parseProviderOptions as parseProviderOptions5 } from "@ai-sdk/provider-utils";
+ import { z as z13 } from "zod/v4";
+ async function convertToOpenAIResponsesMessages({
  prompt,
  systemMessageMode
  }) {
+ var _a, _b, _c, _d, _e, _f;
  const messages = [];
  const warnings = [];
  for (const { role, content } of prompt) {
@@ -1787,7 +1941,7 @@ function convertToOpenAIResponsesMessages({
  messages.push({
  role: "user",
  content: content.map((part, index) => {
- var _a, _b, _c;
+ var _a2, _b2, _c2;
  switch (part.type) {
  case "text": {
  return { type: "input_text", text: part.text };
@@ -1799,7 +1953,7 @@ function convertToOpenAIResponsesMessages({
  type: "input_image",
  image_url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`,
  // OpenAI specific extension: image detail
- detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
+ detail: (_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b2.imageDetail
  };
  } else if (part.mediaType === "application/pdf") {
  if (part.data instanceof URL) {
@@ -1809,7 +1963,7 @@ function convertToOpenAIResponsesMessages({
  }
  return {
  type: "input_file",
- filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
+ filename: (_c2 = part.filename) != null ? _c2 : `part-${index}.pdf`,
  file_data: `data:application/pdf;base64,${part.data}`
  };
  } else {
@@ -1824,34 +1978,97 @@ function convertToOpenAIResponsesMessages({
  break;
  }
  case "assistant": {
+ const reasoningMessages = {};
  for (const part of content) {
  switch (part.type) {
  case "text": {
  messages.push({
  role: "assistant",
- content: [{ type: "output_text", text: part.text }]
+ content: [{ type: "output_text", text: part.text }],
+ id: (_c = (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.itemId) != null ? _c : void 0
  });
  break;
  }
  case "tool-call": {
+ if (part.providerExecuted) {
+ break;
+ }
  messages.push({
  type: "function_call",
  call_id: part.toolCallId,
  name: part.toolName,
- arguments: JSON.stringify(part.args)
+ arguments: JSON.stringify(part.input),
+ id: (_f = (_e = (_d = part.providerOptions) == null ? void 0 : _d.openai) == null ? void 0 : _e.itemId) != null ? _f : void 0
+ });
+ break;
+ }
+ case "tool-result": {
+ warnings.push({
+ type: "other",
+ message: `tool result parts in assistant messages are not supported for OpenAI responses`
  });
  break;
  }
+ case "reasoning": {
+ const providerOptions = await parseProviderOptions5({
+ provider: "openai",
+ providerOptions: part.providerOptions,
+ schema: openaiResponsesReasoningProviderOptionsSchema
+ });
+ const reasoningId = providerOptions == null ? void 0 : providerOptions.itemId;
+ if (reasoningId != null) {
+ const existingReasoningMessage = reasoningMessages[reasoningId];
+ const summaryParts = [];
+ if (part.text.length > 0) {
+ summaryParts.push({ type: "summary_text", text: part.text });
+ } else if (existingReasoningMessage !== void 0) {
+ warnings.push({
+ type: "other",
+ message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
+ });
+ }
+ if (existingReasoningMessage === void 0) {
+ reasoningMessages[reasoningId] = {
+ type: "reasoning",
+ id: reasoningId,
+ encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
+ summary: summaryParts
+ };
+ messages.push(reasoningMessages[reasoningId]);
+ } else {
+ existingReasoningMessage.summary.push(...summaryParts);
+ }
+ } else {
+ warnings.push({
+ type: "other",
+ message: `Non-OpenAI reasoning parts are not supported. Skipping reasoning part: ${JSON.stringify(part)}.`
+ });
+ }
+ break;
+ }
  }
  }
  break;
  }
  case "tool": {
  for (const part of content) {
+ const output = part.output;
+ let contentValue;
+ switch (output.type) {
+ case "text":
+ case "error-text":
+ contentValue = output.value;
+ break;
+ case "content":
+ case "json":
+ case "error-json":
+ contentValue = JSON.stringify(output.value);
+ break;
+ }
  messages.push({
  type: "function_call_output",
  call_id: part.toolCallId,
- output: JSON.stringify(part.result)
+ output: contentValue
  });
  }
  break;
@@ -1864,6 +2081,10 @@ function convertToOpenAIResponsesMessages({
  }
  return { messages, warnings };
  }
+ var openaiResponsesReasoningProviderOptionsSchema = z13.object({
+ itemId: z13.string().nullish(),
+ reasoningEncryptedContent: z13.string().nullish()
+ });

  // src/responses/map-openai-responses-finish-reason.ts
  function mapOpenAIResponseFinishReason({
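
Note: the reasoning branch above round-trips OpenAI Responses reasoning items. Reasoning parts tagged with the same openai.itemId are merged back into one reasoning input item that carries the encrypted content. A sketch of the converter's output for two parts sharing an itemId (shapes from the hunk above; values are hypothetical):

const reasoningInputItem = {
  type: "reasoning",
  id: "rs_abc123",               // from providerOptions.openai.itemId
  encrypted_content: "gAAAA...", // from providerOptions.openai.reasoningEncryptedContent
  summary: [
    { type: "summary_text", text: "Considered two refactor strategies." },
    { type: "summary_text", text: "Chose the incremental one." }, // appended to the same item, not duplicated
  ],
};
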
@@ -1890,7 +2111,7 @@ import {
  function prepareResponsesTools({
  tools,
  toolChoice,
- strict
+ strictJsonSchema
  }) {
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
  const toolWarnings = [];
@@ -1905,12 +2126,22 @@ function prepareResponsesTools({
  type: "function",
  name: tool.name,
  description: tool.description,
- parameters: tool.parameters,
- strict: strict ? true : void 0
+ parameters: tool.inputSchema,
+ strict: strictJsonSchema
  });
  break;
  case "provider-defined":
  switch (tool.id) {
+ case "openai.file_search": {
+ const args = fileSearchArgsSchema.parse(tool.args);
+ openaiTools2.push({
+ type: "file_search",
+ vector_store_ids: args.vectorStoreIds,
+ max_results: args.maxResults,
+ search_type: args.searchType
+ });
+ break;
+ }
  case "openai.web_search_preview":
  openaiTools2.push({
  type: "web_search_preview",
@@ -1940,7 +2171,7 @@ function prepareResponsesTools({
  case "tool":
  return {
  tools: openaiTools2,
- toolChoice: toolChoice.toolName === "web_search_preview" ? { type: "web_search_preview" } : { type: "function", name: toolChoice.toolName },
+ toolChoice: toolChoice.toolName === "file_search" ? { type: "file_search" } : toolChoice.toolName === "web_search_preview" ? { type: "web_search_preview" } : { type: "function", name: toolChoice.toolName },
  toolWarnings
  };
  default: {
@@ -2004,17 +2235,17 @@ var OpenAIResponsesLanguageModel = class {
  if (stopSequences != null) {
  warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
  }
- const { messages, warnings: messageWarnings } = convertToOpenAIResponsesMessages({
+ const { messages, warnings: messageWarnings } = await convertToOpenAIResponsesMessages({
  prompt,
  systemMessageMode: modelConfig.systemMessageMode
  });
  warnings.push(...messageWarnings);
- const openaiOptions = await parseProviderOptions5({
+ const openaiOptions = await parseProviderOptions6({
  provider: "openai",
  providerOptions,
  schema: openaiResponsesProviderOptionsSchema
  });
- const isStrict = (_a = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _a : true;
+ const strictJsonSchema = (_a = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _a : false;
  const baseArgs = {
  model: this.modelId,
  input: messages,
@@ -2025,7 +2256,7 @@ var OpenAIResponsesLanguageModel = class {
  text: {
  format: responseFormat.schema != null ? {
  type: "json_schema",
- strict: isStrict,
+ strict: strictJsonSchema,
  name: (_b = responseFormat.name) != null ? _b : "response",
  description: responseFormat.description,
  schema: responseFormat.schema
@@ -2039,6 +2270,8 @@ var OpenAIResponsesLanguageModel = class {
  store: openaiOptions == null ? void 0 : openaiOptions.store,
  user: openaiOptions == null ? void 0 : openaiOptions.user,
  instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
+ service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
+ include: openaiOptions == null ? void 0 : openaiOptions.include,
  // model-specific settings:
  ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
  reasoning: {
@@ -2071,6 +2304,29 @@ var OpenAIResponsesLanguageModel = class {
  details: "topP is not supported for reasoning models"
  });
  }
+ } else {
+ if ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "reasoningEffort",
+ details: "reasoningEffort is not supported for non-reasoning models"
+ });
+ }
+ if ((openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "reasoningSummary",
+ details: "reasoningSummary is not supported for non-reasoning models"
+ });
+ }
+ }
+ if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !supportsFlexProcessing2(this.modelId)) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "serviceTier",
+ details: "flex processing is only available for o3 and o4-mini models"
+ });
+ delete baseArgs.service_tier;
  }
  const {
  tools: openaiTools2,
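
Example (illustrative): the Responses model now forwards serviceTier and include from providerOptions.openai, and the guard above strips service_tier with a warning when the model does not support flex processing. The include value follows the OpenAI Responses API; the overall call shape is an assumed v5 beta usage:

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = await generateText({
  model: openai.responses("o3"),
  prompt: "Plan a three-step refactor.",
  providerOptions: {
    openai: {
      serviceTier: "flex",                      // removed with a warning on unsupported models
      include: ["reasoning.encrypted_content"], // request encrypted reasoning for later replay
    },
  },
});
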
@@ -2079,7 +2335,7 @@ var OpenAIResponsesLanguageModel = class {
  } = prepareResponsesTools({
  tools,
  toolChoice,
- strict: isStrict
+ strictJsonSchema
  });
  return {
  args: {
@@ -2091,97 +2347,137 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i;
  const { args: body, warnings } = await this.getArgs(options);
+ const url = this.config.url({
+ path: "/responses",
+ modelId: this.modelId
+ });
  const {
  responseHeaders,
  value: response,
  rawValue: rawResponse
  } = await postJsonToApi5({
- url: this.config.url({
- path: "/responses",
- modelId: this.modelId
- }),
+ url,
  headers: combineHeaders6(this.config.headers(), options.headers),
  body,
  failedResponseHandler: openaiFailedResponseHandler,
  successfulResponseHandler: createJsonResponseHandler6(
- z12.object({
- id: z12.string(),
- created_at: z12.number(),
- model: z12.string(),
- output: z12.array(
- z12.discriminatedUnion("type", [
- z12.object({
- type: z12.literal("message"),
- role: z12.literal("assistant"),
- content: z12.array(
- z12.object({
- type: z12.literal("output_text"),
- text: z12.string(),
- annotations: z12.array(
- z12.object({
- type: z12.literal("url_citation"),
- start_index: z12.number(),
- end_index: z12.number(),
- url: z12.string(),
- title: z12.string()
+ z14.object({
+ id: z14.string(),
+ created_at: z14.number(),
+ error: z14.object({
+ code: z14.string(),
+ message: z14.string()
+ }).nullish(),
+ model: z14.string(),
+ output: z14.array(
+ z14.discriminatedUnion("type", [
+ z14.object({
+ type: z14.literal("message"),
+ role: z14.literal("assistant"),
+ id: z14.string(),
+ content: z14.array(
+ z14.object({
+ type: z14.literal("output_text"),
+ text: z14.string(),
+ annotations: z14.array(
+ z14.object({
+ type: z14.literal("url_citation"),
+ start_index: z14.number(),
+ end_index: z14.number(),
+ url: z14.string(),
+ title: z14.string()
  })
  )
  })
  )
  }),
- z12.object({
- type: z12.literal("function_call"),
- call_id: z12.string(),
- name: z12.string(),
- arguments: z12.string()
+ z14.object({
+ type: z14.literal("function_call"),
+ call_id: z14.string(),
+ name: z14.string(),
+ arguments: z14.string(),
+ id: z14.string()
  }),
- z12.object({
- type: z12.literal("web_search_call")
+ z14.object({
+ type: z14.literal("web_search_call"),
+ id: z14.string(),
+ status: z14.string().optional()
  }),
- z12.object({
- type: z12.literal("computer_call")
+ z14.object({
+ type: z14.literal("computer_call"),
+ id: z14.string(),
+ status: z14.string().optional()
  }),
- z12.object({
- type: z12.literal("reasoning"),
- summary: z12.array(
- z12.object({
- type: z12.literal("summary_text"),
- text: z12.string()
+ z14.object({
+ type: z14.literal("reasoning"),
+ id: z14.string(),
+ encrypted_content: z14.string().nullish(),
+ summary: z14.array(
+ z14.object({
+ type: z14.literal("summary_text"),
+ text: z14.string()
  })
  )
  })
  ])
  ),
- incomplete_details: z12.object({ reason: z12.string() }).nullable(),
+ incomplete_details: z14.object({ reason: z14.string() }).nullable(),
  usage: usageSchema2
  })
  ),
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
+ if (response.error) {
+ throw new APICallError({
+ message: response.error.message,
+ url,
+ requestBodyValues: body,
+ statusCode: 400,
+ responseHeaders,
+ responseBody: rawResponse,
+ isRetryable: false
+ });
+ }
  const content = [];
  for (const part of response.output) {
  switch (part.type) {
  case "reasoning": {
- content.push({
- type: "reasoning",
- text: part.summary.map((summary) => summary.text).join()
- });
+ if (part.summary.length === 0) {
+ part.summary.push({ type: "summary_text", text: "" });
+ }
+ for (const summary of part.summary) {
+ content.push({
+ type: "reasoning",
+ text: summary.text,
+ providerMetadata: {
+ openai: {
+ itemId: part.id,
+ reasoningEncryptedContent: (_a = part.encrypted_content) != null ? _a : null
+ }
+ }
+ });
+ }
  break;
  }
  case "message": {
  for (const contentPart of part.content) {
  content.push({
  type: "text",
- text: contentPart.text
+ text: contentPart.text,
+ providerMetadata: {
+ openai: {
+ itemId: part.id
+ }
+ }
  });
  for (const annotation of contentPart.annotations) {
  content.push({
  type: "source",
  sourceType: "url",
- id: (_c = (_b = (_a = this.config).generateId) == null ? void 0 : _b.call(_a)) != null ? _c : generateId2(),
+ id: (_d = (_c = (_b = this.config).generateId) == null ? void 0 : _c.call(_b)) != null ? _d : generateId2(),
  url: annotation.url,
  title: annotation.title
  });
@@ -2192,10 +2488,51 @@ var OpenAIResponsesLanguageModel = class {
  case "function_call": {
  content.push({
  type: "tool-call",
- toolCallType: "function",
  toolCallId: part.call_id,
  toolName: part.name,
- args: part.arguments
+ input: part.arguments,
+ providerMetadata: {
+ openai: {
+ itemId: part.id
+ }
+ }
+ });
+ break;
+ }
+ case "web_search_call": {
+ content.push({
+ type: "tool-call",
+ toolCallId: part.id,
+ toolName: "web_search_preview",
+ input: "",
+ providerExecuted: true
+ });
+ content.push({
+ type: "tool-result",
+ toolCallId: part.id,
+ toolName: "web_search_preview",
+ result: { status: part.status || "completed" },
+ providerExecuted: true
+ });
+ break;
+ }
+ case "computer_call": {
+ content.push({
+ type: "tool-call",
+ toolCallId: part.id,
+ toolName: "computer_use",
+ input: "",
+ providerExecuted: true
+ });
+ content.push({
+ type: "tool-result",
+ toolCallId: part.id,
+ toolName: "computer_use",
+ result: {
+ type: "computer_use_tool_result",
+ status: part.status || "completed"
+ },
+ providerExecuted: true
  });
  break;
  }
@@ -2204,15 +2541,15 @@ var OpenAIResponsesLanguageModel = class {
  return {
  content,
  finishReason: mapOpenAIResponseFinishReason({
- finishReason: (_d = response.incomplete_details) == null ? void 0 : _d.reason,
+ finishReason: (_e = response.incomplete_details) == null ? void 0 : _e.reason,
  hasToolCalls: content.some((part) => part.type === "tool-call")
  }),
  usage: {
  inputTokens: response.usage.input_tokens,
  outputTokens: response.usage.output_tokens,
  totalTokens: response.usage.input_tokens + response.usage.output_tokens,
- reasoningTokens: (_f = (_e = response.usage.output_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : void 0,
- cachedInputTokens: (_h = (_g = response.usage.input_tokens_details) == null ? void 0 : _g.cached_tokens) != null ? _h : void 0
+ reasoningTokens: (_g = (_f = response.usage.output_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : void 0,
+ cachedInputTokens: (_i = (_h = response.usage.input_tokens_details) == null ? void 0 : _h.cached_tokens) != null ? _i : void 0
  },
  request: { body },
  response: {
@@ -2259,6 +2596,7 @@ var OpenAIResponsesLanguageModel = class {
2259
2596
  let responseId = null;
2260
2597
  const ongoingToolCalls = {};
2261
2598
  let hasToolCalls = false;
2599
+ const activeReasoning = {};
2262
2600
  return {
2263
2601
  stream: response.pipeThrough(
2264
2602
  new TransformStream({
@@ -2266,7 +2604,10 @@ var OpenAIResponsesLanguageModel = class {
2266
2604
  controller.enqueue({ type: "stream-start", warnings });
2267
2605
  },
2268
2606
  transform(chunk, controller) {
2269
- var _a, _b, _c, _d, _e, _f, _g, _h;
2607
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
2608
+ if (options.includeRawChunks) {
2609
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
2610
+ }
2270
2611
  if (!chunk.success) {
2271
2612
  finishReason = "error";
2272
2613
  controller.enqueue({ type: "error", error: chunk.error });
@@ -2280,22 +2621,151 @@ var OpenAIResponsesLanguageModel = class {
  toolCallId: value.item.call_id
  };
  controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
+ type: "tool-input-start",
+ id: value.item.call_id,
+ toolName: value.item.name
+ });
+ } else if (value.item.type === "web_search_call") {
+ ongoingToolCalls[value.output_index] = {
+ toolName: "web_search_preview",
+ toolCallId: value.item.id
+ };
+ controller.enqueue({
+ type: "tool-input-start",
+ id: value.item.id,
+ toolName: "web_search_preview"
+ });
+ } else if (value.item.type === "computer_call") {
+ ongoingToolCalls[value.output_index] = {
+ toolName: "computer_use",
+ toolCallId: value.item.id
+ };
+ controller.enqueue({
+ type: "tool-input-start",
+ id: value.item.id,
+ toolName: "computer_use"
+ });
+ } else if (value.item.type === "message") {
+ controller.enqueue({
+ type: "text-start",
+ id: value.item.id,
+ providerMetadata: {
+ openai: {
+ itemId: value.item.id
+ }
+ }
+ });
+ } else if (isResponseOutputItemAddedReasoningChunk(value)) {
+ activeReasoning[value.item.id] = {
+ encryptedContent: value.item.encrypted_content,
+ summaryParts: [0]
+ };
+ controller.enqueue({
+ type: "reasoning-start",
+ id: `${value.item.id}:0`,
+ providerMetadata: {
+ openai: {
+ itemId: value.item.id,
+ reasoningEncryptedContent: (_a = value.item.encrypted_content) != null ? _a : null
+ }
+ }
+ });
+ }
+ } else if (isResponseOutputItemDoneChunk(value)) {
+ if (value.item.type === "function_call") {
+ ongoingToolCalls[value.output_index] = void 0;
+ hasToolCalls = true;
+ controller.enqueue({
+ type: "tool-input-end",
+ id: value.item.call_id
+ });
+ controller.enqueue({
+ type: "tool-call",
  toolCallId: value.item.call_id,
  toolName: value.item.name,
- argsTextDelta: value.item.arguments
+ input: value.item.arguments,
+ providerMetadata: {
+ openai: {
+ itemId: value.item.id
+ }
+ }
+ });
+ } else if (value.item.type === "web_search_call") {
+ ongoingToolCalls[value.output_index] = void 0;
+ hasToolCalls = true;
+ controller.enqueue({
+ type: "tool-input-end",
+ id: value.item.id
+ });
+ controller.enqueue({
+ type: "tool-call",
+ toolCallId: value.item.id,
+ toolName: "web_search_preview",
+ input: "",
+ providerExecuted: true
  });
+ controller.enqueue({
+ type: "tool-result",
+ toolCallId: value.item.id,
+ toolName: "web_search_preview",
+ result: {
+ type: "web_search_tool_result",
+ status: value.item.status || "completed"
+ },
+ providerExecuted: true
+ });
+ } else if (value.item.type === "computer_call") {
+ ongoingToolCalls[value.output_index] = void 0;
+ hasToolCalls = true;
+ controller.enqueue({
+ type: "tool-input-end",
+ id: value.item.id
+ });
+ controller.enqueue({
+ type: "tool-call",
+ toolCallId: value.item.id,
+ toolName: "computer_use",
+ input: "",
+ providerExecuted: true
+ });
+ controller.enqueue({
+ type: "tool-result",
+ toolCallId: value.item.id,
+ toolName: "computer_use",
+ result: {
+ type: "computer_use_tool_result",
+ status: value.item.status || "completed"
+ },
+ providerExecuted: true
+ });
+ } else if (value.item.type === "message") {
+ controller.enqueue({
+ type: "text-end",
+ id: value.item.id
+ });
+ } else if (isResponseOutputItemDoneReasoningChunk(value)) {
+ const activeReasoningPart = activeReasoning[value.item.id];
+ for (const summaryIndex of activeReasoningPart.summaryParts) {
+ controller.enqueue({
+ type: "reasoning-end",
+ id: `${value.item.id}:${summaryIndex}`,
+ providerMetadata: {
+ openai: {
+ itemId: value.item.id,
+ reasoningEncryptedContent: (_b = value.item.encrypted_content) != null ? _b : null
+ }
+ }
+ });
+ }
+ delete activeReasoning[value.item.id];
  }
  } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
  const toolCall = ongoingToolCalls[value.output_index];
  if (toolCall != null) {
  controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.toolCallId,
- toolName: toolCall.toolName,
- argsTextDelta: value.delta
+ type: "tool-input-delta",
+ id: toolCall.toolCallId,
+ delta: value.delta
  });
  }
  } else if (isResponseCreatedChunk(value)) {
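
The hunk above replaces the old tool-call-delta events with a tool-input-start / tool-input-delta / tool-input-end sequence, closed by a final tool-call part carrying the complete JSON argument string (and, for provider-executed tools, a matching tool-result). A sketch of a consumer that reassembles the input per call id, assuming the LanguageModelV2StreamPart union exported by @ai-sdk/provider matches the shapes enqueued above:

import type { LanguageModelV2StreamPart } from '@ai-sdk/provider';

// One buffer per tool call id, keyed by the id carried on the input parts.
const inputBuffers = new Map<string, string>();

function handlePart(part: LanguageModelV2StreamPart) {
  switch (part.type) {
    case 'tool-input-start':
      inputBuffers.set(part.id, '');
      break;
    case 'tool-input-delta':
      inputBuffers.set(part.id, (inputBuffers.get(part.id) ?? '') + part.delta);
      break;
    case 'tool-call':
      // The input arrives complete here as a JSON string (value.item.arguments).
      console.log(part.toolName, JSON.parse(part.input));
      inputBuffers.delete(part.toolCallId);
      break;
  }
}
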
@@ -2308,42 +2778,57 @@ var OpenAIResponsesLanguageModel = class {
  });
  } else if (isTextDeltaChunk(value)) {
  controller.enqueue({
- type: "text",
- text: value.delta
+ type: "text-delta",
+ id: value.item_id,
+ delta: value.delta
  });
+ } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
+ if (value.summary_index > 0) {
+ (_c = activeReasoning[value.item_id]) == null ? void 0 : _c.summaryParts.push(
+ value.summary_index
+ );
+ controller.enqueue({
+ type: "reasoning-start",
+ id: `${value.item_id}:${value.summary_index}`,
+ providerMetadata: {
+ openai: {
+ itemId: value.item_id,
+ reasoningEncryptedContent: (_e = (_d = activeReasoning[value.item_id]) == null ? void 0 : _d.encryptedContent) != null ? _e : null
+ }
+ }
+ });
+ }
  } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
  controller.enqueue({
- type: "reasoning",
- text: value.delta
- });
- } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
- ongoingToolCalls[value.output_index] = void 0;
- hasToolCalls = true;
- controller.enqueue({
- type: "tool-call",
- toolCallType: "function",
- toolCallId: value.item.call_id,
- toolName: value.item.name,
- args: value.item.arguments
+ type: "reasoning-delta",
+ id: `${value.item_id}:${value.summary_index}`,
+ delta: value.delta,
+ providerMetadata: {
+ openai: {
+ itemId: value.item_id
+ }
+ }
  });
  } else if (isResponseFinishedChunk(value)) {
  finishReason = mapOpenAIResponseFinishReason({
- finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
+ finishReason: (_f = value.response.incomplete_details) == null ? void 0 : _f.reason,
  hasToolCalls
  });
  usage.inputTokens = value.response.usage.input_tokens;
  usage.outputTokens = value.response.usage.output_tokens;
  usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
- usage.reasoningTokens = (_c = (_b = value.response.usage.output_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? _c : void 0;
- usage.cachedInputTokens = (_e = (_d = value.response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : void 0;
+ usage.reasoningTokens = (_h = (_g = value.response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : void 0;
+ usage.cachedInputTokens = (_j = (_i = value.response.usage.input_tokens_details) == null ? void 0 : _i.cached_tokens) != null ? _j : void 0;
  } else if (isResponseAnnotationAddedChunk(value)) {
  controller.enqueue({
  type: "source",
  sourceType: "url",
- id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : generateId2(),
+ id: (_m = (_l = (_k = self.config).generateId) == null ? void 0 : _l.call(_k)) != null ? _m : generateId2(),
  url: value.annotation.url,
  title: value.annotation.title
  });
+ } else if (isErrorChunk(value)) {
+ controller.enqueue({ type: "error", error: value });
  }
  },
  flush(controller) {
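
Reasoning output now streams as addressable parts: each summary part gets the composite id `${itemId}:${summaryIndex}`, opened by reasoning-start, filled by reasoning-delta, and closed by reasoning-end with the encrypted content in provider metadata. A small sketch that regroups deltas back into per-item summary texts, relying only on the id convention shown above:

// Regroup reasoning deltas by reasoning item.
const summaries = new Map<string, string[]>();

function onReasoningDelta(part: { id: string; delta: string }) {
  const [itemId, summaryIndex] = part.id.split(':');
  const texts = summaries.get(itemId) ?? [];
  const i = Number(summaryIndex);
  texts[i] = (texts[i] ?? '') + part.delta;
  summaries.set(itemId, texts);
}
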
@@ -2365,95 +2850,141 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  };
- var usageSchema2 = z12.object({
- input_tokens: z12.number(),
- input_tokens_details: z12.object({ cached_tokens: z12.number().nullish() }).nullish(),
- output_tokens: z12.number(),
- output_tokens_details: z12.object({ reasoning_tokens: z12.number().nullish() }).nullish()
+ var usageSchema2 = z14.object({
+ input_tokens: z14.number(),
+ input_tokens_details: z14.object({ cached_tokens: z14.number().nullish() }).nullish(),
+ output_tokens: z14.number(),
+ output_tokens_details: z14.object({ reasoning_tokens: z14.number().nullish() }).nullish()
+ });
+ var textDeltaChunkSchema = z14.object({
+ type: z14.literal("response.output_text.delta"),
+ item_id: z14.string(),
+ delta: z14.string()
  });
- var textDeltaChunkSchema = z12.object({
- type: z12.literal("response.output_text.delta"),
- delta: z12.string()
+ var errorChunkSchema = z14.object({
+ type: z14.literal("error"),
+ code: z14.string(),
+ message: z14.string(),
+ param: z14.string().nullish(),
+ sequence_number: z14.number()
  });
- var responseFinishedChunkSchema = z12.object({
- type: z12.enum(["response.completed", "response.incomplete"]),
- response: z12.object({
- incomplete_details: z12.object({ reason: z12.string() }).nullish(),
+ var responseFinishedChunkSchema = z14.object({
+ type: z14.enum(["response.completed", "response.incomplete"]),
+ response: z14.object({
+ incomplete_details: z14.object({ reason: z14.string() }).nullish(),
  usage: usageSchema2
  })
  });
- var responseCreatedChunkSchema = z12.object({
- type: z12.literal("response.created"),
- response: z12.object({
- id: z12.string(),
- created_at: z12.number(),
- model: z12.string()
+ var responseCreatedChunkSchema = z14.object({
+ type: z14.literal("response.created"),
+ response: z14.object({
+ id: z14.string(),
+ created_at: z14.number(),
+ model: z14.string()
  })
  });
- var responseOutputItemDoneSchema = z12.object({
- type: z12.literal("response.output_item.done"),
- output_index: z12.number(),
- item: z12.discriminatedUnion("type", [
- z12.object({
- type: z12.literal("message")
+ var responseOutputItemAddedSchema = z14.object({
+ type: z14.literal("response.output_item.added"),
+ output_index: z14.number(),
+ item: z14.discriminatedUnion("type", [
+ z14.object({
+ type: z14.literal("message"),
+ id: z14.string()
  }),
- z12.object({
- type: z12.literal("function_call"),
- id: z12.string(),
- call_id: z12.string(),
- name: z12.string(),
- arguments: z12.string(),
- status: z12.literal("completed")
+ z14.object({
+ type: z14.literal("reasoning"),
+ id: z14.string(),
+ encrypted_content: z14.string().nullish()
+ }),
+ z14.object({
+ type: z14.literal("function_call"),
+ id: z14.string(),
+ call_id: z14.string(),
+ name: z14.string(),
+ arguments: z14.string()
+ }),
+ z14.object({
+ type: z14.literal("web_search_call"),
+ id: z14.string(),
+ status: z14.string()
+ }),
+ z14.object({
+ type: z14.literal("computer_call"),
+ id: z14.string(),
+ status: z14.string()
  })
  ])
  });
- var responseFunctionCallArgumentsDeltaSchema = z12.object({
- type: z12.literal("response.function_call_arguments.delta"),
- item_id: z12.string(),
- output_index: z12.number(),
- delta: z12.string()
- });
- var responseOutputItemAddedSchema = z12.object({
- type: z12.literal("response.output_item.added"),
- output_index: z12.number(),
- item: z12.discriminatedUnion("type", [
- z12.object({
- type: z12.literal("message")
+ var responseOutputItemDoneSchema = z14.object({
+ type: z14.literal("response.output_item.done"),
+ output_index: z14.number(),
+ item: z14.discriminatedUnion("type", [
+ z14.object({
+ type: z14.literal("message"),
+ id: z14.string()
  }),
- z12.object({
- type: z12.literal("function_call"),
- id: z12.string(),
- call_id: z12.string(),
- name: z12.string(),
- arguments: z12.string()
+ z14.object({
+ type: z14.literal("reasoning"),
+ id: z14.string(),
+ encrypted_content: z14.string().nullish()
+ }),
+ z14.object({
+ type: z14.literal("function_call"),
+ id: z14.string(),
+ call_id: z14.string(),
+ name: z14.string(),
+ arguments: z14.string(),
+ status: z14.literal("completed")
+ }),
+ z14.object({
+ type: z14.literal("web_search_call"),
+ id: z14.string(),
+ status: z14.literal("completed")
+ }),
+ z14.object({
+ type: z14.literal("computer_call"),
+ id: z14.string(),
+ status: z14.literal("completed")
  })
  ])
  });
- var responseAnnotationAddedSchema = z12.object({
- type: z12.literal("response.output_text.annotation.added"),
- annotation: z12.object({
- type: z12.literal("url_citation"),
- url: z12.string(),
- title: z12.string()
+ var responseFunctionCallArgumentsDeltaSchema = z14.object({
+ type: z14.literal("response.function_call_arguments.delta"),
+ item_id: z14.string(),
+ output_index: z14.number(),
+ delta: z14.string()
+ });
+ var responseAnnotationAddedSchema = z14.object({
+ type: z14.literal("response.output_text.annotation.added"),
+ annotation: z14.object({
+ type: z14.literal("url_citation"),
+ url: z14.string(),
+ title: z14.string()
  })
  });
- var responseReasoningSummaryTextDeltaSchema = z12.object({
- type: z12.literal("response.reasoning_summary_text.delta"),
- item_id: z12.string(),
- output_index: z12.number(),
- summary_index: z12.number(),
- delta: z12.string()
+ var responseReasoningSummaryPartAddedSchema = z14.object({
+ type: z14.literal("response.reasoning_summary_part.added"),
+ item_id: z14.string(),
+ summary_index: z14.number()
+ });
+ var responseReasoningSummaryTextDeltaSchema = z14.object({
+ type: z14.literal("response.reasoning_summary_text.delta"),
+ item_id: z14.string(),
+ summary_index: z14.number(),
+ delta: z14.string()
  });
- var openaiResponsesChunkSchema = z12.union([
+ var openaiResponsesChunkSchema = z14.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
+ responseOutputItemAddedSchema,
  responseOutputItemDoneSchema,
  responseFunctionCallArgumentsDeltaSchema,
- responseOutputItemAddedSchema,
  responseAnnotationAddedSchema,
+ responseReasoningSummaryPartAddedSchema,
  responseReasoningSummaryTextDeltaSchema,
- z12.object({ type: z12.string() }).passthrough()
+ errorChunkSchema,
+ z14.object({ type: z14.string() }).loose()
  // fallback for unknown chunks
  ]);
  function isTextDeltaChunk(chunk) {
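
Two zod v4 details stand out in the schemas above: the chunk union now ends in `.loose()` (the v4 replacement for `.passthrough()`), so unknown event types still parse instead of failing, and the dedicated errorChunkSchema matches error events before that fallback. A standalone sketch of the same union-with-fallback pattern:

import { z } from 'zod/v4';

// Known events get precise schemas; everything else falls through loose.
const knownChunk = z.object({
  type: z.literal('response.output_text.delta'),
  item_id: z.string(),
  delta: z.string(),
});

const chunkSchema = z.union([
  knownChunk,
  z.object({ type: z.string() }).loose(), // fallback for unknown chunks
]);

const parsed = chunkSchema.parse({ type: 'response.queued', extra: true });
// `extra` survives parsing because the fallback object is loose.
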
@@ -2462,6 +2993,9 @@ function isTextDeltaChunk(chunk) {
  function isResponseOutputItemDoneChunk(chunk) {
  return chunk.type === "response.output_item.done";
  }
+ function isResponseOutputItemDoneReasoningChunk(chunk) {
+ return isResponseOutputItemDoneChunk(chunk) && chunk.item.type === "reasoning";
+ }
  function isResponseFinishedChunk(chunk) {
  return chunk.type === "response.completed" || chunk.type === "response.incomplete";
  }
@@ -2474,14 +3008,23 @@ function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
  function isResponseOutputItemAddedChunk(chunk) {
  return chunk.type === "response.output_item.added";
  }
+ function isResponseOutputItemAddedReasoningChunk(chunk) {
+ return isResponseOutputItemAddedChunk(chunk) && chunk.item.type === "reasoning";
+ }
  function isResponseAnnotationAddedChunk(chunk) {
  return chunk.type === "response.output_text.annotation.added";
  }
+ function isResponseReasoningSummaryPartAddedChunk(chunk) {
+ return chunk.type === "response.reasoning_summary_part.added";
+ }
  function isResponseReasoningSummaryTextDeltaChunk(chunk) {
  return chunk.type === "response.reasoning_summary_text.delta";
  }
+ function isErrorChunk(chunk) {
+ return chunk.type === "error";
+ }
  function getResponsesModelConfig(modelId) {
- if (modelId.startsWith("o")) {
+ if (modelId.startsWith("o") || modelId.startsWith("codex-") || modelId.startsWith("computer-use")) {
  if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
  return {
  isReasoningModel: true,
@@ -2501,35 +3044,40 @@ function getResponsesModelConfig(modelId) {
  requiredAutoTruncation: false
  };
  }
- var openaiResponsesProviderOptionsSchema = z12.object({
- metadata: z12.any().nullish(),
- parallelToolCalls: z12.boolean().nullish(),
- previousResponseId: z12.string().nullish(),
- store: z12.boolean().nullish(),
- user: z12.string().nullish(),
- reasoningEffort: z12.string().nullish(),
- strictSchemas: z12.boolean().nullish(),
- instructions: z12.string().nullish(),
- reasoningSummary: z12.string().nullish()
+ function supportsFlexProcessing2(modelId) {
+ return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+ }
+ var openaiResponsesProviderOptionsSchema = z14.object({
+ metadata: z14.any().nullish(),
+ parallelToolCalls: z14.boolean().nullish(),
+ previousResponseId: z14.string().nullish(),
+ store: z14.boolean().nullish(),
+ user: z14.string().nullish(),
+ reasoningEffort: z14.string().nullish(),
+ strictJsonSchema: z14.boolean().nullish(),
+ instructions: z14.string().nullish(),
+ reasoningSummary: z14.string().nullish(),
+ serviceTier: z14.enum(["auto", "flex"]).nullish(),
+ include: z14.array(z14.enum(["reasoning.encrypted_content"])).nullish()
  });
 
  // src/openai-speech-model.ts
  import {
  combineHeaders as combineHeaders7,
  createBinaryResponseHandler,
- parseProviderOptions as parseProviderOptions6,
+ parseProviderOptions as parseProviderOptions7,
  postJsonToApi as postJsonToApi6
  } from "@ai-sdk/provider-utils";
- import { z as z13 } from "zod";
- var OpenAIProviderOptionsSchema = z13.object({
- instructions: z13.string().nullish(),
- speed: z13.number().min(0.25).max(4).default(1).nullish()
+ import { z as z15 } from "zod/v4";
+ var OpenAIProviderOptionsSchema = z15.object({
+ instructions: z15.string().nullish(),
+ speed: z15.number().min(0.25).max(4).default(1).nullish()
  });
  var OpenAISpeechModel = class {
  constructor(modelId, config) {
  this.modelId = modelId;
  this.config = config;
- this.specificationVersion = "v1";
+ this.specificationVersion = "v2";
  }
  get provider() {
  return this.config.provider;
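
The widened provider options above (serviceTier, include, strictJsonSchema) travel through providerOptions.openai, and supportsFlexProcessing2 gates flex processing to o3 / o4-mini model ids. A usage sketch, assuming the AI SDK v5 beta generateText API:

import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// serviceTier 'flex' is only honored for o3 / o4-mini per the check above.
const { text } = await generateText({
  model: openai.responses('o4-mini'),
  prompt: 'Summarize the tradeoffs of flex processing.',
  providerOptions: {
    openai: {
      serviceTier: 'flex',
      include: ['reasoning.encrypted_content'],
    },
  },
});
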
@@ -2540,10 +3088,11 @@ var OpenAISpeechModel = class {
  outputFormat = "mp3",
  speed,
  instructions,
+ language,
  providerOptions
  }) {
  const warnings = [];
- const openAIOptions = await parseProviderOptions6({
+ const openAIOptions = await parseProviderOptions7({
  provider: "openai",
  providerOptions,
  schema: OpenAIProviderOptionsSchema
@@ -2576,6 +3125,13 @@ var OpenAISpeechModel = class {
  }
  }
  }
+ if (language) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "language",
+ details: `OpenAI speech models do not support language selection. Language parameter "${language}" was ignored.`
+ });
+ }
  return {
  requestBody,
  warnings
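
Rather than silently dropping it, the speech model now reports an unsupported language request as a warning in the result. A call sketch, assuming the v5 beta speech helper is exported as experimental_generateSpeech and forwards a language option to doGenerate above:

import { openai } from '@ai-sdk/openai';
import { experimental_generateSpeech as generateSpeech } from 'ai';

// instructions and speed are validated by OpenAIProviderOptionsSchema above;
// speed must fall within [0.25, 4].
const { audio, warnings } = await generateSpeech({
  model: openai.speech('tts-1'),
  text: 'Hello world',
  language: 'de', // surfaces as the unsupported-setting warning above
  providerOptions: {
    openai: { instructions: 'Speak calmly.', speed: 1.25 },
  },
});

console.log(warnings); // includes the language warning
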
@@ -2674,10 +3230,7 @@ function createOpenAI(options = {}) {
  "The OpenAI model function cannot be called with the new keyword."
  );
  }
- if (modelId === "gpt-3.5-turbo-instruct") {
- return createCompletionModel(modelId);
- }
- return createChatModel(modelId);
+ return createResponsesModel(modelId);
  };
  const createResponsesModel = (modelId) => {
  return new OpenAIResponsesLanguageModel(modelId, {
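
With the change above, calling the provider as a function always returns a Responses API model; the gpt-3.5-turbo-instruct special case that routed to the completion model is gone. A sketch of the resulting selection, assuming the provider's chat and completion sub-APIs remain available as before:

import { openai } from '@ai-sdk/openai';

// The bare provider call now yields an OpenAIResponsesLanguageModel:
const responsesModel = openai('gpt-4o');

// Chat Completions and legacy completion models stay reachable explicitly:
const chatModel = openai.chat('gpt-4o');
const completionModel = openai.completion('gpt-3.5-turbo-instruct');
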