@ai-sdk/openai 2.0.0-alpha.9 → 2.0.0-beta.10

This diff shows the published contents of the two package versions as they appear in their public registry. It is provided for informational purposes only.
@@ -11,7 +11,7 @@ import {
   parseProviderOptions,
   postJsonToApi
 } from "@ai-sdk/provider-utils";
-import { z as z3 } from "zod";
+import { z as z5 } from "zod/v4";

 // src/convert-to-openai-chat-messages.ts
 import {
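
Both entry points now import Zod from the `zod/v4` subpath rather than the package root. A minimal consumer-side sketch, assuming a zod version that ships the `zod/v4` subpath export (introduced around zod 3.25, to the best of our knowledge):

// Matches the import style the bundle now uses.
import { z } from "zod/v4";

const providerOptions = z.object({ user: z.string().optional() });
console.log(providerOptions.parse({ user: "user-123" }));
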
@@ -147,7 +147,7 @@ function convertToOpenAIChatMessages({
             type: "function",
             function: {
               name: part.toolName,
-              arguments: JSON.stringify(part.args)
+              arguments: JSON.stringify(part.input)
             }
           });
           break;
@@ -163,10 +163,23 @@ function convertToOpenAIChatMessages({
       }
       case "tool": {
         for (const toolResponse of content) {
+          const output = toolResponse.output;
+          let contentValue;
+          switch (output.type) {
+            case "text":
+            case "error-text":
+              contentValue = output.value;
+              break;
+            case "content":
+            case "json":
+            case "error-json":
+              contentValue = JSON.stringify(output.value);
+              break;
+          }
           messages.push({
             role: "tool",
             tool_call_id: toolResponse.toolCallId,
-            content: JSON.stringify(toolResponse.result)
+            content: contentValue
           });
         }
         break;
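
The hunk above replaces the old `toolResponse.result` field (which was always JSON-stringified) with a discriminated `output` union: plain-text and error-text results pass through verbatim, while structured results are serialized. An illustrative sketch of the branching (the type name is ours, not a package export):

type ToolResultOutput =
  | { type: "text"; value: string }
  | { type: "error-text"; value: string }
  | { type: "json"; value: unknown }
  | { type: "error-json"; value: unknown }
  | { type: "content"; value: unknown };

function toOpenAIToolContent(output: ToolResultOutput): string {
  switch (output.type) {
    case "text":
    case "error-text":
      return output.value; // passed through as-is
    case "content":
    case "json":
    case "error-json":
      return JSON.stringify(output.value); // serialized for the API
  }
}
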
@@ -211,7 +224,7 @@ function mapOpenAIFinishReason(finishReason) {
 }

 // src/openai-chat-options.ts
-import { z } from "zod";
+import { z } from "zod/v4";
 var openaiProviderOptions = z.object({
   /**
    * Modify the likelihood of specified tokens appearing in the completion.
@@ -254,21 +267,34 @@ var openaiProviderOptions = z.object({
   /**
    * Metadata to associate with the request.
    */
-  metadata: z.record(z.string()).optional(),
+  metadata: z.record(z.string().max(64), z.string().max(512)).optional(),
   /**
    * Parameters for prediction mode.
    */
-  prediction: z.record(z.any()).optional(),
+  prediction: z.record(z.string(), z.any()).optional(),
   /**
    * Whether to use structured outputs.
    *
    * @default true
    */
-  structuredOutputs: z.boolean().optional()
+  structuredOutputs: z.boolean().optional(),
+  /**
+   * Service tier for the request. Set to 'flex' for 50% cheaper processing
+   * at the cost of increased latency. Only available for o3 and o4-mini models.
+   *
+   * @default 'auto'
+   */
+  serviceTier: z.enum(["auto", "flex"]).optional(),
+  /**
+   * Whether to use strict JSON schema validation.
+   *
+   * @default false
+   */
+  strictJsonSchema: z.boolean().optional()
 });

 // src/openai-error.ts
-import { z as z2 } from "zod";
+import { z as z2 } from "zod/v4";
 import { createJsonErrorResponseHandler } from "@ai-sdk/provider-utils";
 var openaiErrorDataSchema = z2.object({
   error: z2.object({
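
The chat provider options gain `serviceTier` and `strictJsonSchema`. A usage sketch, assuming the AI SDK v5-style `generateText` API reads these from `providerOptions.openai` as the schema above suggests:

import { openai } from "@ai-sdk/openai";
import { generateText } from "ai";

const result = await generateText({
  model: openai("o4-mini"),
  prompt: "Summarize the release notes.",
  providerOptions: {
    openai: {
      serviceTier: "flex", // 50% cheaper, higher latency; o3 / o4-mini only
      strictJsonSchema: true, // opt in to strict JSON schema validation
      metadata: { run: "nightly" }, // keys <= 64 chars, values <= 512 chars
    },
  },
});
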
@@ -290,10 +316,81 @@ var openaiFailedResponseHandler = createJsonErrorResponseHandler({
 import {
   UnsupportedFunctionalityError as UnsupportedFunctionalityError2
 } from "@ai-sdk/provider";
+
+// src/tool/file-search.ts
+import { createProviderDefinedToolFactory } from "@ai-sdk/provider-utils";
+import { z as z3 } from "zod/v4";
+var fileSearchArgsSchema = z3.object({
+  /**
+   * List of vector store IDs to search through. If not provided, searches all available vector stores.
+   */
+  vectorStoreIds: z3.array(z3.string()).optional(),
+  /**
+   * Maximum number of search results to return. Defaults to 10.
+   */
+  maxResults: z3.number().optional(),
+  /**
+   * Type of search to perform. Defaults to 'auto'.
+   */
+  searchType: z3.enum(["auto", "keyword", "semantic"]).optional()
+});
+var fileSearch = createProviderDefinedToolFactory({
+  id: "openai.file_search",
+  name: "file_search",
+  inputSchema: z3.object({
+    query: z3.string()
+  })
+});
+
+// src/tool/web-search-preview.ts
+import { createProviderDefinedToolFactory as createProviderDefinedToolFactory2 } from "@ai-sdk/provider-utils";
+import { z as z4 } from "zod/v4";
+var webSearchPreviewArgsSchema = z4.object({
+  /**
+   * Search context size to use for the web search.
+   * - high: Most comprehensive context, highest cost, slower response
+   * - medium: Balanced context, cost, and latency (default)
+   * - low: Least context, lowest cost, fastest response
+   */
+  searchContextSize: z4.enum(["low", "medium", "high"]).optional(),
+  /**
+   * User location information to provide geographically relevant search results.
+   */
+  userLocation: z4.object({
+    /**
+     * Type of location (always 'approximate')
+     */
+    type: z4.literal("approximate"),
+    /**
+     * Two-letter ISO country code (e.g., 'US', 'GB')
+     */
+    country: z4.string().optional(),
+    /**
+     * City name (free text, e.g., 'Minneapolis')
+     */
+    city: z4.string().optional(),
+    /**
+     * Region name (free text, e.g., 'Minnesota')
+     */
+    region: z4.string().optional(),
+    /**
+     * IANA timezone (e.g., 'America/Chicago')
+     */
+    timezone: z4.string().optional()
+  }).optional()
+});
+var webSearchPreview = createProviderDefinedToolFactory2({
+  id: "openai.web_search_preview",
+  name: "web_search_preview",
+  inputSchema: z4.object({})
+});
+
+// src/openai-prepare-tools.ts
 function prepareTools({
   tools,
   toolChoice,
-  structuredOutputs
+  structuredOutputs,
+  strictJsonSchema
 }) {
   tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
   const toolWarnings = [];
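
The two factories above define OpenAI's hosted tools. A wiring sketch, assuming they are exposed to consumers as `openai.tools.fileSearch` / `openai.tools.webSearchPreview` (the factory ids suggest this, but the export surface is not part of this diff):

import { openai } from "@ai-sdk/openai";
import { generateText } from "ai";

const result = await generateText({
  model: openai.responses("gpt-4o"),
  prompt: "What changed in the latest release?",
  tools: {
    // Hypothetical accessors; args mirror fileSearchArgsSchema / webSearchPreviewArgsSchema.
    file_search: openai.tools.fileSearch({ vectorStoreIds: ["vs_123"], maxResults: 5 }),
    web_search_preview: openai.tools.webSearchPreview({ searchContextSize: "low" }),
  },
});
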
@@ -302,18 +399,47 @@ function prepareTools({
   }
   const openaiTools = [];
   for (const tool of tools) {
-    if (tool.type === "provider-defined") {
-      toolWarnings.push({ type: "unsupported-tool", tool });
-    } else {
-      openaiTools.push({
-        type: "function",
-        function: {
-          name: tool.name,
-          description: tool.description,
-          parameters: tool.parameters,
-          strict: structuredOutputs ? true : void 0
+    switch (tool.type) {
+      case "function":
+        openaiTools.push({
+          type: "function",
+          function: {
+            name: tool.name,
+            description: tool.description,
+            parameters: tool.inputSchema,
+            strict: structuredOutputs ? strictJsonSchema : void 0
+          }
+        });
+        break;
+      case "provider-defined":
+        switch (tool.id) {
+          case "openai.file_search": {
+            const args = fileSearchArgsSchema.parse(tool.args);
+            openaiTools.push({
+              type: "file_search",
+              vector_store_ids: args.vectorStoreIds,
+              max_results: args.maxResults,
+              search_type: args.searchType
+            });
+            break;
+          }
+          case "openai.web_search_preview": {
+            const args = webSearchPreviewArgsSchema.parse(tool.args);
+            openaiTools.push({
+              type: "web_search_preview",
+              search_context_size: args.searchContextSize,
+              user_location: args.userLocation
+            });
+            break;
+          }
+          default:
+            toolWarnings.push({ type: "unsupported-tool", tool });
+            break;
         }
-      });
+        break;
+      default:
+        toolWarnings.push({ type: "unsupported-tool", tool });
+        break;
     }
   }
   if (toolChoice == null) {
@@ -373,7 +499,7 @@ var OpenAIChatLanguageModel = class {
     toolChoice,
     providerOptions
   }) {
-    var _a, _b, _c;
+    var _a, _b, _c, _d;
    const warnings = [];
    const openaiOptions = (_a = await parseProviderOptions({
      provider: "openai",
@@ -401,6 +527,7 @@ var OpenAIChatLanguageModel = class {
      }
    );
    warnings.push(...messageWarnings);
+    const strictJsonSchema = (_c = openaiOptions.strictJsonSchema) != null ? _c : false;
    const baseArgs = {
      // model id:
      model: this.modelId,
@@ -416,18 +543,15 @@ var OpenAIChatLanguageModel = class {
      top_p: topP,
      frequency_penalty: frequencyPenalty,
      presence_penalty: presencePenalty,
-      response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? (
-        // TODO convert into provider option
-        structuredOutputs && responseFormat.schema != null ? {
-          type: "json_schema",
-          json_schema: {
-            schema: responseFormat.schema,
-            strict: true,
-            name: (_c = responseFormat.name) != null ? _c : "response",
-            description: responseFormat.description
-          }
-        } : { type: "json_object" }
-      ) : void 0,
+      response_format: (responseFormat == null ? void 0 : responseFormat.type) === "json" ? structuredOutputs && responseFormat.schema != null ? {
+        type: "json_schema",
+        json_schema: {
+          schema: responseFormat.schema,
+          strict: strictJsonSchema,
+          name: (_d = responseFormat.name) != null ? _d : "response",
+          description: responseFormat.description
+        }
+      } : { type: "json_object" } : void 0,
      stop: stopSequences,
      seed,
      // openai specific settings:
@@ -437,6 +561,7 @@ var OpenAIChatLanguageModel = class {
      metadata: openaiOptions.metadata,
      prediction: openaiOptions.prediction,
      reasoning_effort: openaiOptions.reasoningEffort,
+      service_tier: openaiOptions.serviceTier,
      // messages:
      messages
    };
@@ -510,6 +635,14 @@ var OpenAIChatLanguageModel = class {
        });
      }
    }
+    if (openaiOptions.serviceTier === "flex" && !supportsFlexProcessing(this.modelId)) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "serviceTier",
+        details: "flex processing is only available for o3 and o4-mini models"
+      });
+      baseArgs.service_tier = void 0;
+    }
    const {
      tools: openaiTools,
      toolChoice: openaiToolChoice,
@@ -517,7 +650,8 @@ var OpenAIChatLanguageModel = class {
    } = prepareTools({
      tools,
      toolChoice,
-      structuredOutputs
+      structuredOutputs,
+      strictJsonSchema
    });
    return {
      args: {
@@ -558,10 +692,9 @@ var OpenAIChatLanguageModel = class {
    for (const toolCall of (_a = choice.message.tool_calls) != null ? _a : []) {
      content.push({
        type: "tool-call",
-        toolCallType: "function",
        toolCallId: (_b = toolCall.id) != null ? _b : generateId(),
        toolName: toolCall.function.name,
-        args: toolCall.function.arguments
+        input: toolCall.function.arguments
      });
    }
    const completionTokenDetails = (_c = response.usage) == null ? void 0 : _c.completion_tokens_details;
@@ -627,6 +760,7 @@ var OpenAIChatLanguageModel = class {
      totalTokens: void 0
    };
    let isFirstChunk = true;
+    let isActiveText = false;
    const providerMetadata = { openai: {} };
    return {
      stream: response.pipeThrough(
@@ -636,6 +770,9 @@ var OpenAIChatLanguageModel = class {
          },
          transform(chunk, controller) {
            var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
+            if (options.includeRawChunks) {
+              controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+            }
            if (!chunk.success) {
              finishReason = "error";
              controller.enqueue({ type: "error", error: chunk.error });
@@ -679,9 +816,14 @@ var OpenAIChatLanguageModel = class {
            }
            const delta = choice.delta;
            if (delta.content != null) {
+              if (!isActiveText) {
+                controller.enqueue({ type: "text-start", id: "0" });
+                isActiveText = true;
+              }
              controller.enqueue({
-                type: "text",
-                text: delta.content
+                type: "text-delta",
+                id: "0",
+                delta: delta.content
              });
            }
            if (delta.tool_calls != null) {
@@ -706,6 +848,11 @@ var OpenAIChatLanguageModel = class {
                    message: `Expected 'function.name' to be a string.`
                  });
                }
+                controller.enqueue({
+                  type: "tool-input-start",
+                  id: toolCallDelta.id,
+                  toolName: toolCallDelta.function.name
+                });
                toolCalls[index] = {
                  id: toolCallDelta.id,
                  type: "function",
@@ -719,20 +866,21 @@ var OpenAIChatLanguageModel = class {
                if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
                  if (toolCall2.function.arguments.length > 0) {
                    controller.enqueue({
-                      type: "tool-call-delta",
-                      toolCallType: "function",
-                      toolCallId: toolCall2.id,
-                      toolName: toolCall2.function.name,
-                      argsTextDelta: toolCall2.function.arguments
+                      type: "tool-input-delta",
+                      id: toolCall2.id,
+                      delta: toolCall2.function.arguments
                    });
                  }
                  if (isParsableJson(toolCall2.function.arguments)) {
+                    controller.enqueue({
+                      type: "tool-input-end",
+                      id: toolCall2.id
+                    });
                    controller.enqueue({
                      type: "tool-call",
-                      toolCallType: "function",
                      toolCallId: (_q = toolCall2.id) != null ? _q : generateId(),
                      toolName: toolCall2.function.name,
-                      args: toolCall2.function.arguments
+                      input: toolCall2.function.arguments
                    });
                    toolCall2.hasFinished = true;
                  }
@@ -747,19 +895,20 @@ var OpenAIChatLanguageModel = class {
                  toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
                }
                controller.enqueue({
-                  type: "tool-call-delta",
-                  toolCallType: "function",
-                  toolCallId: toolCall.id,
-                  toolName: toolCall.function.name,
-                  argsTextDelta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
+                  type: "tool-input-delta",
+                  id: toolCall.id,
+                  delta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
                });
                if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && isParsableJson(toolCall.function.arguments)) {
+                  controller.enqueue({
+                    type: "tool-input-end",
+                    id: toolCall.id
+                  });
                  controller.enqueue({
                    type: "tool-call",
-                    toolCallType: "function",
                    toolCallId: (_x = toolCall.id) != null ? _x : generateId(),
                    toolName: toolCall.function.name,
-                    args: toolCall.function.arguments
+                    input: toolCall.function.arguments
                  });
                  toolCall.hasFinished = true;
                }
@@ -767,6 +916,9 @@ var OpenAIChatLanguageModel = class {
            }
          },
          flush(controller) {
+            if (isActiveText) {
+              controller.enqueue({ type: "text-end", id: "0" });
+            }
            controller.enqueue({
              type: "finish",
              finishReason,
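
Taken together, the streaming hunks above replace single `text` parts with `text-start` / `text-delta` / `text-end` triples and replace `tool-call-delta` with `tool-input-start` / `tool-input-delta` / `tool-input-end`, all keyed by an `id`. A consumer-side sketch of folding the new lifecycle parts back into whole text blocks (stream part typing is simplified here):

async function collectTextBlocks(
  stream: AsyncIterable<{ type: string; id?: string; delta?: string }>
): Promise<Map<string, string>> {
  const texts = new Map<string, string>();
  for await (const part of stream) {
    if (part.type === "text-start" && part.id != null) {
      texts.set(part.id, ""); // open a new block
    } else if (part.type === "text-delta" && part.id != null) {
      texts.set(part.id, (texts.get(part.id) ?? "") + (part.delta ?? ""));
    }
    // tool-input-* parts follow the same pattern, keyed by tool call id.
  }
  return texts;
}
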
@@ -781,97 +933,97 @@ var OpenAIChatLanguageModel = class {
     };
   }
 };
-var openaiTokenUsageSchema = z3.object({
-  prompt_tokens: z3.number().nullish(),
-  completion_tokens: z3.number().nullish(),
-  total_tokens: z3.number().nullish(),
-  prompt_tokens_details: z3.object({
-    cached_tokens: z3.number().nullish()
+var openaiTokenUsageSchema = z5.object({
+  prompt_tokens: z5.number().nullish(),
+  completion_tokens: z5.number().nullish(),
+  total_tokens: z5.number().nullish(),
+  prompt_tokens_details: z5.object({
+    cached_tokens: z5.number().nullish()
   }).nullish(),
-  completion_tokens_details: z3.object({
-    reasoning_tokens: z3.number().nullish(),
-    accepted_prediction_tokens: z3.number().nullish(),
-    rejected_prediction_tokens: z3.number().nullish()
+  completion_tokens_details: z5.object({
+    reasoning_tokens: z5.number().nullish(),
+    accepted_prediction_tokens: z5.number().nullish(),
+    rejected_prediction_tokens: z5.number().nullish()
   }).nullish()
 }).nullish();
-var openaiChatResponseSchema = z3.object({
-  id: z3.string().nullish(),
-  created: z3.number().nullish(),
-  model: z3.string().nullish(),
-  choices: z3.array(
-    z3.object({
-      message: z3.object({
-        role: z3.literal("assistant").nullish(),
-        content: z3.string().nullish(),
-        tool_calls: z3.array(
-          z3.object({
-            id: z3.string().nullish(),
-            type: z3.literal("function"),
-            function: z3.object({
-              name: z3.string(),
-              arguments: z3.string()
+var openaiChatResponseSchema = z5.object({
+  id: z5.string().nullish(),
+  created: z5.number().nullish(),
+  model: z5.string().nullish(),
+  choices: z5.array(
+    z5.object({
+      message: z5.object({
+        role: z5.literal("assistant").nullish(),
+        content: z5.string().nullish(),
+        tool_calls: z5.array(
+          z5.object({
+            id: z5.string().nullish(),
+            type: z5.literal("function"),
+            function: z5.object({
+              name: z5.string(),
+              arguments: z5.string()
            })
          })
        ).nullish()
      }),
-      index: z3.number(),
-      logprobs: z3.object({
-        content: z3.array(
-          z3.object({
-            token: z3.string(),
-            logprob: z3.number(),
-            top_logprobs: z3.array(
-              z3.object({
-                token: z3.string(),
-                logprob: z3.number()
+      index: z5.number(),
+      logprobs: z5.object({
+        content: z5.array(
+          z5.object({
+            token: z5.string(),
+            logprob: z5.number(),
+            top_logprobs: z5.array(
+              z5.object({
+                token: z5.string(),
+                logprob: z5.number()
              })
            )
          })
        ).nullish()
      }).nullish(),
-      finish_reason: z3.string().nullish()
+      finish_reason: z5.string().nullish()
    })
  ),
  usage: openaiTokenUsageSchema
 });
-var openaiChatChunkSchema = z3.union([
-  z3.object({
-    id: z3.string().nullish(),
-    created: z3.number().nullish(),
-    model: z3.string().nullish(),
-    choices: z3.array(
-      z3.object({
-        delta: z3.object({
-          role: z3.enum(["assistant"]).nullish(),
-          content: z3.string().nullish(),
-          tool_calls: z3.array(
-            z3.object({
-              index: z3.number(),
-              id: z3.string().nullish(),
-              type: z3.literal("function").nullish(),
-              function: z3.object({
-                name: z3.string().nullish(),
-                arguments: z3.string().nullish()
+var openaiChatChunkSchema = z5.union([
+  z5.object({
+    id: z5.string().nullish(),
+    created: z5.number().nullish(),
+    model: z5.string().nullish(),
+    choices: z5.array(
+      z5.object({
+        delta: z5.object({
+          role: z5.enum(["assistant"]).nullish(),
+          content: z5.string().nullish(),
+          tool_calls: z5.array(
+            z5.object({
+              index: z5.number(),
+              id: z5.string().nullish(),
+              type: z5.literal("function").nullish(),
+              function: z5.object({
+                name: z5.string().nullish(),
+                arguments: z5.string().nullish()
              })
            })
          ).nullish()
        }).nullish(),
-        logprobs: z3.object({
-          content: z3.array(
-            z3.object({
-              token: z3.string(),
-              logprob: z3.number(),
-              top_logprobs: z3.array(
-                z3.object({
-                  token: z3.string(),
-                  logprob: z3.number()
+        logprobs: z5.object({
+          content: z5.array(
+            z5.object({
+              token: z5.string(),
+              logprob: z5.number(),
+              top_logprobs: z5.array(
+                z5.object({
+                  token: z5.string(),
+                  logprob: z5.number()
                })
              )
            })
          ).nullish()
        }).nullish(),
-        finish_reason: z3.string().nullish(),
-        index: z3.number()
+        finish_reason: z5.string().nullish(),
+        index: z5.number()
      })
    ),
    usage: openaiTokenUsageSchema
@@ -881,6 +1033,9 @@ var openaiChatChunkSchema = z3.union([
 function isReasoningModel(modelId) {
   return modelId.startsWith("o");
 }
+function supportsFlexProcessing(modelId) {
+  return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+}
 function getSystemMessageMode(modelId) {
   var _a, _b;
   if (!isReasoningModel(modelId)) {
@@ -929,7 +1084,7 @@ import {
   parseProviderOptions as parseProviderOptions2,
   postJsonToApi as postJsonToApi2
 } from "@ai-sdk/provider-utils";
-import { z as z5 } from "zod";
+import { z as z7 } from "zod/v4";

 // src/convert-to-openai-completion-prompt.ts
 import {
@@ -1010,12 +1165,12 @@ ${user}:`]
 }

 // src/openai-completion-options.ts
-import { z as z4 } from "zod";
-var openaiCompletionProviderOptions = z4.object({
+import { z as z6 } from "zod/v4";
+var openaiCompletionProviderOptions = z6.object({
   /**
   Echo back the prompt in addition to the completion.
    */
-  echo: z4.boolean().optional(),
+  echo: z6.boolean().optional(),
   /**
   Modify the likelihood of specified tokens appearing in the completion.

@@ -1030,16 +1185,16 @@ var openaiCompletionProviderOptions = z4.object({
   As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
   token from being generated.
    */
-  logitBias: z4.record(z4.string(), z4.number()).optional(),
+  logitBias: z6.record(z6.string(), z6.number()).optional(),
   /**
   The suffix that comes after a completion of inserted text.
    */
-  suffix: z4.string().optional(),
+  suffix: z6.string().optional(),
   /**
   A unique identifier representing your end-user, which can help OpenAI to
   monitor and detect abuse. Learn more.
    */
-  user: z4.string().optional(),
+  user: z6.string().optional(),
   /**
   Return the log probabilities of the tokens. Including logprobs will increase
   the response size and can slow down response times. However, it can
@@ -1049,7 +1204,7 @@ var openaiCompletionProviderOptions = z4.object({
   Setting to a number will return the log probabilities of the top n
   tokens that were generated.
    */
-  logprobs: z4.union([z4.boolean(), z4.number()]).optional()
+  logprobs: z6.union([z6.boolean(), z6.number()]).optional()
 });

 // src/openai-completion-language-model.ts
@@ -1221,6 +1376,9 @@ var OpenAICompletionLanguageModel = class {
             controller.enqueue({ type: "stream-start", warnings });
           },
           transform(chunk, controller) {
+            if (options.includeRawChunks) {
+              controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
+            }
             if (!chunk.success) {
               finishReason = "error";
               controller.enqueue({ type: "error", error: chunk.error });
@@ -1238,6 +1396,7 @@ var OpenAICompletionLanguageModel = class {
                 type: "response-metadata",
                 ...getResponseMetadata(value)
               });
+              controller.enqueue({ type: "text-start", id: "0" });
             }
             if (value.usage != null) {
               usage.inputTokens = value.usage.prompt_tokens;
@@ -1251,14 +1410,18 @@ var OpenAICompletionLanguageModel = class {
             if ((choice == null ? void 0 : choice.logprobs) != null) {
               providerMetadata.openai.logprobs = choice.logprobs;
             }
-            if ((choice == null ? void 0 : choice.text) != null) {
+            if ((choice == null ? void 0 : choice.text) != null && choice.text.length > 0) {
               controller.enqueue({
-                type: "text",
-                text: choice.text
+                type: "text-delta",
+                id: "0",
+                delta: choice.text
               });
             }
           },
           flush(controller) {
+            if (!isFirstChunk) {
+              controller.enqueue({ type: "text-end", id: "0" });
+            }
             controller.enqueue({
               type: "finish",
               finishReason,
@@ -1273,42 +1436,42 @@ var OpenAICompletionLanguageModel = class {
     };
   }
 };
-var usageSchema = z5.object({
-  prompt_tokens: z5.number(),
-  completion_tokens: z5.number(),
-  total_tokens: z5.number()
+var usageSchema = z7.object({
+  prompt_tokens: z7.number(),
+  completion_tokens: z7.number(),
+  total_tokens: z7.number()
 });
-var openaiCompletionResponseSchema = z5.object({
-  id: z5.string().nullish(),
-  created: z5.number().nullish(),
-  model: z5.string().nullish(),
-  choices: z5.array(
-    z5.object({
-      text: z5.string(),
-      finish_reason: z5.string(),
-      logprobs: z5.object({
-        tokens: z5.array(z5.string()),
-        token_logprobs: z5.array(z5.number()),
-        top_logprobs: z5.array(z5.record(z5.string(), z5.number())).nullish()
+var openaiCompletionResponseSchema = z7.object({
+  id: z7.string().nullish(),
+  created: z7.number().nullish(),
+  model: z7.string().nullish(),
+  choices: z7.array(
+    z7.object({
+      text: z7.string(),
+      finish_reason: z7.string(),
+      logprobs: z7.object({
+        tokens: z7.array(z7.string()),
+        token_logprobs: z7.array(z7.number()),
+        top_logprobs: z7.array(z7.record(z7.string(), z7.number())).nullish()
      }).nullish()
    })
  ),
  usage: usageSchema.nullish()
 });
-var openaiCompletionChunkSchema = z5.union([
-  z5.object({
-    id: z5.string().nullish(),
-    created: z5.number().nullish(),
-    model: z5.string().nullish(),
-    choices: z5.array(
-      z5.object({
-        text: z5.string(),
-        finish_reason: z5.string().nullish(),
-        index: z5.number(),
-        logprobs: z5.object({
-          tokens: z5.array(z5.string()),
-          token_logprobs: z5.array(z5.number()),
-          top_logprobs: z5.array(z5.record(z5.string(), z5.number())).nullish()
+var openaiCompletionChunkSchema = z7.union([
+  z7.object({
+    id: z7.string().nullish(),
+    created: z7.number().nullish(),
+    model: z7.string().nullish(),
+    choices: z7.array(
+      z7.object({
+        text: z7.string(),
+        finish_reason: z7.string().nullish(),
+        index: z7.number(),
+        logprobs: z7.object({
+          tokens: z7.array(z7.string()),
+          token_logprobs: z7.array(z7.number()),
+          top_logprobs: z7.array(z7.record(z7.string(), z7.number())).nullish()
        }).nullish()
      })
    ),
@@ -1327,21 +1490,21 @@ import {
   parseProviderOptions as parseProviderOptions3,
   postJsonToApi as postJsonToApi3
 } from "@ai-sdk/provider-utils";
-import { z as z7 } from "zod";
+import { z as z9 } from "zod/v4";

 // src/openai-embedding-options.ts
-import { z as z6 } from "zod";
-var openaiEmbeddingProviderOptions = z6.object({
+import { z as z8 } from "zod/v4";
+var openaiEmbeddingProviderOptions = z8.object({
   /**
   The number of dimensions the resulting output embeddings should have.
   Only supported in text-embedding-3 and later models.
    */
-  dimensions: z6.number().optional(),
+  dimensions: z8.number().optional(),
   /**
   A unique identifier representing your end-user, which can help OpenAI to
   monitor and detect abuse. Learn more.
    */
-  user: z6.string().optional()
+  user: z8.string().optional()
 });

 // src/openai-embedding-model.ts
@@ -1407,9 +1570,9 @@ var OpenAIEmbeddingModel = class {
     };
   }
 };
-var openaiTextEmbeddingResponseSchema = z7.object({
-  data: z7.array(z7.object({ embedding: z7.array(z7.number()) })),
-  usage: z7.object({ prompt_tokens: z7.number() }).nullish()
+var openaiTextEmbeddingResponseSchema = z9.object({
+  data: z9.array(z9.object({ embedding: z9.array(z9.number()) })),
+  usage: z9.object({ prompt_tokens: z9.number() }).nullish()
 });

 // src/openai-image-model.ts
@@ -1418,7 +1581,7 @@ import {
   createJsonResponseHandler as createJsonResponseHandler4,
   postJsonToApi as postJsonToApi4
 } from "@ai-sdk/provider-utils";
-import { z as z8 } from "zod";
+import { z as z10 } from "zod/v4";

 // src/openai-image-settings.ts
 var modelMaxImagesPerCall = {
@@ -1506,9 +1669,9 @@ var OpenAIImageModel = class {
     };
   }
 };
-var openaiImageResponseSchema = z8.object({
-  data: z8.array(
-    z8.object({ b64_json: z8.string(), revised_prompt: z8.string().optional() })
+var openaiImageResponseSchema = z10.object({
+  data: z10.array(
+    z10.object({ b64_json: z10.string(), revised_prompt: z10.string().optional() })
   )
 });

@@ -1520,33 +1683,33 @@ import {
   parseProviderOptions as parseProviderOptions4,
   postFormDataToApi
 } from "@ai-sdk/provider-utils";
-import { z as z10 } from "zod";
+import { z as z12 } from "zod/v4";

 // src/openai-transcription-options.ts
-import { z as z9 } from "zod";
-var openAITranscriptionProviderOptions = z9.object({
+import { z as z11 } from "zod/v4";
+var openAITranscriptionProviderOptions = z11.object({
   /**
    * Additional information to include in the transcription response.
    */
-  include: z9.array(z9.string()).optional(),
+  include: z11.array(z11.string()).optional(),
   /**
    * The language of the input audio in ISO-639-1 format.
    */
-  language: z9.string().optional(),
+  language: z11.string().optional(),
   /**
    * An optional text to guide the model's style or continue a previous audio segment.
    */
-  prompt: z9.string().optional(),
+  prompt: z11.string().optional(),
   /**
    * The sampling temperature, between 0 and 1.
    * @default 0
    */
-  temperature: z9.number().min(0).max(1).default(0).optional(),
+  temperature: z11.number().min(0).max(1).default(0).optional(),
   /**
   * The timestamp granularities to populate for this transcription.
   * @default ['segment']
   */
-  timestampGranularities: z9.array(z9.enum(["word", "segment"])).default(["segment"]).optional()
+  timestampGranularities: z11.array(z11.enum(["word", "segment"])).default(["segment"]).optional()
 });

 // src/openai-transcription-model.ts
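
A usage sketch for the transcription options above, assuming the AI SDK's experimental `transcribe` API:

import { openai } from "@ai-sdk/openai";
import { experimental_transcribe as transcribe } from "ai";
import { readFile } from "node:fs/promises";

const { text } = await transcribe({
  model: openai.transcription("whisper-1"),
  audio: await readFile("meeting.mp3"),
  providerOptions: {
    openai: {
      language: "en", // ISO-639-1
      timestampGranularities: ["word"],
    },
  },
});
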
@@ -1613,7 +1776,7 @@ var OpenAITranscriptionModel = class {
   constructor(modelId, config) {
     this.modelId = modelId;
     this.config = config;
-    this.specificationVersion = "v1";
+    this.specificationVersion = "v2";
   }
   get provider() {
     return this.config.provider;
@@ -1694,15 +1857,15 @@ var OpenAITranscriptionModel = class {
     };
   }
 };
-var openaiTranscriptionResponseSchema = z10.object({
-  text: z10.string(),
-  language: z10.string().nullish(),
-  duration: z10.number().nullish(),
-  words: z10.array(
-    z10.object({
-      word: z10.string(),
-      start: z10.number(),
-      end: z10.number()
+var openaiTranscriptionResponseSchema = z12.object({
+  text: z12.string(),
+  language: z12.string().nullish(),
+  duration: z12.number().nullish(),
+  words: z12.array(
+    z12.object({
+      word: z12.string(),
+      start: z12.number(),
+      end: z12.number()
    })
  ).nullish()
 });
@@ -1714,16 +1877,16 @@ import {
   parseProviderOptions as parseProviderOptions5,
   postJsonToApi as postJsonToApi5
 } from "@ai-sdk/provider-utils";
-import { z as z11 } from "zod";
-var OpenAIProviderOptionsSchema = z11.object({
-  instructions: z11.string().nullish(),
-  speed: z11.number().min(0.25).max(4).default(1).nullish()
+import { z as z13 } from "zod/v4";
+var OpenAIProviderOptionsSchema = z13.object({
+  instructions: z13.string().nullish(),
+  speed: z13.number().min(0.25).max(4).default(1).nullish()
 });
 var OpenAISpeechModel = class {
   constructor(modelId, config) {
     this.modelId = modelId;
     this.config = config;
-    this.specificationVersion = "v1";
+    this.specificationVersion = "v2";
   }
   get provider() {
     return this.config.provider;
@@ -1734,6 +1897,7 @@ var OpenAISpeechModel = class {
     outputFormat = "mp3",
     speed,
     instructions,
+    language,
     providerOptions
   }) {
     const warnings = [];
@@ -1770,6 +1934,13 @@ var OpenAISpeechModel = class {
        }
      }
    }
+    if (language) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "language",
+        details: `OpenAI speech models do not support language selection. Language parameter "${language}" was ignored.`
+      });
+    }
    return {
      requestBody,
      warnings
@@ -1812,24 +1983,30 @@ var OpenAISpeechModel = class {
 };

 // src/responses/openai-responses-language-model.ts
+import {
+  APICallError
+} from "@ai-sdk/provider";
 import {
   combineHeaders as combineHeaders7,
   createEventSourceResponseHandler as createEventSourceResponseHandler3,
   createJsonResponseHandler as createJsonResponseHandler6,
   generateId as generateId2,
-  parseProviderOptions as parseProviderOptions6,
+  parseProviderOptions as parseProviderOptions7,
   postJsonToApi as postJsonToApi6
 } from "@ai-sdk/provider-utils";
-import { z as z12 } from "zod";
+import { z as z15 } from "zod/v4";

 // src/responses/convert-to-openai-responses-messages.ts
 import {
   UnsupportedFunctionalityError as UnsupportedFunctionalityError4
 } from "@ai-sdk/provider";
-function convertToOpenAIResponsesMessages({
+import { parseProviderOptions as parseProviderOptions6 } from "@ai-sdk/provider-utils";
+import { z as z14 } from "zod/v4";
+async function convertToOpenAIResponsesMessages({
   prompt,
   systemMessageMode
 }) {
+  var _a, _b, _c, _d, _e, _f;
   const messages = [];
   const warnings = [];
   for (const { role, content } of prompt) {
@@ -1864,7 +2041,7 @@ function convertToOpenAIResponsesMessages({
       messages.push({
         role: "user",
         content: content.map((part, index) => {
-          var _a, _b, _c;
+          var _a2, _b2, _c2;
           switch (part.type) {
             case "text": {
               return { type: "input_text", text: part.text };
@@ -1876,7 +2053,7 @@ function convertToOpenAIResponsesMessages({
                 type: "input_image",
                 image_url: part.data instanceof URL ? part.data.toString() : `data:${mediaType};base64,${part.data}`,
                 // OpenAI specific extension: image detail
-                detail: (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.imageDetail
+                detail: (_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.openai) == null ? void 0 : _b2.imageDetail
               };
             } else if (part.mediaType === "application/pdf") {
               if (part.data instanceof URL) {
@@ -1886,7 +2063,7 @@ function convertToOpenAIResponsesMessages({
               }
               return {
                 type: "input_file",
-                filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
+                filename: (_c2 = part.filename) != null ? _c2 : `part-${index}.pdf`,
                 file_data: `data:application/pdf;base64,${part.data}`
               };
             } else {
@@ -1901,34 +2078,97 @@ function convertToOpenAIResponsesMessages({
       break;
     }
     case "assistant": {
+      const reasoningMessages = {};
       for (const part of content) {
         switch (part.type) {
           case "text": {
             messages.push({
               role: "assistant",
-              content: [{ type: "output_text", text: part.text }]
+              content: [{ type: "output_text", text: part.text }],
+              id: (_c = (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.itemId) != null ? _c : void 0
             });
             break;
           }
           case "tool-call": {
+            if (part.providerExecuted) {
+              break;
+            }
             messages.push({
               type: "function_call",
               call_id: part.toolCallId,
               name: part.toolName,
-              arguments: JSON.stringify(part.args)
+              arguments: JSON.stringify(part.input),
+              id: (_f = (_e = (_d = part.providerOptions) == null ? void 0 : _d.openai) == null ? void 0 : _e.itemId) != null ? _f : void 0
+            });
+            break;
+          }
+          case "tool-result": {
+            warnings.push({
+              type: "other",
+              message: `tool result parts in assistant messages are not supported for OpenAI responses`
             });
             break;
           }
+          case "reasoning": {
+            const providerOptions = await parseProviderOptions6({
+              provider: "openai",
+              providerOptions: part.providerOptions,
+              schema: openaiResponsesReasoningProviderOptionsSchema
+            });
+            const reasoningId = providerOptions == null ? void 0 : providerOptions.itemId;
+            if (reasoningId != null) {
+              const existingReasoningMessage = reasoningMessages[reasoningId];
+              const summaryParts = [];
+              if (part.text.length > 0) {
+                summaryParts.push({ type: "summary_text", text: part.text });
+              } else if (existingReasoningMessage !== void 0) {
+                warnings.push({
+                  type: "other",
+                  message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
+                });
+              }
+              if (existingReasoningMessage === void 0) {
+                reasoningMessages[reasoningId] = {
+                  type: "reasoning",
+                  id: reasoningId,
+                  encrypted_content: providerOptions == null ? void 0 : providerOptions.reasoningEncryptedContent,
+                  summary: summaryParts
+                };
+                messages.push(reasoningMessages[reasoningId]);
+              } else {
+                existingReasoningMessage.summary.push(...summaryParts);
+              }
+            } else {
+              warnings.push({
+                type: "other",
+                message: `Non-OpenAI reasoning parts are not supported. Skipping reasoning part: ${JSON.stringify(part)}.`
+              });
+            }
+            break;
+          }
         }
       }
       break;
     }
     case "tool": {
       for (const part of content) {
+        const output = part.output;
+        let contentValue;
+        switch (output.type) {
+          case "text":
+          case "error-text":
+            contentValue = output.value;
+            break;
+          case "content":
+          case "json":
+          case "error-json":
+            contentValue = JSON.stringify(output.value);
+            break;
+        }
         messages.push({
           type: "function_call_output",
           call_id: part.toolCallId,
-          output: JSON.stringify(part.result)
+          output: contentValue
         });
       }
       break;
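
The converter above rebuilds OpenAI reasoning items from assistant `reasoning` parts whose provider metadata carries an `itemId` (and optionally `reasoningEncryptedContent`); parts sharing an `itemId` are merged into one item's `summary`. An illustrative part that would round-trip through this code:

// Shaped to satisfy openaiResponsesReasoningProviderOptionsSchema below.
const reasoningPart = {
  type: "reasoning" as const,
  text: "Compared both options before answering.",
  providerOptions: {
    openai: {
      itemId: "rs_abc123", // groups summary parts into a single reasoning item
      reasoningEncryptedContent: null, // or the encrypted payload, when present
    },
  },
};
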
@@ -1941,6 +2181,10 @@ function convertToOpenAIResponsesMessages({
   }
   return { messages, warnings };
 }
+var openaiResponsesReasoningProviderOptionsSchema = z14.object({
+  itemId: z14.string().nullish(),
+  reasoningEncryptedContent: z14.string().nullish()
+});

 // src/responses/map-openai-responses-finish-reason.ts
 function mapOpenAIResponseFinishReason({
@@ -1967,7 +2211,7 @@ import {
 function prepareResponsesTools({
   tools,
   toolChoice,
-  strict
+  strictJsonSchema
 }) {
   tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
   const toolWarnings = [];
@@ -1982,12 +2226,22 @@ function prepareResponsesTools({
         type: "function",
         name: tool.name,
         description: tool.description,
-        parameters: tool.parameters,
-        strict: strict ? true : void 0
+        parameters: tool.inputSchema,
+        strict: strictJsonSchema
       });
       break;
     case "provider-defined":
       switch (tool.id) {
+        case "openai.file_search": {
+          const args = fileSearchArgsSchema.parse(tool.args);
+          openaiTools.push({
+            type: "file_search",
+            vector_store_ids: args.vectorStoreIds,
+            max_results: args.maxResults,
+            search_type: args.searchType
+          });
+          break;
+        }
        case "openai.web_search_preview":
          openaiTools.push({
            type: "web_search_preview",
@@ -2017,7 +2271,7 @@ function prepareResponsesTools({
    case "tool":
      return {
        tools: openaiTools,
-        toolChoice: toolChoice.toolName === "web_search_preview" ? { type: "web_search_preview" } : { type: "function", name: toolChoice.toolName },
+        toolChoice: toolChoice.toolName === "file_search" ? { type: "file_search" } : toolChoice.toolName === "web_search_preview" ? { type: "web_search_preview" } : { type: "function", name: toolChoice.toolName },
        toolWarnings
      };
    default: {
@@ -2081,17 +2335,17 @@ var OpenAIResponsesLanguageModel = class {
    if (stopSequences != null) {
      warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
    }
-    const { messages, warnings: messageWarnings } = convertToOpenAIResponsesMessages({
+    const { messages, warnings: messageWarnings } = await convertToOpenAIResponsesMessages({
      prompt,
      systemMessageMode: modelConfig.systemMessageMode
    });
    warnings.push(...messageWarnings);
-    const openaiOptions = await parseProviderOptions6({
+    const openaiOptions = await parseProviderOptions7({
      provider: "openai",
      providerOptions,
      schema: openaiResponsesProviderOptionsSchema
    });
-    const isStrict = (_a = openaiOptions == null ? void 0 : openaiOptions.strictSchemas) != null ? _a : true;
+    const strictJsonSchema = (_a = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _a : false;
    const baseArgs = {
      model: this.modelId,
      input: messages,
@@ -2102,7 +2356,7 @@ var OpenAIResponsesLanguageModel = class {
      text: {
        format: responseFormat.schema != null ? {
          type: "json_schema",
-          strict: isStrict,
+          strict: strictJsonSchema,
          name: (_b = responseFormat.name) != null ? _b : "response",
          description: responseFormat.description,
          schema: responseFormat.schema
@@ -2116,6 +2370,8 @@ var OpenAIResponsesLanguageModel = class {
      store: openaiOptions == null ? void 0 : openaiOptions.store,
      user: openaiOptions == null ? void 0 : openaiOptions.user,
      instructions: openaiOptions == null ? void 0 : openaiOptions.instructions,
+      service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
+      include: openaiOptions == null ? void 0 : openaiOptions.include,
      // model-specific settings:
      ...modelConfig.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
        reasoning: {
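
The responses model now forwards `serviceTier` and `include` straight into the request body. A sketch, assuming `include` accepts Responses API values such as 'reasoning.encrypted_content':

import { openai } from "@ai-sdk/openai";
import { generateText } from "ai";

const result = await generateText({
  model: openai.responses("o4-mini"),
  prompt: "Plan the migration in three steps.",
  providerOptions: {
    openai: {
      serviceTier: "flex",
      include: ["reasoning.encrypted_content"], // assumed include value
      reasoningSummary: "auto",
    },
  },
});
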
@@ -2148,6 +2404,29 @@ var OpenAIResponsesLanguageModel = class {
         details: "topP is not supported for reasoning models"
       });
     }
+    } else {
+      if ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null) {
+        warnings.push({
+          type: "unsupported-setting",
+          setting: "reasoningEffort",
+          details: "reasoningEffort is not supported for non-reasoning models"
+        });
+      }
+      if ((openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) {
+        warnings.push({
+          type: "unsupported-setting",
+          setting: "reasoningSummary",
+          details: "reasoningSummary is not supported for non-reasoning models"
+        });
+      }
+    }
+    if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !supportsFlexProcessing2(this.modelId)) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "serviceTier",
+        details: "flex processing is only available for o3 and o4-mini models"
+      });
+      delete baseArgs.service_tier;
     }
     const {
       tools: openaiTools,
@@ -2156,7 +2435,7 @@ var OpenAIResponsesLanguageModel = class {
     } = prepareResponsesTools({
       tools,
       toolChoice,
-      strict: isStrict
+      strictJsonSchema
     });
     return {
       args: {
@@ -2168,97 +2447,137 @@ var OpenAIResponsesLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
     const { args: body, warnings } = await this.getArgs(options);
+    const url = this.config.url({
+      path: "/responses",
+      modelId: this.modelId
+    });
     const {
       responseHeaders,
       value: response,
       rawValue: rawResponse
     } = await postJsonToApi6({
-      url: this.config.url({
-        path: "/responses",
-        modelId: this.modelId
-      }),
+      url,
       headers: combineHeaders7(this.config.headers(), options.headers),
       body,
       failedResponseHandler: openaiFailedResponseHandler,
       successfulResponseHandler: createJsonResponseHandler6(
-        z12.object({
-          id: z12.string(),
-          created_at: z12.number(),
-          model: z12.string(),
-          output: z12.array(
-            z12.discriminatedUnion("type", [
-              z12.object({
-                type: z12.literal("message"),
-                role: z12.literal("assistant"),
-                content: z12.array(
-                  z12.object({
-                    type: z12.literal("output_text"),
-                    text: z12.string(),
-                    annotations: z12.array(
-                      z12.object({
-                        type: z12.literal("url_citation"),
-                        start_index: z12.number(),
-                        end_index: z12.number(),
-                        url: z12.string(),
-                        title: z12.string()
+        z15.object({
+          id: z15.string(),
+          created_at: z15.number(),
+          error: z15.object({
+            code: z15.string(),
+            message: z15.string()
+          }).nullish(),
+          model: z15.string(),
+          output: z15.array(
+            z15.discriminatedUnion("type", [
+              z15.object({
+                type: z15.literal("message"),
+                role: z15.literal("assistant"),
+                id: z15.string(),
+                content: z15.array(
+                  z15.object({
+                    type: z15.literal("output_text"),
+                    text: z15.string(),
+                    annotations: z15.array(
+                      z15.object({
+                        type: z15.literal("url_citation"),
+                        start_index: z15.number(),
+                        end_index: z15.number(),
+                        url: z15.string(),
+                        title: z15.string()
                      })
                    )
                  })
                )
              }),
-              z12.object({
-                type: z12.literal("function_call"),
-                call_id: z12.string(),
-                name: z12.string(),
-                arguments: z12.string()
+              z15.object({
+                type: z15.literal("function_call"),
+                call_id: z15.string(),
+                name: z15.string(),
+                arguments: z15.string(),
+                id: z15.string()
              }),
-              z12.object({
-                type: z12.literal("web_search_call")
+              z15.object({
+                type: z15.literal("web_search_call"),
+                id: z15.string(),
+                status: z15.string().optional()
              }),
-              z12.object({
-                type: z12.literal("computer_call")
+              z15.object({
+                type: z15.literal("computer_call"),
+                id: z15.string(),
+                status: z15.string().optional()
              }),
-              z12.object({
-                type: z12.literal("reasoning"),
-                summary: z12.array(
-                  z12.object({
-                    type: z12.literal("summary_text"),
-                    text: z12.string()
+              z15.object({
+                type: z15.literal("reasoning"),
+                id: z15.string(),
+                encrypted_content: z15.string().nullish(),
+                summary: z15.array(
+                  z15.object({
+                    type: z15.literal("summary_text"),
+                    text: z15.string()
                  })
                )
              })
            ])
          ),
-          incomplete_details: z12.object({ reason: z12.string() }).nullable(),
+          incomplete_details: z15.object({ reason: z15.string() }).nullable(),
          usage: usageSchema2
        })
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
+    if (response.error) {
+      throw new APICallError({
+        message: response.error.message,
+        url,
+        requestBodyValues: body,
+        statusCode: 400,
+        responseHeaders,
+        responseBody: rawResponse,
+        isRetryable: false
+      });
+    }
    const content = [];
    for (const part of response.output) {
      switch (part.type) {
        case "reasoning": {
-          content.push({
-            type: "reasoning",
-            text: part.summary.map((summary) => summary.text).join()
-          });
+          if (part.summary.length === 0) {
+            part.summary.push({ type: "summary_text", text: "" });
+          }
+          for (const summary of part.summary) {
+            content.push({
+              type: "reasoning",
+              text: summary.text,
+              providerMetadata: {
+                openai: {
+                  itemId: part.id,
+                  reasoningEncryptedContent: (_a = part.encrypted_content) != null ? _a : null
+                }
+              }
+            });
+          }
          break;
        }
        case "message": {
          for (const contentPart of part.content) {
            content.push({
              type: "text",
-              text: contentPart.text
+              text: contentPart.text,
+              providerMetadata: {
+                openai: {
+                  itemId: part.id
+                }
+              }
            });
            for (const annotation of contentPart.annotations) {
              content.push({
                type: "source",
                sourceType: "url",
-                id: (_c = (_b = (_a = this.config).generateId) == null ? void 0 : _b.call(_a)) != null ? _c : generateId2(),
+                id: (_d = (_c = (_b = this.config).generateId) == null ? void 0 : _c.call(_b)) != null ? _d : generateId2(),
                url: annotation.url,
                title: annotation.title
              });
@@ -2269,10 +2588,51 @@ var OpenAIResponsesLanguageModel = class {
        case "function_call": {
          content.push({
            type: "tool-call",
-            toolCallType: "function",
            toolCallId: part.call_id,
            toolName: part.name,
-            args: part.arguments
+            input: part.arguments,
+            providerMetadata: {
+              openai: {
+                itemId: part.id
+              }
+            }
+          });
+          break;
+        }
+        case "web_search_call": {
+          content.push({
+            type: "tool-call",
+            toolCallId: part.id,
+            toolName: "web_search_preview",
+            input: "",
+            providerExecuted: true
+          });
+          content.push({
+            type: "tool-result",
+            toolCallId: part.id,
+            toolName: "web_search_preview",
+            result: { status: part.status || "completed" },
+            providerExecuted: true
+          });
+          break;
+        }
+        case "computer_call": {
+          content.push({
+            type: "tool-call",
+            toolCallId: part.id,
+            toolName: "computer_use",
+            input: "",
+            providerExecuted: true
+          });
+          content.push({
+            type: "tool-result",
+            toolCallId: part.id,
+            toolName: "computer_use",
+            result: {
+              type: "computer_use_tool_result",
+              status: part.status || "completed"
+            },
+            providerExecuted: true
          });
          break;
        }
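
Because `web_search_call` and `computer_call` items run on OpenAI's side, the model now reports them as paired `tool-call` / `tool-result` content parts marked `providerExecuted: true`. A sketch of keeping only the calls the client still has to execute (content part typing is simplified):

type ContentPart = {
  type: string;
  toolName?: string;
  providerExecuted?: boolean;
};

function clientToolCalls(content: ContentPart[]): ContentPart[] {
  return content.filter(
    (part) => part.type === "tool-call" && !part.providerExecuted
  );
}
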
@@ -2281,15 +2641,15 @@ var OpenAIResponsesLanguageModel = class {
2281
2641
  return {
2282
2642
  content,
2283
2643
  finishReason: mapOpenAIResponseFinishReason({
2284
- finishReason: (_d = response.incomplete_details) == null ? void 0 : _d.reason,
2644
+ finishReason: (_e = response.incomplete_details) == null ? void 0 : _e.reason,
2285
2645
  hasToolCalls: content.some((part) => part.type === "tool-call")
2286
2646
  }),
2287
2647
  usage: {
2288
2648
  inputTokens: response.usage.input_tokens,
2289
2649
  outputTokens: response.usage.output_tokens,
2290
2650
  totalTokens: response.usage.input_tokens + response.usage.output_tokens,
2291
- reasoningTokens: (_f = (_e = response.usage.output_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : void 0,
2292
- cachedInputTokens: (_h = (_g = response.usage.input_tokens_details) == null ? void 0 : _g.cached_tokens) != null ? _h : void 0
2651
+ reasoningTokens: (_g = (_f = response.usage.output_tokens_details) == null ? void 0 : _f.reasoning_tokens) != null ? _g : void 0,
2652
+ cachedInputTokens: (_i = (_h = response.usage.input_tokens_details) == null ? void 0 : _h.cached_tokens) != null ? _i : void 0
2293
2653
  },
2294
2654
  request: { body },
2295
2655
  response: {
@@ -2336,6 +2696,7 @@ var OpenAIResponsesLanguageModel = class {
2336
2696
  let responseId = null;
2337
2697
  const ongoingToolCalls = {};
2338
2698
  let hasToolCalls = false;
2699
+ const activeReasoning = {};
2339
2700
  return {
2340
2701
  stream: response.pipeThrough(
2341
2702
  new TransformStream({
@@ -2343,7 +2704,10 @@ var OpenAIResponsesLanguageModel = class {
2343
2704
  controller.enqueue({ type: "stream-start", warnings });
2344
2705
  },
2345
2706
  transform(chunk, controller) {
2346
- var _a, _b, _c, _d, _e, _f, _g, _h;
2707
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
2708
+ if (options.includeRawChunks) {
2709
+ controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
2710
+ }
2347
2711
  if (!chunk.success) {
2348
2712
  finishReason = "error";
2349
2713
  controller.enqueue({ type: "error", error: chunk.error });
@@ -2357,22 +2721,151 @@ var OpenAIResponsesLanguageModel = class {
  toolCallId: value.item.call_id
  };
  controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
+ type: "tool-input-start",
+ id: value.item.call_id,
+ toolName: value.item.name
+ });
+ } else if (value.item.type === "web_search_call") {
+ ongoingToolCalls[value.output_index] = {
+ toolName: "web_search_preview",
+ toolCallId: value.item.id
+ };
+ controller.enqueue({
+ type: "tool-input-start",
+ id: value.item.id,
+ toolName: "web_search_preview"
+ });
+ } else if (value.item.type === "computer_call") {
+ ongoingToolCalls[value.output_index] = {
+ toolName: "computer_use",
+ toolCallId: value.item.id
+ };
+ controller.enqueue({
+ type: "tool-input-start",
+ id: value.item.id,
+ toolName: "computer_use"
+ });
+ } else if (value.item.type === "message") {
+ controller.enqueue({
+ type: "text-start",
+ id: value.item.id,
+ providerMetadata: {
+ openai: {
+ itemId: value.item.id
+ }
+ }
+ });
+ } else if (isResponseOutputItemAddedReasoningChunk(value)) {
+ activeReasoning[value.item.id] = {
+ encryptedContent: value.item.encrypted_content,
+ summaryParts: [0]
+ };
+ controller.enqueue({
+ type: "reasoning-start",
+ id: `${value.item.id}:0`,
+ providerMetadata: {
+ openai: {
+ itemId: value.item.id,
+ reasoningEncryptedContent: (_a = value.item.encrypted_content) != null ? _a : null
+ }
+ }
+ });
+ }
+ } else if (isResponseOutputItemDoneChunk(value)) {
+ if (value.item.type === "function_call") {
+ ongoingToolCalls[value.output_index] = void 0;
+ hasToolCalls = true;
+ controller.enqueue({
+ type: "tool-input-end",
+ id: value.item.call_id
+ });
+ controller.enqueue({
+ type: "tool-call",
  toolCallId: value.item.call_id,
  toolName: value.item.name,
- argsTextDelta: value.item.arguments
+ input: value.item.arguments,
+ providerMetadata: {
+ openai: {
+ itemId: value.item.id
+ }
+ }
  });
+ } else if (value.item.type === "web_search_call") {
+ ongoingToolCalls[value.output_index] = void 0;
+ hasToolCalls = true;
+ controller.enqueue({
+ type: "tool-input-end",
+ id: value.item.id
+ });
+ controller.enqueue({
+ type: "tool-call",
+ toolCallId: value.item.id,
+ toolName: "web_search_preview",
+ input: "",
+ providerExecuted: true
+ });
+ controller.enqueue({
+ type: "tool-result",
+ toolCallId: value.item.id,
+ toolName: "web_search_preview",
+ result: {
+ type: "web_search_tool_result",
+ status: value.item.status || "completed"
+ },
+ providerExecuted: true
+ });
+ } else if (value.item.type === "computer_call") {
+ ongoingToolCalls[value.output_index] = void 0;
+ hasToolCalls = true;
+ controller.enqueue({
+ type: "tool-input-end",
+ id: value.item.id
+ });
+ controller.enqueue({
+ type: "tool-call",
+ toolCallId: value.item.id,
+ toolName: "computer_use",
+ input: "",
+ providerExecuted: true
+ });
+ controller.enqueue({
+ type: "tool-result",
+ toolCallId: value.item.id,
+ toolName: "computer_use",
+ result: {
+ type: "computer_use_tool_result",
+ status: value.item.status || "completed"
+ },
+ providerExecuted: true
+ });
+ } else if (value.item.type === "message") {
+ controller.enqueue({
+ type: "text-end",
+ id: value.item.id
+ });
+ } else if (isResponseOutputItemDoneReasoningChunk(value)) {
+ const activeReasoningPart = activeReasoning[value.item.id];
+ for (const summaryIndex of activeReasoningPart.summaryParts) {
+ controller.enqueue({
+ type: "reasoning-end",
+ id: `${value.item.id}:${summaryIndex}`,
+ providerMetadata: {
+ openai: {
+ itemId: value.item.id,
+ reasoningEncryptedContent: (_b = value.item.encrypted_content) != null ? _b : null
+ }
+ }
+ });
+ }
+ delete activeReasoning[value.item.id];
  }
  } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
  const toolCall = ongoingToolCalls[value.output_index];
  if (toolCall != null) {
  controller.enqueue({
- type: "tool-call-delta",
- toolCallType: "function",
- toolCallId: toolCall.toolCallId,
- toolName: toolCall.toolName,
- argsTextDelta: value.delta
+ type: "tool-input-delta",
+ id: toolCall.toolCallId,
+ delta: value.delta
  });
  }
  } else if (isResponseCreatedChunk(value)) {
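The single tool-call-delta part of the alpha is replaced here by a tool-input-start / tool-input-delta / tool-input-end lifecycle keyed by id, with a final tool-call part carrying the complete input string. A minimal consumer sketch of that lifecycle (part names come from this hunk; the accumulation logic is illustrative):

```ts
// Illustrative: rebuild each tool call's JSON input from the
// start/delta/end lifecycle emitted above.
const pendingInputs = new Map<string, string>();

function onStreamPart(part: any) {
  switch (part.type) {
    case 'tool-input-start':
      pendingInputs.set(part.id, ''); // begin buffering for this call id
      break;
    case 'tool-input-delta':
      pendingInputs.set(part.id, (pendingInputs.get(part.id) ?? '') + part.delta);
      break;
    case 'tool-input-end':
      console.log('input complete:', pendingInputs.get(part.id));
      pendingInputs.delete(part.id);
      break;
    case 'tool-call':
      // the final part repeats the full input as a string
      // (empty for provider-executed tools such as web_search_preview)
      console.log(part.toolName, JSON.parse(part.input || '{}'));
      break;
  }
}
```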
@@ -2385,42 +2878,57 @@ var OpenAIResponsesLanguageModel = class {
  });
  } else if (isTextDeltaChunk(value)) {
  controller.enqueue({
- type: "text",
- text: value.delta
+ type: "text-delta",
+ id: value.item_id,
+ delta: value.delta
  });
+ } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
+ if (value.summary_index > 0) {
+ (_c = activeReasoning[value.item_id]) == null ? void 0 : _c.summaryParts.push(
+ value.summary_index
+ );
+ controller.enqueue({
+ type: "reasoning-start",
+ id: `${value.item_id}:${value.summary_index}`,
+ providerMetadata: {
+ openai: {
+ itemId: value.item_id,
+ reasoningEncryptedContent: (_e = (_d = activeReasoning[value.item_id]) == null ? void 0 : _d.encryptedContent) != null ? _e : null
+ }
+ }
+ });
+ }
  } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
  controller.enqueue({
- type: "reasoning",
- text: value.delta
- });
- } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "function_call") {
- ongoingToolCalls[value.output_index] = void 0;
- hasToolCalls = true;
- controller.enqueue({
- type: "tool-call",
- toolCallType: "function",
- toolCallId: value.item.call_id,
- toolName: value.item.name,
- args: value.item.arguments
+ type: "reasoning-delta",
+ id: `${value.item_id}:${value.summary_index}`,
+ delta: value.delta,
+ providerMetadata: {
+ openai: {
+ itemId: value.item_id
+ }
+ }
  });
  } else if (isResponseFinishedChunk(value)) {
  finishReason = mapOpenAIResponseFinishReason({
- finishReason: (_a = value.response.incomplete_details) == null ? void 0 : _a.reason,
+ finishReason: (_f = value.response.incomplete_details) == null ? void 0 : _f.reason,
  hasToolCalls
  });
  usage.inputTokens = value.response.usage.input_tokens;
  usage.outputTokens = value.response.usage.output_tokens;
  usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
- usage.reasoningTokens = (_c = (_b = value.response.usage.output_tokens_details) == null ? void 0 : _b.reasoning_tokens) != null ? _c : void 0;
- usage.cachedInputTokens = (_e = (_d = value.response.usage.input_tokens_details) == null ? void 0 : _d.cached_tokens) != null ? _e : void 0;
+ usage.reasoningTokens = (_h = (_g = value.response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : void 0;
+ usage.cachedInputTokens = (_j = (_i = value.response.usage.input_tokens_details) == null ? void 0 : _i.cached_tokens) != null ? _j : void 0;
  } else if (isResponseAnnotationAddedChunk(value)) {
  controller.enqueue({
  type: "source",
  sourceType: "url",
- id: (_h = (_g = (_f = self.config).generateId) == null ? void 0 : _g.call(_f)) != null ? _h : generateId2(),
+ id: (_m = (_l = (_k = self.config).generateId) == null ? void 0 : _l.call(_k)) != null ? _m : generateId2(),
  url: value.annotation.url,
  title: value.annotation.title
  });
+ } else if (isErrorChunk(value)) {
+ controller.enqueue({ type: "error", error: value });
  }
  },
  flush(controller) {
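Reasoning parts are now keyed by a composite id of the form itemId:summaryIndex, so one reasoning item can carry several summary parts that open, stream, and close independently. A small sketch of grouping them back together on the consumer side (the id format comes from this diff; the grouping itself is illustrative):

```ts
// Illustrative: collect reasoning summary text per OpenAI reasoning item.
const summaries = new Map<string, string[]>(); // itemId -> text per summary part

function onReasoningDelta(part: { id: string; delta: string }) {
  // id has the shape `${itemId}:${summaryIndex}` (see reasoning-start above)
  const [itemId, indexText] = part.id.split(':');
  const index = Number(indexText);
  const parts = summaries.get(itemId) ?? [];
  parts[index] = (parts[index] ?? '') + part.delta;
  summaries.set(itemId, parts);
}
```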
@@ -2442,95 +2950,141 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  };
- var usageSchema2 = z12.object({
- input_tokens: z12.number(),
- input_tokens_details: z12.object({ cached_tokens: z12.number().nullish() }).nullish(),
- output_tokens: z12.number(),
- output_tokens_details: z12.object({ reasoning_tokens: z12.number().nullish() }).nullish()
+ var usageSchema2 = z15.object({
+ input_tokens: z15.number(),
+ input_tokens_details: z15.object({ cached_tokens: z15.number().nullish() }).nullish(),
+ output_tokens: z15.number(),
+ output_tokens_details: z15.object({ reasoning_tokens: z15.number().nullish() }).nullish()
+ });
+ var textDeltaChunkSchema = z15.object({
+ type: z15.literal("response.output_text.delta"),
+ item_id: z15.string(),
+ delta: z15.string()
  });
- var textDeltaChunkSchema = z12.object({
- type: z12.literal("response.output_text.delta"),
- delta: z12.string()
+ var errorChunkSchema = z15.object({
+ type: z15.literal("error"),
+ code: z15.string(),
+ message: z15.string(),
+ param: z15.string().nullish(),
+ sequence_number: z15.number()
  });
- var responseFinishedChunkSchema = z12.object({
- type: z12.enum(["response.completed", "response.incomplete"]),
- response: z12.object({
- incomplete_details: z12.object({ reason: z12.string() }).nullish(),
+ var responseFinishedChunkSchema = z15.object({
+ type: z15.enum(["response.completed", "response.incomplete"]),
+ response: z15.object({
+ incomplete_details: z15.object({ reason: z15.string() }).nullish(),
  usage: usageSchema2
  })
  });
- var responseCreatedChunkSchema = z12.object({
- type: z12.literal("response.created"),
- response: z12.object({
- id: z12.string(),
- created_at: z12.number(),
- model: z12.string()
+ var responseCreatedChunkSchema = z15.object({
+ type: z15.literal("response.created"),
+ response: z15.object({
+ id: z15.string(),
+ created_at: z15.number(),
+ model: z15.string()
  })
  });
- var responseOutputItemDoneSchema = z12.object({
- type: z12.literal("response.output_item.done"),
- output_index: z12.number(),
- item: z12.discriminatedUnion("type", [
- z12.object({
- type: z12.literal("message")
+ var responseOutputItemAddedSchema = z15.object({
+ type: z15.literal("response.output_item.added"),
+ output_index: z15.number(),
+ item: z15.discriminatedUnion("type", [
+ z15.object({
+ type: z15.literal("message"),
+ id: z15.string()
  }),
- z12.object({
- type: z12.literal("function_call"),
- id: z12.string(),
- call_id: z12.string(),
- name: z12.string(),
- arguments: z12.string(),
- status: z12.literal("completed")
+ z15.object({
+ type: z15.literal("reasoning"),
+ id: z15.string(),
+ encrypted_content: z15.string().nullish()
+ }),
+ z15.object({
+ type: z15.literal("function_call"),
+ id: z15.string(),
+ call_id: z15.string(),
+ name: z15.string(),
+ arguments: z15.string()
+ }),
+ z15.object({
+ type: z15.literal("web_search_call"),
+ id: z15.string(),
+ status: z15.string()
+ }),
+ z15.object({
+ type: z15.literal("computer_call"),
+ id: z15.string(),
+ status: z15.string()
  })
  ])
  });
- var responseFunctionCallArgumentsDeltaSchema = z12.object({
- type: z12.literal("response.function_call_arguments.delta"),
- item_id: z12.string(),
- output_index: z12.number(),
- delta: z12.string()
- });
- var responseOutputItemAddedSchema = z12.object({
- type: z12.literal("response.output_item.added"),
- output_index: z12.number(),
- item: z12.discriminatedUnion("type", [
- z12.object({
- type: z12.literal("message")
+ var responseOutputItemDoneSchema = z15.object({
+ type: z15.literal("response.output_item.done"),
+ output_index: z15.number(),
+ item: z15.discriminatedUnion("type", [
+ z15.object({
+ type: z15.literal("message"),
+ id: z15.string()
  }),
- z12.object({
- type: z12.literal("function_call"),
- id: z12.string(),
- call_id: z12.string(),
- name: z12.string(),
- arguments: z12.string()
+ z15.object({
+ type: z15.literal("reasoning"),
+ id: z15.string(),
+ encrypted_content: z15.string().nullish()
+ }),
+ z15.object({
+ type: z15.literal("function_call"),
+ id: z15.string(),
+ call_id: z15.string(),
+ name: z15.string(),
+ arguments: z15.string(),
+ status: z15.literal("completed")
+ }),
+ z15.object({
+ type: z15.literal("web_search_call"),
+ id: z15.string(),
+ status: z15.literal("completed")
+ }),
+ z15.object({
+ type: z15.literal("computer_call"),
+ id: z15.string(),
+ status: z15.literal("completed")
  })
  ])
  });
- var responseAnnotationAddedSchema = z12.object({
- type: z12.literal("response.output_text.annotation.added"),
- annotation: z12.object({
- type: z12.literal("url_citation"),
- url: z12.string(),
- title: z12.string()
+ var responseFunctionCallArgumentsDeltaSchema = z15.object({
+ type: z15.literal("response.function_call_arguments.delta"),
+ item_id: z15.string(),
+ output_index: z15.number(),
+ delta: z15.string()
+ });
+ var responseAnnotationAddedSchema = z15.object({
+ type: z15.literal("response.output_text.annotation.added"),
+ annotation: z15.object({
+ type: z15.literal("url_citation"),
+ url: z15.string(),
+ title: z15.string()
  })
  });
- var responseReasoningSummaryTextDeltaSchema = z12.object({
- type: z12.literal("response.reasoning_summary_text.delta"),
- item_id: z12.string(),
- output_index: z12.number(),
- summary_index: z12.number(),
- delta: z12.string()
+ var responseReasoningSummaryPartAddedSchema = z15.object({
+ type: z15.literal("response.reasoning_summary_part.added"),
+ item_id: z15.string(),
+ summary_index: z15.number()
  });
- var openaiResponsesChunkSchema = z12.union([
+ var responseReasoningSummaryTextDeltaSchema = z15.object({
+ type: z15.literal("response.reasoning_summary_text.delta"),
+ item_id: z15.string(),
+ summary_index: z15.number(),
+ delta: z15.string()
+ });
+ var openaiResponsesChunkSchema = z15.union([
  textDeltaChunkSchema,
  responseFinishedChunkSchema,
  responseCreatedChunkSchema,
+ responseOutputItemAddedSchema,
  responseOutputItemDoneSchema,
  responseFunctionCallArgumentsDeltaSchema,
- responseOutputItemAddedSchema,
  responseAnnotationAddedSchema,
+ responseReasoningSummaryPartAddedSchema,
  responseReasoningSummaryTextDeltaSchema,
- z12.object({ type: z12.string() }).passthrough()
+ errorChunkSchema,
+ z15.object({ type: z15.string() }).loose()
  // fallback for unknown chunks
  ]);
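Note the Zod v4 migration visible here: the unknown-chunk fallback moves from .passthrough() to .loose(), the v4 spelling for an object schema that keeps unrecognized keys. A tiny sketch of that fallback behavior (zod/v4 import as used in this bundle; the sample event is made up):

```ts
import { z } from 'zod/v4';

// Unknown event types still parse: the loose fallback keeps extra keys
// instead of stripping or rejecting them.
const fallback = z.object({ type: z.string() }).loose();

const parsed = fallback.parse({
  type: 'response.some_future_event', // hypothetical event type
  payload: { anything: true },
});
console.log(parsed.payload); // extra key survives parsing
```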
  function isTextDeltaChunk(chunk) {
@@ -2539,6 +3093,9 @@ function isTextDeltaChunk(chunk) {
  function isResponseOutputItemDoneChunk(chunk) {
  return chunk.type === "response.output_item.done";
  }
+ function isResponseOutputItemDoneReasoningChunk(chunk) {
+ return isResponseOutputItemDoneChunk(chunk) && chunk.item.type === "reasoning";
+ }
  function isResponseFinishedChunk(chunk) {
  return chunk.type === "response.completed" || chunk.type === "response.incomplete";
  }
@@ -2551,14 +3108,23 @@ function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
  function isResponseOutputItemAddedChunk(chunk) {
  return chunk.type === "response.output_item.added";
  }
+ function isResponseOutputItemAddedReasoningChunk(chunk) {
+ return isResponseOutputItemAddedChunk(chunk) && chunk.item.type === "reasoning";
+ }
  function isResponseAnnotationAddedChunk(chunk) {
  return chunk.type === "response.output_text.annotation.added";
  }
+ function isResponseReasoningSummaryPartAddedChunk(chunk) {
+ return chunk.type === "response.reasoning_summary_part.added";
+ }
  function isResponseReasoningSummaryTextDeltaChunk(chunk) {
  return chunk.type === "response.reasoning_summary_text.delta";
  }
+ function isErrorChunk(chunk) {
+ return chunk.type === "error";
+ }
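These helpers follow the standard TypeScript type-guard pattern: checking the type discriminant narrows the parsed union to a single chunk schema. A minimal standalone sketch of the same pattern (types simplified; not the package's actual declarations):

```ts
// Simplified union mirroring the chunk schemas above.
type Chunk =
  | { type: 'error'; code: string; message: string }
  | { type: 'response.output_item.done'; item: { type: string } };

// Returning `chunk is ...` lets the compiler narrow inside if-branches.
function isErrorChunk(chunk: Chunk): chunk is Extract<Chunk, { type: 'error' }> {
  return chunk.type === 'error';
}

declare const chunk: Chunk;
if (isErrorChunk(chunk)) {
  console.log(chunk.code); // narrowed: code/message are available here
}
```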
  function getResponsesModelConfig(modelId) {
- if (modelId.startsWith("o")) {
+ if (modelId.startsWith("o") || modelId.startsWith("codex-") || modelId.startsWith("computer-use")) {
  if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
  return {
  isReasoningModel: true,
@@ -2578,16 +3144,21 @@ function getResponsesModelConfig(modelId) {
  requiredAutoTruncation: false
  };
  }
- var openaiResponsesProviderOptionsSchema = z12.object({
- metadata: z12.any().nullish(),
- parallelToolCalls: z12.boolean().nullish(),
- previousResponseId: z12.string().nullish(),
- store: z12.boolean().nullish(),
- user: z12.string().nullish(),
- reasoningEffort: z12.string().nullish(),
- strictSchemas: z12.boolean().nullish(),
- instructions: z12.string().nullish(),
- reasoningSummary: z12.string().nullish()
+ function supportsFlexProcessing2(modelId) {
+ return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+ }
+ var openaiResponsesProviderOptionsSchema = z15.object({
+ metadata: z15.any().nullish(),
+ parallelToolCalls: z15.boolean().nullish(),
+ previousResponseId: z15.string().nullish(),
+ store: z15.boolean().nullish(),
+ user: z15.string().nullish(),
+ reasoningEffort: z15.string().nullish(),
+ strictJsonSchema: z15.boolean().nullish(),
+ instructions: z15.string().nullish(),
+ reasoningSummary: z15.string().nullish(),
+ serviceTier: z15.enum(["auto", "flex"]).nullish(),
+ include: z15.array(z15.enum(["reasoning.encrypted_content"])).nullish()
  });
  export {
  OpenAIChatLanguageModel,
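The beta widens the Responses provider options: strictSchemas is renamed strictJsonSchema, and serviceTier ('auto' | 'flex', gated to o3 / o4-mini by supportsFlexProcessing2) and include (['reasoning.encrypted_content']) are new. A hedged usage sketch, assuming the AI SDK's generateText and its providerOptions.openai passthrough; the option names come from the schema above:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Illustrative call exercising the new beta options end to end.
const { text } = await generateText({
  model: openai.responses('o4-mini'),
  prompt: 'Summarize the latest release notes.',
  providerOptions: {
    openai: {
      serviceTier: 'flex', // cheaper, higher-latency tier; o3 / o4-mini only
      strictJsonSchema: true, // renamed from strictSchemas in the alpha
      include: ['reasoning.encrypted_content'], // return encrypted reasoning
    },
  },
});
```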