@ai-sdk/openai 3.0.0-beta.99 → 3.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -83,8 +83,8 @@ var openaiFailedResponseHandler = (0, import_provider_utils.createJsonErrorRespo
  function getOpenAILanguageModelCapabilities(modelId) {
    const supportsFlexProcessing = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
    const supportsPriorityProcessing = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
-   const isReasoningModel = !(modelId.startsWith("gpt-3") || modelId.startsWith("gpt-4") || modelId.startsWith("chatgpt-4o") || modelId.startsWith("gpt-5-chat"));
-   const supportsNonReasoningParameters = modelId.startsWith("gpt-5.1");
+   const isReasoningModel = modelId.startsWith("o1") || modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("codex-mini") || modelId.startsWith("computer-use-preview") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
+   const supportsNonReasoningParameters = modelId.startsWith("gpt-5.1") || modelId.startsWith("gpt-5.2");
    const systemMessageMode = isReasoningModel ? "developer" : "system";
    return {
      supportsFlexProcessing,
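
The reasoning-model check flips from a denylist (everything except gpt-3/gpt-4/chatgpt-4o/gpt-5-chat) to an explicit allowlist. A minimal sketch of the observable difference, with the predicate copied from the new code (model IDs below are illustrative):

// TypeScript sketch. Under the old denylist, any unrecognized ID (e.g. a
// proxy's custom model name) counted as a reasoning model; now unknown IDs
// default to non-reasoning.
const isReasoningModel = (modelId: string): boolean =>
  modelId.startsWith("o1") ||
  modelId.startsWith("o3") ||
  modelId.startsWith("o4-mini") ||
  modelId.startsWith("codex-mini") ||
  modelId.startsWith("computer-use-preview") ||
  (modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat"));

isReasoningModel("o3-mini");        // true  (allowlisted)
isReasoningModel("gpt-5-chat");     // false (explicitly excluded)
isReasoningModel("my-proxy-model"); // false in 3.0.1; true under the beta denylist
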
@@ -283,6 +283,9 @@ function convertToOpenAIChatMessages({
      }
      case "tool": {
        for (const toolResponse of content) {
+         if (toolResponse.type === "tool-approval-response") {
+           continue;
+         }
          const output = toolResponse.output;
          let contentValue;
          switch (output.type) {
@@ -342,7 +345,7 @@ function mapOpenAIFinishReason(finishReason) {
    case "tool_calls":
      return "tool-calls";
    default:
-     return "unknown";
+     return "other";
  }
}

@@ -583,7 +586,26 @@ var openaiChatLanguageModelOptions = (0, import_provider_utils4.lazySchema)(
       * username or email address, in order to avoid sending us any identifying
       * information.
       */
-     safetyIdentifier: import_v43.z.string().optional()
+     safetyIdentifier: import_v43.z.string().optional(),
+     /**
+      * Override the system message mode for this model.
+      * - 'system': Use the 'system' role for system messages (default for most models)
+      * - 'developer': Use the 'developer' role for system messages (used by reasoning models)
+      * - 'remove': Remove system messages entirely
+      *
+      * If not specified, the mode is automatically determined based on the model.
+      */
+     systemMessageMode: import_v43.z.enum(["system", "developer", "remove"]).optional(),
+     /**
+      * Force treating this model as a reasoning model.
+      *
+      * This is useful for "stealth" reasoning models (e.g. via a custom baseURL)
+      * where the model ID is not recognized by the SDK's allowlist.
+      *
+      * When enabled, the SDK applies reasoning-model parameter compatibility rules
+      * and defaults `systemMessageMode` to `developer` unless overridden.
+      */
+     forceReasoning: import_v43.z.boolean().optional()
    })
  )
);
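
The two new chat provider options close the gap the allowlist opens: a reasoning model served under an unrecognized ID (for example through a gateway with a custom baseURL) can opt back in. A hedged usage sketch; `createOpenAI` and `generateText` are the standard AI SDK entry points, while the gateway URL and model ID are made up:

import { generateText } from "ai";
import { createOpenAI } from "@ai-sdk/openai";

// Hypothetical gateway that serves a reasoning model under a custom ID.
const gateway = createOpenAI({ baseURL: "https://my-gateway.example/v1" });

const { text } = await generateText({
  model: gateway("my-stealth-reasoning-model"),
  prompt: "Summarize the last release.",
  providerOptions: {
    openai: {
      forceReasoning: true,           // apply reasoning-model parameter rules
      systemMessageMode: "developer", // optional; forceReasoning already defaults to this
    },
  },
});
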
@@ -678,7 +700,7 @@ var OpenAIChatLanguageModel = class {
    toolChoice,
    providerOptions
  }) {
-   var _a, _b, _c;
+   var _a, _b, _c, _d, _e;
    const warnings = [];
    const openaiOptions = (_a = await (0, import_provider_utils5.parseProviderOptions)({
      provider: "openai",
@@ -686,17 +708,18 @@ var OpenAIChatLanguageModel = class {
      schema: openaiChatLanguageModelOptions
    })) != null ? _a : {};
    const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
+   const isReasoningModel = (_b = openaiOptions.forceReasoning) != null ? _b : modelCapabilities.isReasoningModel;
    if (topK != null) {
      warnings.push({ type: "unsupported", feature: "topK" });
    }
    const { messages, warnings: messageWarnings } = convertToOpenAIChatMessages(
      {
        prompt,
-       systemMessageMode: modelCapabilities.systemMessageMode
+       systemMessageMode: (_c = openaiOptions.systemMessageMode) != null ? _c : isReasoningModel ? "developer" : modelCapabilities.systemMessageMode
      }
    );
    warnings.push(...messageWarnings);
-   const strictJsonSchema = (_b = openaiOptions.strictJsonSchema) != null ? _b : true;
+   const strictJsonSchema = (_d = openaiOptions.strictJsonSchema) != null ? _d : true;
    const baseArgs = {
      // model id:
      model: this.modelId,
@@ -717,7 +740,7 @@ var OpenAIChatLanguageModel = class {
        json_schema: {
          schema: responseFormat.schema,
          strict: strictJsonSchema,
-         name: (_c = responseFormat.name) != null ? _c : "response",
+         name: (_e = responseFormat.name) != null ? _e : "response",
          description: responseFormat.description
        }
      } : { type: "json_object" } : void 0,
@@ -738,7 +761,7 @@ var OpenAIChatLanguageModel = class {
      // messages:
      messages
    };
-   if (modelCapabilities.isReasoningModel) {
+   if (isReasoningModel) {
      if (openaiOptions.reasoningEffort !== "none" || !modelCapabilities.supportsNonReasoningParameters) {
        if (baseArgs.temperature != null) {
          baseArgs.temperature = void 0;
@@ -844,7 +867,7 @@ var OpenAIChatLanguageModel = class {
    };
  }
  async doGenerate(options) {
-   var _a, _b, _c, _d, _e, _f;
+   var _a, _b, _c, _d, _e, _f, _g;
    const { args: body, warnings } = await this.getArgs(options);
    const {
      responseHeaders,
@@ -901,7 +924,10 @@ var OpenAIChatLanguageModel = class {
    }
    return {
      content,
-     finishReason: mapOpenAIFinishReason(choice.finish_reason),
+     finishReason: {
+       unified: mapOpenAIFinishReason(choice.finish_reason),
+       raw: (_g = choice.finish_reason) != null ? _g : void 0
+     },
      usage: convertOpenAIChatUsage(response.usage),
      request: { body },
      response: {
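
Throughout this release, `finishReason` changes from a bare string to an object pairing the SDK's unified value with the provider's raw `finish_reason` (and the `"unknown"` value is retired in favor of `"other"`). Roughly, the shape implied by the call sites in this diff; the authoritative type lives in the AI SDK's provider spec and may differ in detail:

// Inferred sketch, not copied from @ai-sdk/provider. Only "tool-calls",
// "content-filter", "error", and "other" are visible in this diff; the
// remaining members are assumed from the SDK's usual unified finish reasons.
type FinishReason = {
  unified: "stop" | "length" | "content-filter" | "tool-calls" | "error" | "other";
  raw: string | undefined; // e.g. OpenAI's "tool_calls"; undefined when absent
};
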
@@ -937,7 +963,10 @@ var OpenAIChatLanguageModel = class {
      fetch: this.config.fetch
    });
    const toolCalls = [];
-   let finishReason = "unknown";
+   let finishReason = {
+     unified: "other",
+     raw: void 0
+   };
    let usage = void 0;
    let metadataExtracted = false;
    let isActiveText = false;
@@ -954,13 +983,13 @@ var OpenAIChatLanguageModel = class {
          controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
        }
        if (!chunk.success) {
-         finishReason = "error";
+         finishReason = { unified: "error", raw: void 0 };
          controller.enqueue({ type: "error", error: chunk.error });
          return;
        }
        const value = chunk.value;
        if ("error" in value) {
-         finishReason = "error";
+         finishReason = { unified: "error", raw: void 0 };
          controller.enqueue({ type: "error", error: value.error });
          return;
        }
@@ -985,7 +1014,10 @@ var OpenAIChatLanguageModel = class {
        }
        const choice = value.choices[0];
        if ((choice == null ? void 0 : choice.finish_reason) != null) {
-         finishReason = mapOpenAIFinishReason(choice.finish_reason);
+         finishReason = {
+           unified: mapOpenAIFinishReason(choice.finish_reason),
+           raw: choice.finish_reason
+         };
        }
        if (((_e = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _e.content) != null) {
          providerMetadata.openai.logprobs = choice.logprobs.content;
@@ -1265,7 +1297,7 @@ function mapOpenAIFinishReason2(finishReason) {
    case "tool_calls":
      return "tool-calls";
    default:
-     return "unknown";
+     return "other";
  }
}

@@ -1463,6 +1495,7 @@ var OpenAICompletionLanguageModel = class {
    };
  }
  async doGenerate(options) {
+   var _a;
    const { args, warnings } = await this.getArgs(options);
    const {
      responseHeaders,
@@ -1490,7 +1523,10 @@ var OpenAICompletionLanguageModel = class {
    return {
      content: [{ type: "text", text: choice.text }],
      usage: convertOpenAICompletionUsage(response.usage),
-     finishReason: mapOpenAIFinishReason2(choice.finish_reason),
+     finishReason: {
+       unified: mapOpenAIFinishReason2(choice.finish_reason),
+       raw: (_a = choice.finish_reason) != null ? _a : void 0
+     },
      request: { body: args },
      response: {
        ...getResponseMetadata2(response),
@@ -1524,7 +1560,10 @@ var OpenAICompletionLanguageModel = class {
      abortSignal: options.abortSignal,
      fetch: this.config.fetch
    });
-   let finishReason = "unknown";
+   let finishReason = {
+     unified: "other",
+     raw: void 0
+   };
    const providerMetadata = { openai: {} };
    let usage = void 0;
    let isFirstChunk = true;
@@ -1539,13 +1578,13 @@ var OpenAICompletionLanguageModel = class {
          controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
        }
        if (!chunk.success) {
-         finishReason = "error";
+         finishReason = { unified: "error", raw: void 0 };
          controller.enqueue({ type: "error", error: chunk.error });
          return;
        }
        const value = chunk.value;
        if ("error" in value) {
-         finishReason = "error";
+         finishReason = { unified: "error", raw: void 0 };
          controller.enqueue({ type: "error", error: value.error });
          return;
        }
@@ -1562,7 +1601,10 @@ var OpenAICompletionLanguageModel = class {
        }
        const choice = value.choices[0];
        if ((choice == null ? void 0 : choice.finish_reason) != null) {
-         finishReason = mapOpenAIFinishReason2(choice.finish_reason);
+         finishReason = {
+           unified: mapOpenAIFinishReason2(choice.finish_reason),
+           raw: choice.finish_reason
+         };
        }
        if ((choice == null ? void 0 : choice.logprobs) != null) {
          providerMetadata.openai.logprobs = choice.logprobs;
@@ -1733,11 +1775,13 @@ var modelMaxImagesPerCall = {
  "dall-e-3": 1,
  "dall-e-2": 10,
  "gpt-image-1": 10,
- "gpt-image-1-mini": 10
+ "gpt-image-1-mini": 10,
+ "gpt-image-1.5": 10
};
var hasDefaultResponseFormat = /* @__PURE__ */ new Set([
  "gpt-image-1",
- "gpt-image-1-mini"
+ "gpt-image-1-mini",
+ "gpt-image-1.5"
]);

// src/image/openai-image-model.ts
@@ -1756,6 +1800,8 @@ var OpenAIImageModel = class {
  }
  async doGenerate({
    prompt,
+   files,
+   mask,
    n,
    size,
    aspectRatio,
@@ -1764,7 +1810,7 @@ var OpenAIImageModel = class {
    headers,
    abortSignal
  }) {
-   var _a, _b, _c, _d, _e, _f, _g;
+   var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
    const warnings = [];
    if (aspectRatio != null) {
      warnings.push({
@@ -1777,6 +1823,72 @@ var OpenAIImageModel = class {
      warnings.push({ type: "unsupported", feature: "seed" });
    }
    const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
+   if (files != null) {
+     const { value: response2, responseHeaders: responseHeaders2 } = await (0, import_provider_utils13.postFormDataToApi)({
+       url: this.config.url({
+         path: "/images/edits",
+         modelId: this.modelId
+       }),
+       headers: (0, import_provider_utils13.combineHeaders)(this.config.headers(), headers),
+       formData: (0, import_provider_utils13.convertToFormData)({
+         model: this.modelId,
+         prompt,
+         image: await Promise.all(
+           files.map(
+             (file) => file.type === "file" ? new Blob(
+               [
+                 file.data instanceof Uint8Array ? new Blob([file.data], {
+                   type: file.mediaType
+                 }) : new Blob([(0, import_provider_utils13.convertBase64ToUint8Array)(file.data)], {
+                   type: file.mediaType
+                 })
+               ],
+               { type: file.mediaType }
+             ) : (0, import_provider_utils13.downloadBlob)(file.url)
+           )
+         ),
+         mask: mask != null ? await fileToBlob(mask) : void 0,
+         n,
+         size,
+         ...(_d = providerOptions.openai) != null ? _d : {}
+       }),
+       failedResponseHandler: openaiFailedResponseHandler,
+       successfulResponseHandler: (0, import_provider_utils13.createJsonResponseHandler)(
+         openaiImageResponseSchema
+       ),
+       abortSignal,
+       fetch: this.config.fetch
+     });
+     return {
+       images: response2.data.map((item) => item.b64_json),
+       warnings,
+       usage: response2.usage != null ? {
+         inputTokens: (_e = response2.usage.input_tokens) != null ? _e : void 0,
+         outputTokens: (_f = response2.usage.output_tokens) != null ? _f : void 0,
+         totalTokens: (_g = response2.usage.total_tokens) != null ? _g : void 0
+       } : void 0,
+       response: {
+         timestamp: currentDate,
+         modelId: this.modelId,
+         headers: responseHeaders2
+       },
+       providerMetadata: {
+         openai: {
+           images: response2.data.map((item) => {
+             var _a2, _b2, _c2, _d2, _e2;
+             return {
+               ...item.revised_prompt ? { revisedPrompt: item.revised_prompt } : {},
+               created: (_a2 = response2.created) != null ? _a2 : void 0,
+               size: (_b2 = response2.size) != null ? _b2 : void 0,
+               quality: (_c2 = response2.quality) != null ? _c2 : void 0,
+               background: (_d2 = response2.background) != null ? _d2 : void 0,
+               outputFormat: (_e2 = response2.output_format) != null ? _e2 : void 0
+             };
+           })
+         }
+       }
+     };
+   }
    const { value: response, responseHeaders } = await (0, import_provider_utils13.postJsonToApi)({
      url: this.config.url({
        path: "/images/generations",
@@ -1788,7 +1900,7 @@ var OpenAIImageModel = class {
        prompt,
        n,
        size,
-       ...(_d = providerOptions.openai) != null ? _d : {},
+       ...(_h = providerOptions.openai) != null ? _h : {},
        ...!hasDefaultResponseFormat.has(this.modelId) ? { response_format: "b64_json" } : {}
      },
      failedResponseHandler: openaiFailedResponseHandler,
@@ -1802,9 +1914,9 @@ var OpenAIImageModel = class {
      images: response.data.map((item) => item.b64_json),
      warnings,
      usage: response.usage != null ? {
-       inputTokens: (_e = response.usage.input_tokens) != null ? _e : void 0,
-       outputTokens: (_f = response.usage.output_tokens) != null ? _f : void 0,
-       totalTokens: (_g = response.usage.total_tokens) != null ? _g : void 0
+       inputTokens: (_i = response.usage.input_tokens) != null ? _i : void 0,
+       outputTokens: (_j = response.usage.output_tokens) != null ? _j : void 0,
+       totalTokens: (_k = response.usage.total_tokens) != null ? _k : void 0
      } : void 0,
      response: {
        timestamp: currentDate,
@@ -1829,6 +1941,14 @@ var OpenAIImageModel = class {
      };
    }
  };
+ async function fileToBlob(file) {
+   if (!file) return void 0;
+   if (file.type === "url") {
+     return (0, import_provider_utils13.downloadBlob)(file.url);
+   }
+   const data = file.data instanceof Uint8Array ? file.data : (0, import_provider_utils13.convertBase64ToUint8Array)(file.data);
+   return new Blob([data], { type: file.mediaType });
+ }

  // src/transcription/openai-transcription-model.ts
  var import_provider_utils16 = require("@ai-sdk/provider-utils");
@@ -2349,9 +2469,10 @@ async function convertToOpenAIResponsesInput({
    hasShellTool = false,
    hasApplyPatchTool = false
  }) {
-   var _a, _b, _c, _d, _e;
+   var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
    const input = [];
    const warnings = [];
+   const processedApprovalIds = /* @__PURE__ */ new Set();
    for (const { role, content } of prompt) {
      switch (role) {
        case "system": {
@@ -2442,10 +2563,13 @@ async function convertToOpenAIResponsesInput({
            break;
          }
          case "tool-call": {
+           const id = (_g = (_d = (_c = part.providerOptions) == null ? void 0 : _c.openai) == null ? void 0 : _d.itemId) != null ? _g : (_f = (_e = part.providerMetadata) == null ? void 0 : _e.openai) == null ? void 0 : _f.itemId;
            if (part.providerExecuted) {
+             if (store && id != null) {
+               input.push({ type: "item_reference", id });
+             }
              break;
            }
-           const id = (_d = (_c = part.providerOptions) == null ? void 0 : _c.openai) == null ? void 0 : _d.itemId;
            if (store && id != null) {
              input.push({ type: "item_reference", id });
              break;
@@ -2502,8 +2626,12 @@ async function convertToOpenAIResponsesInput({
          }
          // assistant tool result parts are from provider-executed tools:
          case "tool-result": {
+           if (part.output.type === "execution-denied" || part.output.type === "json" && typeof part.output.value === "object" && part.output.value != null && "type" in part.output.value && part.output.value.type === "execution-denied") {
+             break;
+           }
            if (store) {
-             input.push({ type: "item_reference", id: part.toolCallId });
+             const itemId = (_j = (_i = (_h = part.providerMetadata) == null ? void 0 : _h.openai) == null ? void 0 : _i.itemId) != null ? _j : part.toolCallId;
+             input.push({ type: "item_reference", id: itemId });
            } else {
              warnings.push({
                type: "other",
@@ -2572,7 +2700,32 @@ async function convertToOpenAIResponsesInput({
        }
        case "tool": {
          for (const part of content) {
+           if (part.type === "tool-approval-response") {
+             const approvalResponse = part;
+             if (processedApprovalIds.has(approvalResponse.approvalId)) {
+               continue;
+             }
+             processedApprovalIds.add(approvalResponse.approvalId);
+             if (store) {
+               input.push({
+                 type: "item_reference",
+                 id: approvalResponse.approvalId
+               });
+             }
+             input.push({
+               type: "mcp_approval_response",
+               approval_request_id: approvalResponse.approvalId,
+               approve: approvalResponse.approved
+             });
+             continue;
+           }
            const output = part.output;
+           if (output.type === "execution-denied") {
+             const approvalId = (_l = (_k = output.providerOptions) == null ? void 0 : _k.openai) == null ? void 0 : _l.approvalId;
+             if (approvalId) {
+               continue;
+             }
+           }
            const resolvedToolName = toolNameMapping.toProviderToolName(
              part.toolName
            );
@@ -2586,7 +2739,7 @@ async function convertToOpenAIResponsesInput({
                call_id: part.toolCallId,
                output: parsedOutput.output
              });
-             break;
+             continue;
            }
            if (hasShellTool && resolvedToolName === "shell" && output.type === "json") {
              const parsedOutput = await (0, import_provider_utils22.validateTypes)({
@@ -2605,7 +2758,7 @@ async function convertToOpenAIResponsesInput({
                }
              }))
            });
-           break;
+           continue;
          }
          if (hasApplyPatchTool && part.toolName === "apply_patch" && output.type === "json") {
            const parsedOutput = await (0, import_provider_utils22.validateTypes)({
@@ -2618,7 +2771,7 @@ async function convertToOpenAIResponsesInput({
              status: parsedOutput.status,
              output: parsedOutput.output
            });
-           break;
+           continue;
          }
          let contentValue;
          switch (output.type) {
@@ -2627,7 +2780,7 @@ async function convertToOpenAIResponsesInput({
              contentValue = output.value;
              break;
            case "execution-denied":
-             contentValue = (_e = output.reason) != null ? _e : "Tool execution denied.";
+             contentValue = (_m = output.reason) != null ? _m : "Tool execution denied.";
              break;
            case "json":
            case "error-json":
@@ -2699,7 +2852,7 @@ function mapOpenAIResponseFinishReason({
    case "content_filter":
      return "content-filter";
    default:
-     return hasFunctionCall ? "tool-calls" : "unknown";
+     return hasFunctionCall ? "tool-calls" : "other";
  }
}

@@ -2802,7 +2955,8 @@ var openaiResponsesChunkSchema = (0, import_provider_utils23.lazySchema)(
    import_v416.z.object({
      type: import_v416.z.literal("mcp_call"),
      id: import_v416.z.string(),
-     status: import_v416.z.string()
+     status: import_v416.z.string(),
+     approval_request_id: import_v416.z.string().nullish()
    }),
    import_v416.z.object({
      type: import_v416.z.literal("mcp_list_tools"),
@@ -2959,7 +3113,8 @@ var openaiResponsesChunkSchema = (0, import_provider_utils23.lazySchema)(
          code: import_v416.z.union([import_v416.z.number(), import_v416.z.string()]).optional(),
          message: import_v416.z.string().optional()
        }).loose()
-     ]).nullish()
+     ]).nullish(),
+     approval_request_id: import_v416.z.string().nullish()
    }),
    import_v416.z.object({
      type: import_v416.z.literal("mcp_list_tools"),
@@ -2988,7 +3143,7 @@ var openaiResponsesChunkSchema = (0, import_provider_utils23.lazySchema)(
      server_label: import_v416.z.string(),
      name: import_v416.z.string(),
      arguments: import_v416.z.string(),
-     approval_request_id: import_v416.z.string()
+     approval_request_id: import_v416.z.string().optional()
    }),
    import_v416.z.object({
      type: import_v416.z.literal("apply_patch_call"),
@@ -3098,6 +3253,19 @@ var openaiResponsesChunkSchema = (0, import_provider_utils23.lazySchema)(
      item_id: import_v416.z.string(),
      summary_index: import_v416.z.number()
    }),
+   import_v416.z.object({
+     type: import_v416.z.literal("response.apply_patch_call_operation_diff.delta"),
+     item_id: import_v416.z.string(),
+     output_index: import_v416.z.number(),
+     delta: import_v416.z.string(),
+     obfuscation: import_v416.z.string().nullish()
+   }),
+   import_v416.z.object({
+     type: import_v416.z.literal("response.apply_patch_call_operation_diff.done"),
+     item_id: import_v416.z.string(),
+     output_index: import_v416.z.number(),
+     diff: import_v416.z.string()
+   }),
    import_v416.z.object({
      type: import_v416.z.literal("error"),
      sequence_number: import_v416.z.number(),
@@ -3298,7 +3466,8 @@ var openaiResponsesResponseSchema = (0, import_provider_utils23.lazySchema)(
          code: import_v416.z.union([import_v416.z.number(), import_v416.z.string()]).optional(),
          message: import_v416.z.string().optional()
        }).loose()
-     ]).nullish()
+     ]).nullish(),
+     approval_request_id: import_v416.z.string().nullish()
    }),
    import_v416.z.object({
      type: import_v416.z.literal("mcp_list_tools"),
@@ -3327,7 +3496,7 @@ var openaiResponsesResponseSchema = (0, import_provider_utils23.lazySchema)(
      server_label: import_v416.z.string(),
      name: import_v416.z.string(),
      arguments: import_v416.z.string(),
-     approval_request_id: import_v416.z.string()
+     approval_request_id: import_v416.z.string().optional()
    }),
    import_v416.z.object({
      type: import_v416.z.literal("apply_patch_call"),
@@ -3578,7 +3747,26 @@ var openaiResponsesProviderOptionsSchema = (0, import_provider_utils24.lazySchem
       * Defaults to `undefined`.
       * @see https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
       */
-     user: import_v417.z.string().nullish()
+     user: import_v417.z.string().nullish(),
+     /**
+      * Override the system message mode for this model.
+      * - 'system': Use the 'system' role for system messages (default for most models)
+      * - 'developer': Use the 'developer' role for system messages (used by reasoning models)
+      * - 'remove': Remove system messages entirely
+      *
+      * If not specified, the mode is automatically determined based on the model.
+      */
+     systemMessageMode: import_v417.z.enum(["system", "developer", "remove"]).optional(),
+     /**
+      * Force treating this model as a reasoning model.
+      *
+      * This is useful for "stealth" reasoning models (e.g. via a custom baseURL)
+      * where the model ID is not recognized by the SDK's allowlist.
+      *
+      * When enabled, the SDK applies reasoning-model parameter compatibility rules
+      * and defaults `systemMessageMode` to `developer` unless overridden.
+      */
+     forceReasoning: import_v417.z.boolean().optional()
    })
  )
);
@@ -3742,16 +3930,14 @@ var mcpArgsSchema = (0, import_provider_utils28.lazySchema)(
    authorization: import_v421.z.string().optional(),
    connectorId: import_v421.z.string().optional(),
    headers: import_v421.z.record(import_v421.z.string(), import_v421.z.string()).optional(),
-   // TODO: Integrate this MCP tool approval with our SDK's existing tool approval architecture
-   // requireApproval: z
-   //   .union([
-   //     z.enum(['always', 'never']),
-   //     z.object({
-   //       readOnly: z.boolean().optional(),
-   //       toolNames: z.array(z.string()).optional(),
-   //     }),
-   //   ])
-   //   .optional(),
+   requireApproval: import_v421.z.union([
+     import_v421.z.enum(["always", "never"]),
+     import_v421.z.object({
+       never: import_v421.z.object({
+         toolNames: import_v421.z.array(import_v421.z.string()).optional()
+       }).optional()
+     })
+   ]).optional(),
    serverDescription: import_v421.z.string().optional(),
    serverUrl: import_v421.z.string().optional()
  }).refine(
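
`requireApproval` replaces the commented-out TODO and is forwarded to the Responses API's `require_approval` parameter (see the `prepareResponsesTools` hunks below, which previously hard-coded `"never"`). A configuration sketch: the schema above allows `"always"`, `"never"`, or a filter object, while the exposure of the factory as `openai.tools.mcp` and the server details are assumptions:

import { openai } from "@ai-sdk/openai";

const mcpTool = openai.tools.mcp({
  serverLabel: "docs",
  serverUrl: "https://mcp.example.com/mcp",
  // Require approval for everything except the listed read-only tools.
  requireApproval: {
    never: { toolNames: ["read_wiki_structure"] },
  },
});
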
@@ -3763,36 +3949,14 @@ var mcpArgsSchema = (0, import_provider_utils28.lazySchema)(
var mcpInputSchema = (0, import_provider_utils28.lazySchema)(() => (0, import_provider_utils28.zodSchema)(import_v421.z.object({})));
var mcpOutputSchema = (0, import_provider_utils28.lazySchema)(
  () => (0, import_provider_utils28.zodSchema)(
-   import_v421.z.discriminatedUnion("type", [
-     import_v421.z.object({
-       type: import_v421.z.literal("call"),
-       serverLabel: import_v421.z.string(),
-       name: import_v421.z.string(),
-       arguments: import_v421.z.string(),
-       output: import_v421.z.string().nullable().optional(),
-       error: import_v421.z.union([import_v421.z.string(), jsonValueSchema]).optional()
-     }),
-     import_v421.z.object({
-       type: import_v421.z.literal("listTools"),
-       serverLabel: import_v421.z.string(),
-       tools: import_v421.z.array(
-         import_v421.z.object({
-           name: import_v421.z.string(),
-           description: import_v421.z.string().optional(),
-           inputSchema: jsonValueSchema,
-           annotations: import_v421.z.record(import_v421.z.string(), jsonValueSchema).optional()
-         })
-       ),
-       error: import_v421.z.union([import_v421.z.string(), jsonValueSchema]).optional()
-     }),
-     import_v421.z.object({
-       type: import_v421.z.literal("approvalRequest"),
-       serverLabel: import_v421.z.string(),
-       name: import_v421.z.string(),
-       arguments: import_v421.z.string(),
-       approvalRequestId: import_v421.z.string()
-     })
-   ])
+   import_v421.z.object({
+     type: import_v421.z.literal("call"),
+     serverLabel: import_v421.z.string(),
+     name: import_v421.z.string(),
+     arguments: import_v421.z.string(),
+     output: import_v421.z.string().nullish(),
+     error: import_v421.z.union([import_v421.z.string(), jsonValueSchema]).optional()
+   })
  )
);
var mcpToolFactory = (0, import_provider_utils28.createProviderToolFactoryWithOutputSchema)({
@@ -4025,6 +4189,11 @@ async function prepareResponsesTools({
    value: tool.args,
    schema: mcpArgsSchema
  });
+ const mapApprovalFilter = (filter) => ({
+   tool_names: filter.toolNames
+ });
+ const requireApproval = args.requireApproval;
+ const requireApprovalParam = requireApproval == null ? void 0 : typeof requireApproval === "string" ? requireApproval : requireApproval.never != null ? { never: mapApprovalFilter(requireApproval.never) } : void 0;
  openaiTools.push({
    type: "mcp",
    server_label: args.serverLabel,
@@ -4035,7 +4204,7 @@ async function prepareResponsesTools({
    authorization: args.authorization,
    connector_id: args.connectorId,
    headers: args.headers,
-   require_approval: "never",
+   require_approval: requireApprovalParam != null ? requireApprovalParam : "never",
    server_description: args.serverDescription,
    server_url: args.serverUrl
  });
@@ -4077,6 +4246,21 @@ async function prepareResponsesTools({
}

// src/responses/openai-responses-language-model.ts
+ function extractApprovalRequestIdToToolCallIdMapping(prompt) {
+   var _a, _b;
+   const mapping = {};
+   for (const message of prompt) {
+     if (message.role !== "assistant") continue;
+     for (const part of message.content) {
+       if (part.type !== "tool-call") continue;
+       const approvalRequestId = (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.approvalRequestId;
+       if (approvalRequestId != null) {
+         mapping[approvalRequestId] = part.toolCallId;
+       }
+     }
+   }
+   return mapping;
+ }
var OpenAIResponsesLanguageModel = class {
  constructor(modelId, config) {
    this.specificationVersion = "v3";
@@ -4105,7 +4289,7 @@ var OpenAIResponsesLanguageModel = class {
    toolChoice,
    responseFormat
  }) {
-   var _a, _b, _c, _d;
+   var _a, _b, _c, _d, _e, _f;
    const warnings = [];
    const modelCapabilities = getOpenAILanguageModelCapabilities(this.modelId);
    if (topK != null) {
@@ -4128,6 +4312,7 @@ var OpenAIResponsesLanguageModel = class {
      providerOptions,
      schema: openaiResponsesProviderOptionsSchema
    });
+   const isReasoningModel = (_a = openaiOptions == null ? void 0 : openaiOptions.forceReasoning) != null ? _a : modelCapabilities.isReasoningModel;
    if ((openaiOptions == null ? void 0 : openaiOptions.conversation) && (openaiOptions == null ? void 0 : openaiOptions.previousResponseId)) {
      warnings.push({
        type: "unsupported",
@@ -4152,15 +4337,15 @@ var OpenAIResponsesLanguageModel = class {
    const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
      prompt,
      toolNameMapping,
-     systemMessageMode: modelCapabilities.systemMessageMode,
+     systemMessageMode: (_b = openaiOptions == null ? void 0 : openaiOptions.systemMessageMode) != null ? _b : isReasoningModel ? "developer" : modelCapabilities.systemMessageMode,
      fileIdPrefixes: this.config.fileIdPrefixes,
-     store: (_a = openaiOptions == null ? void 0 : openaiOptions.store) != null ? _a : true,
+     store: (_c = openaiOptions == null ? void 0 : openaiOptions.store) != null ? _c : true,
      hasLocalShellTool: hasOpenAITool("openai.local_shell"),
      hasShellTool: hasOpenAITool("openai.shell"),
      hasApplyPatchTool: hasOpenAITool("openai.apply_patch")
    });
    warnings.push(...inputWarnings);
-   const strictJsonSchema = (_b = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _b : true;
+   const strictJsonSchema = (_d = openaiOptions == null ? void 0 : openaiOptions.strictJsonSchema) != null ? _d : true;
    let include = openaiOptions == null ? void 0 : openaiOptions.include;
    function addInclude(key) {
      if (include == null) {
@@ -4176,9 +4361,9 @@ var OpenAIResponsesLanguageModel = class {
    if (topLogprobs) {
      addInclude("message.output_text.logprobs");
    }
-   const webSearchToolName = (_c = tools == null ? void 0 : tools.find(
+   const webSearchToolName = (_e = tools == null ? void 0 : tools.find(
      (tool) => tool.type === "provider" && (tool.id === "openai.web_search" || tool.id === "openai.web_search_preview")
-   )) == null ? void 0 : _c.name;
+   )) == null ? void 0 : _e.name;
    if (webSearchToolName) {
      addInclude("web_search_call.action.sources");
    }
@@ -4186,7 +4371,7 @@ var OpenAIResponsesLanguageModel = class {
      addInclude("code_interpreter_call.outputs");
    }
    const store = openaiOptions == null ? void 0 : openaiOptions.store;
-   if (store === false && modelCapabilities.isReasoningModel) {
+   if (store === false && isReasoningModel) {
      addInclude("reasoning.encrypted_content");
    }
    const baseArgs = {
@@ -4201,7 +4386,7 @@ var OpenAIResponsesLanguageModel = class {
        format: responseFormat.schema != null ? {
          type: "json_schema",
          strict: strictJsonSchema,
-         name: (_d = responseFormat.name) != null ? _d : "response",
+         name: (_f = responseFormat.name) != null ? _f : "response",
          description: responseFormat.description,
          schema: responseFormat.schema
        } : { type: "json_object" }
@@ -4228,7 +4413,7 @@ var OpenAIResponsesLanguageModel = class {
      top_logprobs: topLogprobs,
      truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
      // model-specific settings:
-     ...modelCapabilities.isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
+     ...isReasoningModel && ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null || (openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) && {
        reasoning: {
          ...(openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null && {
            effort: openaiOptions.reasoningEffort
@@ -4239,7 +4424,7 @@ var OpenAIResponsesLanguageModel = class {
        }
      }
    };
-   if (modelCapabilities.isReasoningModel) {
+   if (isReasoningModel) {
      if (!((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) === "none" && modelCapabilities.supportsNonReasoningParameters)) {
        if (baseArgs.temperature != null) {
          baseArgs.temperature = void 0;
@@ -4311,7 +4496,7 @@ var OpenAIResponsesLanguageModel = class {
    };
  }
  async doGenerate(options) {
-   var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
+   var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A, _B, _C, _D, _E;
    const {
      args: body,
      warnings,
@@ -4323,6 +4508,7 @@ var OpenAIResponsesLanguageModel = class {
      modelId: this.modelId
    });
    const providerKey = this.config.provider.replace(".responses", "");
+   const approvalRequestIdToDummyToolCallIdFromPrompt = extractApprovalRequestIdToToolCallIdMapping(options.prompt);
    const {
      responseHeaders,
      value: response,
@@ -4539,17 +4725,20 @@ var OpenAIResponsesLanguageModel = class {
          break;
        }
        case "mcp_call": {
+         const toolCallId = part.approval_request_id != null ? (_v = approvalRequestIdToDummyToolCallIdFromPrompt[part.approval_request_id]) != null ? _v : part.id : part.id;
+         const toolName = `mcp.${part.name}`;
          content.push({
            type: "tool-call",
-           toolCallId: part.id,
-           toolName: toolNameMapping.toCustomToolName("mcp"),
-           input: JSON.stringify({}),
-           providerExecuted: true
+           toolCallId,
+           toolName,
+           input: part.arguments,
+           providerExecuted: true,
+           dynamic: true
          });
          content.push({
-           type: "tool-result",
-           toolCallId: part.id,
-           toolName: toolNameMapping.toCustomToolName("mcp"),
+           type: "tool-result",
+           toolCallId,
+           toolName,
            result: {
              type: "call",
              serverLabel: part.server_label,
@@ -4557,58 +4746,34 @@ var OpenAIResponsesLanguageModel = class {
              arguments: part.arguments,
              ...part.output != null ? { output: part.output } : {},
              ...part.error != null ? { error: part.error } : {}
+           },
+           providerMetadata: {
+             [providerKey]: {
+               itemId: part.id
+             }
            }
          });
          break;
        }
        case "mcp_list_tools": {
-         content.push({
-           type: "tool-call",
-           toolCallId: part.id,
-           toolName: toolNameMapping.toCustomToolName("mcp"),
-           input: JSON.stringify({}),
-           providerExecuted: true
-         });
-         content.push({
-           type: "tool-result",
-           toolCallId: part.id,
-           toolName: toolNameMapping.toCustomToolName("mcp"),
-           result: {
-             type: "listTools",
-             serverLabel: part.server_label,
-             tools: part.tools.map((t) => {
-               var _a2, _b2;
-               return {
-                 name: t.name,
-                 description: (_a2 = t.description) != null ? _a2 : void 0,
-                 inputSchema: t.input_schema,
-                 annotations: (_b2 = t.annotations) != null ? _b2 : void 0
-               };
-             }),
-             ...part.error != null ? { error: part.error } : {}
-           }
-         });
          break;
        }
        case "mcp_approval_request": {
+         const approvalRequestId = (_w = part.approval_request_id) != null ? _w : part.id;
+         const dummyToolCallId = (_z = (_y = (_x = this.config).generateId) == null ? void 0 : _y.call(_x)) != null ? _z : (0, import_provider_utils32.generateId)();
+         const toolName = `mcp.${part.name}`;
          content.push({
            type: "tool-call",
-           toolCallId: part.id,
-           toolName: toolNameMapping.toCustomToolName("mcp"),
-           input: JSON.stringify({}),
-           providerExecuted: true
+           toolCallId: dummyToolCallId,
+           toolName,
+           input: part.arguments,
+           providerExecuted: true,
+           dynamic: true
          });
          content.push({
-           type: "tool-result",
-           toolCallId: part.id,
-           toolName: toolNameMapping.toCustomToolName("mcp"),
-           result: {
-             type: "approvalRequest",
-             serverLabel: part.server_label,
-             name: part.name,
-             arguments: part.arguments,
-             approvalRequestId: part.approval_request_id
-           }
+           type: "tool-approval-request",
+           approvalId: approvalRequestId,
+           toolCallId: dummyToolCallId
          });
          break;
        }
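
Together with the `tool-approval-response` handling in `convertToOpenAIResponsesInput` above, this replaces the old `approvalRequest` tool-result with the SDK's first-class approval flow: the provider now emits a dynamic `mcp.<name>` tool call plus a `tool-approval-request` part, and the caller answers with a `tool-approval-response`, which the converter turns into an `mcp_approval_response` input item. A rough sketch of the shapes involved, read directly off this diff (IDs are illustrative):

// 1. Emitted by doGenerate for an mcp_approval_request output item:
const emitted = [
  {
    type: "tool-call",
    toolCallId: "call_dummy123", // generated dummy id
    toolName: "mcp.search_docs", // `mcp.${part.name}`
    input: '{"query":"..."}',    // the raw arguments string
    providerExecuted: true,
    dynamic: true,
  },
  {
    type: "tool-approval-request",
    approvalId: "mcpr_abc",      // OpenAI's approval_request_id
    toolCallId: "call_dummy123",
  },
];

// 2. Sent back by the caller in the next prompt's tool message:
const approvalResponse = {
  type: "tool-approval-response",
  approvalId: "mcpr_abc",
  approved: true,
};
// convertToOpenAIResponsesInput maps this to:
// { type: "mcp_approval_response", approval_request_id: "mcpr_abc", approve: true }
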
@@ -4645,13 +4810,13 @@ var OpenAIResponsesLanguageModel = class {
          toolName: toolNameMapping.toCustomToolName("file_search"),
          result: {
            queries: part.queries,
-           results: (_w = (_v = part.results) == null ? void 0 : _v.map((result) => ({
+           results: (_B = (_A = part.results) == null ? void 0 : _A.map((result) => ({
              attributes: result.attributes,
              fileId: result.file_id,
              filename: result.filename,
              score: result.score,
              text: result.text
-           }))) != null ? _w : null
+           }))) != null ? _B : null
          }
        });
        break;
@@ -4708,10 +4873,13 @@ var OpenAIResponsesLanguageModel = class {
    const usage = response.usage;
    return {
      content,
-     finishReason: mapOpenAIResponseFinishReason({
-       finishReason: (_x = response.incomplete_details) == null ? void 0 : _x.reason,
-       hasFunctionCall
-     }),
+     finishReason: {
+       unified: mapOpenAIResponseFinishReason({
+         finishReason: (_C = response.incomplete_details) == null ? void 0 : _C.reason,
+         hasFunctionCall
+       }),
+       raw: (_E = (_D = response.incomplete_details) == null ? void 0 : _D.reason) != null ? _E : void 0
+     },
      usage: convertOpenAIResponsesUsage(usage),
      request: { body },
      response: {
@@ -4752,7 +4920,12 @@ var OpenAIResponsesLanguageModel = class {
    });
    const self = this;
    const providerKey = this.config.provider.replace(".responses", "");
-   let finishReason = "unknown";
+   const approvalRequestIdToDummyToolCallIdFromPrompt = extractApprovalRequestIdToToolCallIdMapping(options.prompt);
+   const approvalRequestIdToDummyToolCallIdFromStream = /* @__PURE__ */ new Map();
+   let finishReason = {
+     unified: "other",
+     raw: void 0
+   };
    let usage = void 0;
    const logprobs = [];
    let responseId = null;
@@ -4768,12 +4941,12 @@ var OpenAIResponsesLanguageModel = class {
        controller.enqueue({ type: "stream-start", warnings });
      },
      transform(chunk, controller) {
-       var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A;
+       var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A, _B, _C, _D, _E, _F, _G, _H, _I, _J;
        if (options.includeRawChunks) {
          controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
        }
        if (!chunk.success) {
-         finishReason = "error";
+         finishReason = { unified: "error", raw: void 0 };
          controller.enqueue({ type: "error", error: chunk.error });
          return;
        }
@@ -4864,32 +5037,41 @@ var OpenAIResponsesLanguageModel = class {
              providerExecuted: true
            });
          } else if (value.item.type === "mcp_call" || value.item.type === "mcp_list_tools" || value.item.type === "mcp_approval_request") {
-           controller.enqueue({
-             type: "tool-call",
-             toolCallId: value.item.id,
-             toolName: toolNameMapping.toCustomToolName("mcp"),
-             input: "{}",
-             providerExecuted: true
-           });
          } else if (value.item.type === "apply_patch_call") {
+           const { call_id: callId, operation } = value.item;
            ongoingToolCalls[value.output_index] = {
              toolName: toolNameMapping.toCustomToolName("apply_patch"),
-             toolCallId: value.item.call_id
+             toolCallId: callId,
+             applyPatch: {
+               // delete_file doesn't have diff
+               hasDiff: operation.type === "delete_file",
+               endEmitted: operation.type === "delete_file"
+             }
            };
-           if (value.item.status === "completed") {
+           controller.enqueue({
+             type: "tool-input-start",
+             id: callId,
+             toolName: toolNameMapping.toCustomToolName("apply_patch")
+           });
+           if (operation.type === "delete_file") {
+             const inputString = JSON.stringify({
+               callId,
+               operation
+             });
              controller.enqueue({
-               type: "tool-call",
-               toolCallId: value.item.call_id,
-               toolName: toolNameMapping.toCustomToolName("apply_patch"),
-               input: JSON.stringify({
-                 callId: value.item.call_id,
-                 operation: value.item.operation
-               }),
-               providerMetadata: {
-                 [providerKey]: {
-                   itemId: value.item.id
-                 }
-               }
+               type: "tool-input-delta",
+               id: callId,
+               delta: inputString
+             });
+             controller.enqueue({
+               type: "tool-input-end",
+               id: callId
+             });
+           } else {
+             controller.enqueue({
+               type: "tool-input-delta",
+               id: callId,
+               delta: `{"callId":"${escapeJSONDelta(callId)}","operation":{"type":"${escapeJSONDelta(operation.type)}","path":"${escapeJSONDelta(operation.path)}","diff":"`
              });
            }
          } else if (value.item.type === "shell_call") {
@@ -5026,10 +5208,23 @@ var OpenAIResponsesLanguageModel = class {
            });
          } else if (value.item.type === "mcp_call") {
            ongoingToolCalls[value.output_index] = void 0;
+           const approvalRequestId = (_d = value.item.approval_request_id) != null ? _d : void 0;
+           const aliasedToolCallId = approvalRequestId != null ? (_f = (_e = approvalRequestIdToDummyToolCallIdFromStream.get(
+             approvalRequestId
+           )) != null ? _e : approvalRequestIdToDummyToolCallIdFromPrompt[approvalRequestId]) != null ? _f : value.item.id : value.item.id;
+           const toolName = `mcp.${value.item.name}`;
+           controller.enqueue({
+             type: "tool-call",
+             toolCallId: aliasedToolCallId,
+             toolName,
+             input: value.item.arguments,
+             providerExecuted: true,
+             dynamic: true
+           });
            controller.enqueue({
              type: "tool-result",
-             toolCallId: value.item.id,
-             toolName: toolNameMapping.toCustomToolName("mcp"),
+             toolCallId: aliasedToolCallId,
+             toolName,
              result: {
                type: "call",
                serverLabel: value.item.server_label,
@@ -5037,35 +5232,40 @@ var OpenAIResponsesLanguageModel = class {
                arguments: value.item.arguments,
                ...value.item.output != null ? { output: value.item.output } : {},
                ...value.item.error != null ? { error: value.item.error } : {}
+             },
+             providerMetadata: {
+               [providerKey]: {
+                 itemId: value.item.id
+               }
              }
            });
          } else if (value.item.type === "mcp_list_tools") {
            ongoingToolCalls[value.output_index] = void 0;
-           controller.enqueue({
-             type: "tool-result",
-             toolCallId: value.item.id,
-             toolName: toolNameMapping.toCustomToolName("mcp"),
-             result: {
-               type: "listTools",
-               serverLabel: value.item.server_label,
-               tools: value.item.tools.map((t) => {
-                 var _a2, _b2;
-                 return {
-                   name: t.name,
-                   description: (_a2 = t.description) != null ? _a2 : void 0,
-                   inputSchema: t.input_schema,
-                   annotations: (_b2 = t.annotations) != null ? _b2 : void 0
-                 };
-               }),
-               ...value.item.error != null ? { error: value.item.error } : {}
-             }
-           });
          } else if (value.item.type === "apply_patch_call") {
-           ongoingToolCalls[value.output_index] = void 0;
-           if (value.item.status === "completed") {
+           const toolCall = ongoingToolCalls[value.output_index];
+           if ((toolCall == null ? void 0 : toolCall.applyPatch) && !toolCall.applyPatch.endEmitted && value.item.operation.type !== "delete_file") {
+             if (!toolCall.applyPatch.hasDiff) {
+               controller.enqueue({
+                 type: "tool-input-delta",
+                 id: toolCall.toolCallId,
+                 delta: escapeJSONDelta(value.item.operation.diff)
+               });
+             }
+             controller.enqueue({
+               type: "tool-input-delta",
+               id: toolCall.toolCallId,
+               delta: '"}}'
+             });
+             controller.enqueue({
+               type: "tool-input-end",
+               id: toolCall.toolCallId
+             });
+             toolCall.applyPatch.endEmitted = true;
+           }
+           if (toolCall && value.item.status === "completed") {
              controller.enqueue({
                type: "tool-call",
-               toolCallId: value.item.call_id,
+               toolCallId: toolCall.toolCallId,
                toolName: toolNameMapping.toCustomToolName("apply_patch"),
                input: JSON.stringify({
                  callId: value.item.call_id,
@@ -5078,19 +5278,28 @@ var OpenAIResponsesLanguageModel = class {
              }
            });
          }
+         ongoingToolCalls[value.output_index] = void 0;
        } else if (value.item.type === "mcp_approval_request") {
          ongoingToolCalls[value.output_index] = void 0;
+         const dummyToolCallId = (_i = (_h = (_g = self.config).generateId) == null ? void 0 : _h.call(_g)) != null ? _i : (0, import_provider_utils32.generateId)();
+         const approvalRequestId = (_j = value.item.approval_request_id) != null ? _j : value.item.id;
+         approvalRequestIdToDummyToolCallIdFromStream.set(
+           approvalRequestId,
+           dummyToolCallId
+         );
+         const toolName = `mcp.${value.item.name}`;
          controller.enqueue({
-           type: "tool-result",
-           toolCallId: value.item.id,
-           toolName: toolNameMapping.toCustomToolName("mcp"),
-           result: {
-             type: "approvalRequest",
-             serverLabel: value.item.server_label,
-             name: value.item.name,
-             arguments: value.item.arguments,
-             approvalRequestId: value.item.approval_request_id
-           }
+           type: "tool-call",
+           toolCallId: dummyToolCallId,
+           toolName,
+           input: value.item.arguments,
+           providerExecuted: true,
+           dynamic: true
+         });
+         controller.enqueue({
+           type: "tool-approval-request",
+           approvalId: approvalRequestId,
+           toolCallId: dummyToolCallId
          });
        } else if (value.item.type === "local_shell_call") {
          ongoingToolCalls[value.output_index] = void 0;
@@ -5141,7 +5350,7 @@ var OpenAIResponsesLanguageModel = class {
          providerMetadata: {
            [providerKey]: {
              itemId: value.item.id,
-             reasoningEncryptedContent: (_d = value.item.encrypted_content) != null ? _d : null
+             reasoningEncryptedContent: (_k = value.item.encrypted_content) != null ? _k : null
            }
          }
        });
@@ -5157,6 +5366,38 @@ var OpenAIResponsesLanguageModel = class {
            delta: value.delta
          });
        }
+     } else if (isResponseApplyPatchCallOperationDiffDeltaChunk(value)) {
+       const toolCall = ongoingToolCalls[value.output_index];
+       if (toolCall == null ? void 0 : toolCall.applyPatch) {
+         controller.enqueue({
+           type: "tool-input-delta",
+           id: toolCall.toolCallId,
+           delta: escapeJSONDelta(value.delta)
+         });
+         toolCall.applyPatch.hasDiff = true;
+       }
+     } else if (isResponseApplyPatchCallOperationDiffDoneChunk(value)) {
+       const toolCall = ongoingToolCalls[value.output_index];
+       if ((toolCall == null ? void 0 : toolCall.applyPatch) && !toolCall.applyPatch.endEmitted) {
+         if (!toolCall.applyPatch.hasDiff) {
+           controller.enqueue({
+             type: "tool-input-delta",
+             id: toolCall.toolCallId,
+             delta: escapeJSONDelta(value.diff)
+           });
+           toolCall.applyPatch.hasDiff = true;
+         }
+         controller.enqueue({
+           type: "tool-input-delta",
+           id: toolCall.toolCallId,
+           delta: '"}}'
+         });
+         controller.enqueue({
+           type: "tool-input-end",
+           id: toolCall.toolCallId
+         });
+         toolCall.applyPatch.endEmitted = true;
+       }
      } else if (isResponseImageGenerationCallPartialImageChunk(value)) {
        controller.enqueue({
          type: "tool-result",
@@ -5173,9 +5414,7 @@ var OpenAIResponsesLanguageModel = class {
        controller.enqueue({
          type: "tool-input-delta",
          id: toolCall.toolCallId,
-         // The delta is code, which is embedding in a JSON string.
-         // To escape it, we use JSON.stringify and slice to remove the outer quotes.
-         delta: JSON.stringify(value.delta).slice(1, -1)
+         delta: escapeJSONDelta(value.delta)
        });
      }
    } else if (isResponseCodeInterpreterCallCodeDoneChunk(value)) {
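
The inline `JSON.stringify(value.delta).slice(1, -1)` idiom is now factored into `escapeJSONDelta` (added near the end of this diff) and reused by the apply_patch streaming path above, where the tool input is assembled as a JSON string literal across many deltas. A quick illustration of why the slice works:

// JSON.stringify on a string returns a quoted, escaped literal; dropping
// the outer quotes leaves a fragment safe to splice into a JSON string
// that is still being streamed.
const escapeJSONDelta = (delta: string) => JSON.stringify(delta).slice(1, -1);

escapeJSONDelta("line1\nline2"); // 'line1\\nline2'
escapeJSONDelta('say "hi"');     // 'say \\"hi\\"'

// Streamed apply_patch input, as assembled in the hunks above:
//   {"callId":"...","operation":{"type":"update_file","path":"...","diff":"
//   ...escaped diff deltas...
//   "}}
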
@@ -5215,7 +5454,7 @@ var OpenAIResponsesLanguageModel = class {
          id: value.item_id,
          delta: value.delta
        });
-       if (((_f = (_e = options.providerOptions) == null ? void 0 : _e.openai) == null ? void 0 : _f.logprobs) && value.logprobs) {
+       if (((_m = (_l = options.providerOptions) == null ? void 0 : _l.openai) == null ? void 0 : _m.logprobs) && value.logprobs) {
          logprobs.push(value.logprobs);
        }
      } else if (value.type === "response.reasoning_summary_part.added") {
@@ -5242,7 +5481,7 @@ var OpenAIResponsesLanguageModel = class {
          providerMetadata: {
            [providerKey]: {
              itemId: value.item_id,
-             reasoningEncryptedContent: (_h = (_g = activeReasoning[value.item_id]) == null ? void 0 : _g.encryptedContent) != null ? _h : null
+             reasoningEncryptedContent: (_o = (_n = activeReasoning[value.item_id]) == null ? void 0 : _n.encryptedContent) != null ? _o : null
            }
          }
        });
@@ -5272,10 +5511,13 @@ var OpenAIResponsesLanguageModel = class {
          activeReasoning[value.item_id].summaryParts[value.summary_index] = "can-conclude";
        }
      } else if (isResponseFinishedChunk(value)) {
-       finishReason = mapOpenAIResponseFinishReason({
-         finishReason: (_i = value.response.incomplete_details) == null ? void 0 : _i.reason,
-         hasFunctionCall
-       });
+       finishReason = {
+         unified: mapOpenAIResponseFinishReason({
+           finishReason: (_p = value.response.incomplete_details) == null ? void 0 : _p.reason,
+           hasFunctionCall
+         }),
+         raw: (_r = (_q = value.response.incomplete_details) == null ? void 0 : _q.reason) != null ? _r : void 0
+       };
        usage = value.response.usage;
        if (typeof value.response.service_tier === "string") {
          serviceTier = value.response.service_tier;
@@ -5286,7 +5528,7 @@ var OpenAIResponsesLanguageModel = class {
        controller.enqueue({
          type: "source",
          sourceType: "url",
-         id: (_l = (_k = (_j = self.config).generateId) == null ? void 0 : _k.call(_j)) != null ? _l : (0, import_provider_utils32.generateId)(),
+         id: (_u = (_t = (_s = self.config).generateId) == null ? void 0 : _t.call(_s)) != null ? _u : (0, import_provider_utils32.generateId)(),
          url: value.annotation.url,
          title: value.annotation.title
        });
@@ -5294,10 +5536,10 @@ var OpenAIResponsesLanguageModel = class {
        controller.enqueue({
          type: "source",
          sourceType: "document",
-         id: (_o = (_n = (_m = self.config).generateId) == null ? void 0 : _n.call(_m)) != null ? _o : (0, import_provider_utils32.generateId)(),
+         id: (_x = (_w = (_v = self.config).generateId) == null ? void 0 : _w.call(_v)) != null ? _x : (0, import_provider_utils32.generateId)(),
          mediaType: "text/plain",
-         title: (_q = (_p = value.annotation.quote) != null ? _p : value.annotation.filename) != null ? _q : "Document",
-         filename: (_r = value.annotation.filename) != null ? _r : value.annotation.file_id,
+         title: (_z = (_y = value.annotation.quote) != null ? _y : value.annotation.filename) != null ? _z : "Document",
+         filename: (_A = value.annotation.filename) != null ? _A : value.annotation.file_id,
          ...value.annotation.file_id ? {
            providerMetadata: {
              [providerKey]: {
@@ -5310,10 +5552,10 @@ var OpenAIResponsesLanguageModel = class {
        controller.enqueue({
          type: "source",
          sourceType: "document",
-         id: (_u = (_t = (_s = self.config).generateId) == null ? void 0 : _t.call(_s)) != null ? _u : (0, import_provider_utils32.generateId)(),
+         id: (_D = (_C = (_B = self.config).generateId) == null ? void 0 : _C.call(_B)) != null ? _D : (0, import_provider_utils32.generateId)(),
          mediaType: "text/plain",
-         title: (_w = (_v = value.annotation.filename) != null ? _v : value.annotation.file_id) != null ? _w : "Document",
-         filename: (_x = value.annotation.filename) != null ? _x : value.annotation.file_id,
+         title: (_F = (_E = value.annotation.filename) != null ? _E : value.annotation.file_id) != null ? _F : "Document",
+         filename: (_G = value.annotation.filename) != null ? _G : value.annotation.file_id,
          providerMetadata: {
            [providerKey]: {
              fileId: value.annotation.file_id,
@@ -5326,7 +5568,7 @@ var OpenAIResponsesLanguageModel = class {
        controller.enqueue({
          type: "source",
          sourceType: "document",
-         id: (_A = (_z = (_y = self.config).generateId) == null ? void 0 : _z.call(_y)) != null ? _A : (0, import_provider_utils32.generateId)(),
+         id: (_J = (_I = (_H = self.config).generateId) == null ? void 0 : _I.call(_H)) != null ? _J : (0, import_provider_utils32.generateId)(),
          mediaType: "application/octet-stream",
          title: value.annotation.file_id,
          filename: value.annotation.file_id,
@@ -5392,6 +5634,12 @@ function isResponseCodeInterpreterCallCodeDeltaChunk(chunk) {
function isResponseCodeInterpreterCallCodeDoneChunk(chunk) {
  return chunk.type === "response.code_interpreter_call_code.done";
}
+ function isResponseApplyPatchCallOperationDiffDeltaChunk(chunk) {
+   return chunk.type === "response.apply_patch_call_operation_diff.delta";
+ }
+ function isResponseApplyPatchCallOperationDiffDoneChunk(chunk) {
+   return chunk.type === "response.apply_patch_call_operation_diff.done";
+ }
function isResponseOutputItemAddedChunk(chunk) {
  return chunk.type === "response.output_item.added";
}
@@ -5422,6 +5670,9 @@ function mapWebSearchOutput(action) {
    };
  }
}
+ function escapeJSONDelta(delta) {
+   return JSON.stringify(delta).slice(1, -1);
+ }
// Annotate the CommonJS export names for ESM import in node:
0 && (module.exports = {
  OpenAIChatLanguageModel,