@ai-sdk/openai 2.0.0-beta.8 → 2.0.0-beta.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2006,7 +2006,7 @@ async function convertToOpenAIResponsesMessages({
  prompt,
  systemMessageMode
  }) {
- var _a, _b, _c, _d, _e;
+ var _a, _b, _c, _d, _e, _f, _g, _h;
  const messages = [];
  const warnings = [];
  for (const { role, content } of prompt) {
@@ -2084,7 +2084,8 @@ async function convertToOpenAIResponsesMessages({
  case "text": {
  messages.push({
  role: "assistant",
- content: [{ type: "output_text", text: part.text }]
+ content: [{ type: "output_text", text: part.text }],
+ id: (_c = (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.itemId) != null ? _c : void 0
  });
  break;
  }
@@ -2097,7 +2098,7 @@ async function convertToOpenAIResponsesMessages({
  call_id: part.toolCallId,
  name: part.toolName,
  arguments: JSON.stringify(part.input),
- id: (_c = (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.itemId) != null ? _c : void 0
+ id: (_f = (_e = (_d = part.providerOptions) == null ? void 0 : _d.openai) == null ? void 0 : _e.itemId) != null ? _f : void 0
  });
  break;
  }
@@ -2114,7 +2115,7 @@ async function convertToOpenAIResponsesMessages({
  providerOptions: part.providerOptions,
  schema: openaiResponsesReasoningProviderOptionsSchema
  });
- const reasoningId = (_d = providerOptions == null ? void 0 : providerOptions.reasoning) == null ? void 0 : _d.id;
+ const reasoningId = (_g = providerOptions == null ? void 0 : providerOptions.reasoning) == null ? void 0 : _g.id;
  if (reasoningId != null) {
  const existingReasoningMessage = reasoningMessages[reasoningId];
  const summaryParts = [];
@@ -2130,7 +2131,7 @@ async function convertToOpenAIResponsesMessages({
  reasoningMessages[reasoningId] = {
  type: "reasoning",
  id: reasoningId,
- encrypted_content: (_e = providerOptions == null ? void 0 : providerOptions.reasoning) == null ? void 0 : _e.encryptedContent,
+ encrypted_content: (_h = providerOptions == null ? void 0 : providerOptions.reasoning) == null ? void 0 : _h.encryptedContent,
  summary: summaryParts
  };
  messages.push(reasoningMessages[reasoningId]);
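
Unminified, the change to convertToOpenAIResponsesMessages is small: assistant text parts (not only tool-call and reasoning parts) can now carry an OpenAI item id in their providerOptions, and the converter forwards it as the id of the Responses input item it builds. A minimal sketch of what the transpiled _a/_b/_c chain does, with a hypothetical itemId value:

// Hypothetical assistant text part as the converter receives it; the itemId
// value is illustrative and would normally come from a previous Responses call.
const part = {
  type: "text",
  text: "Previously generated answer",
  providerOptions: { openai: { itemId: "msg_abc123" } },
};

// Equivalent of the compiled (_c = (_b = (_a = ...))) chain above:
const id = part.providerOptions?.openai?.itemId ?? undefined;

// The converter then pushes a Responses input item that keeps that id:
const message = {
  role: "assistant",
  content: [{ type: "output_text", text: part.text }],
  id,
};

console.log(message);
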
@@ -2477,6 +2478,7 @@ var OpenAIResponsesLanguageModel = class {
  z15.object({
  type: z15.literal("message"),
  role: z15.literal("assistant"),
+ id: z15.string(),
  content: z15.array(
  z15.object({
  type: z15.literal("output_text"),
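
In the response schema, assistant message output items must now include their id (z15 is evidently the bundle's alias for zod). A sketch of the parsed item shape, limited to what this hunk shows plus an assumed text field:

import { z } from "zod";

// Sketch of the assistant message output item schema after this change.
// Only fields visible in the diff are included; `text` is an assumption,
// since the hunk is cut off after the output_text literal.
const messageItemSchema = z.object({
  type: z.literal("message"),
  role: z.literal("assistant"),
  id: z.string(), // newly required: the Responses output item id
  content: z.array(
    z.object({
      type: z.literal("output_text"),
      text: z.string(), // assumption: not shown in the hunk
    }),
  ),
});
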
@@ -2568,7 +2570,12 @@ var OpenAIResponsesLanguageModel = class {
  for (const contentPart of part.content) {
  content.push({
  type: "text",
- text: contentPart.text
+ text: contentPart.text,
+ providerMetadata: {
+ openai: {
+ itemId: part.id
+ }
+ }
  });
  for (const annotation of contentPart.annotations) {
  content.push({
@@ -2745,7 +2752,12 @@ var OpenAIResponsesLanguageModel = class {
  } else if (value.item.type === "message") {
  controller.enqueue({
  type: "text-start",
- id: value.item.id
+ id: value.item.id,
+ providerMetadata: {
+ openai: {
+ itemId: value.item.id
+ }
+ }
  });
  } else if (isResponseOutputItemAddedReasoningChunk(value)) {
  activeReasoning[value.item.id] = {
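
Taken together, OpenAIResponsesLanguageModel now reports which Responses output message produced each piece of text: the non-streaming path attaches providerMetadata.openai.itemId to text content parts, and the streaming path attaches it to text-start parts. A rough usage sketch, assuming the AI SDK v5 generateText call site passes content-part providerMetadata through unchanged (model id and prompt are placeholders):

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = await generateText({
  model: openai.responses("gpt-4o"), // placeholder model id
  prompt: "Say hello.",
});

for (const part of result.content) {
  if (part.type === "text") {
    // itemId identifies the Responses output message that produced this text;
    // it can be fed back via providerOptions.openai.itemId (see the
    // conversion hunks above).
    console.log(part.providerMetadata?.openai?.itemId);
  }
}
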