@ai-sdk/openai 2.0.0-beta.7 → 2.0.0-beta.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -1906,7 +1906,7 @@ async function convertToOpenAIResponsesMessages({
  prompt,
  systemMessageMode
  }) {
- var _a, _b;
+ var _a, _b, _c, _d, _e, _f, _g, _h;
  const messages = [];
  const warnings = [];
  for (const { role, content } of prompt) {
@@ -1941,7 +1941,7 @@ async function convertToOpenAIResponsesMessages({
  messages.push({
  role: "user",
  content: content.map((part, index) => {
- var _a2, _b2, _c;
+ var _a2, _b2, _c2;
  switch (part.type) {
  case "text": {
  return { type: "input_text", text: part.text };
@@ -1963,7 +1963,7 @@ async function convertToOpenAIResponsesMessages({
  }
  return {
  type: "input_file",
- filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
+ filename: (_c2 = part.filename) != null ? _c2 : `part-${index}.pdf`,
  file_data: `data:application/pdf;base64,${part.data}`
  };
  } else {
@@ -1984,7 +1984,8 @@ async function convertToOpenAIResponsesMessages({
  case "text": {
  messages.push({
  role: "assistant",
- content: [{ type: "output_text", text: part.text }]
+ content: [{ type: "output_text", text: part.text }],
+ id: (_c = (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.itemId) != null ? _c : void 0
  });
  break;
  }
@@ -1996,7 +1997,8 @@ async function convertToOpenAIResponsesMessages({
  type: "function_call",
  call_id: part.toolCallId,
  name: part.toolName,
- arguments: JSON.stringify(part.input)
+ arguments: JSON.stringify(part.input),
+ id: (_f = (_e = (_d = part.providerOptions) == null ? void 0 : _d.openai) == null ? void 0 : _e.itemId) != null ? _f : void 0
  });
  break;
  }
@@ -2013,7 +2015,7 @@ async function convertToOpenAIResponsesMessages({
  providerOptions: part.providerOptions,
  schema: openaiResponsesReasoningProviderOptionsSchema
  });
- const reasoningId = (_a = providerOptions == null ? void 0 : providerOptions.reasoning) == null ? void 0 : _a.id;
+ const reasoningId = (_g = providerOptions == null ? void 0 : providerOptions.reasoning) == null ? void 0 : _g.id;
  if (reasoningId != null) {
  const existingReasoningMessage = reasoningMessages[reasoningId];
  const summaryParts = [];
@@ -2029,7 +2031,7 @@ async function convertToOpenAIResponsesMessages({
  reasoningMessages[reasoningId] = {
  type: "reasoning",
  id: reasoningId,
- encrypted_content: (_b = providerOptions == null ? void 0 : providerOptions.reasoning) == null ? void 0 : _b.encryptedContent,
+ encrypted_content: (_h = providerOptions == null ? void 0 : providerOptions.reasoning) == null ? void 0 : _h.encryptedContent,
  summary: summaryParts
  };
  messages.push(reasoningMessages[reasoningId]);
@@ -2376,6 +2378,7 @@ var OpenAIResponsesLanguageModel = class {
  z14.object({
  type: z14.literal("message"),
  role: z14.literal("assistant"),
+ id: z14.string(),
  content: z14.array(
  z14.object({
  type: z14.literal("output_text"),
@@ -2396,7 +2399,8 @@ var OpenAIResponsesLanguageModel = class {
  type: z14.literal("function_call"),
  call_id: z14.string(),
  name: z14.string(),
- arguments: z14.string()
+ arguments: z14.string(),
+ id: z14.string()
  }),
  z14.object({
  type: z14.literal("web_search_call"),
@@ -2466,7 +2470,12 @@ var OpenAIResponsesLanguageModel = class {
  for (const contentPart of part.content) {
  content.push({
  type: "text",
- text: contentPart.text
+ text: contentPart.text,
+ providerMetadata: {
+ openai: {
+ itemId: part.id
+ }
+ }
  });
  for (const annotation of contentPart.annotations) {
  content.push({
@@ -2485,7 +2494,12 @@ var OpenAIResponsesLanguageModel = class {
  type: "tool-call",
  toolCallId: part.call_id,
  toolName: part.name,
- input: part.arguments
+ input: part.arguments,
+ providerMetadata: {
+ openai: {
+ itemId: part.id
+ }
+ }
  });
  break;
  }
@@ -2638,7 +2652,12 @@ var OpenAIResponsesLanguageModel = class {
  } else if (value.item.type === "message") {
  controller.enqueue({
  type: "text-start",
- id: value.item.id
+ id: value.item.id,
+ providerMetadata: {
+ openai: {
+ itemId: value.item.id
+ }
+ }
  });
  } else if (isResponseOutputItemAddedReasoningChunk(value)) {
  activeReasoning[value.item.id] = {
@@ -2670,7 +2689,12 @@ var OpenAIResponsesLanguageModel = class {
  type: "tool-call",
  toolCallId: value.item.call_id,
  toolName: value.item.name,
- input: value.item.arguments
+ input: value.item.arguments,
+ providerMetadata: {
+ openai: {
+ itemId: value.item.id
+ }
+ }
  });
  } else if (value.item.type === "web_search_call") {
  ongoingToolCalls[value.output_index] = void 0;
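
Taken together, these hunks thread OpenAI Responses API item IDs through the provider: generated text parts and tool calls now expose the id of the output item they came from as providerMetadata.openai.itemId (in both the response mapping and the streaming text-start / tool-call events), and convertToOpenAIResponsesMessages reads providerOptions.openai.itemId back from assistant text and tool-call parts so the serialized output_text message and function_call items keep their ids. The sketch below is a minimal illustration of that round-trip using hypothetical ids and plain object literals; it is not part of the package and omits the surrounding AI SDK calls.

// Sketch only (not part of @ai-sdk/openai): the itemId round-trip added above.

// 1. Output mapping: each generated text part and tool call now carries the
//    id of the Responses API output item it was produced from.
const generatedTextPart = {
  type: "text",
  text: "Hello!",
  providerMetadata: {
    openai: { itemId: "msg_abc123" } // id of the "message" output item (hypothetical value)
  }
};

const generatedToolCall = {
  type: "tool-call",
  toolCallId: "call_001",
  toolName: "getWeather",
  input: '{"city":"Berlin"}',
  providerMetadata: {
    openai: { itemId: "fc_def456" } // id of the "function_call" output item (hypothetical value)
  }
};

// 2. Prompt conversion: when an assistant text or tool-call part is sent back
//    with providerOptions.openai.itemId, convertToOpenAIResponsesMessages
//    forwards that value as the serialized item's `id`.
const assistantTextPartForNextTurn = {
  type: "text",
  text: "Hello!",
  providerOptions: {
    openai: { itemId: "msg_abc123" }
  }
};
// -> { role: "assistant", content: [{ type: "output_text", text: "Hello!" }], id: "msg_abc123" }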