@ai-sdk/openai 2.0.0-beta.7 → 2.0.0-beta.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,19 @@
  # @ai-sdk/openai
 
+ ## 2.0.0-beta.9
+
+ ### Patch Changes
+
+ - faea29f: fix (provider/openai): multi-step reasoning with text
+
+ ## 2.0.0-beta.8
+
+ ### Patch Changes
+
+ - db64cbe: fix (provider/openai): multi-step reasoning with tool calls
+ - Updated dependencies [05d2819]
+   - @ai-sdk/provider-utils@3.0.0-beta.3
+
  ## 2.0.0-beta.7
 
  ### Patch Changes
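Both patch entries concern multi-step calls against the OpenAI Responses API: the provider now preserves the `id` of previously generated output items so that later steps can send them back. A minimal sketch of the message part shape involved, assuming an assistant text part that is fed into a follow-up step (the concrete values are invented):

```ts
// Hedged sketch: an assistant text part carrying the Responses API item id
// via provider options, as the beta.8/beta.9 fixes expect (values invented).
const assistantTextPart = {
  type: "text" as const,
  text: "The weather in Berlin is sunny.",
  providerOptions: {
    openai: {
      itemId: "msg_123", // item id emitted by a previous generation step
    },
  },
};
```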
package/dist/index.js CHANGED
@@ -1879,7 +1879,7 @@ async function convertToOpenAIResponsesMessages({
  prompt,
  systemMessageMode
  }) {
- var _a, _b;
+ var _a, _b, _c, _d, _e, _f, _g, _h;
  const messages = [];
  const warnings = [];
  for (const { role, content } of prompt) {
@@ -1914,7 +1914,7 @@ async function convertToOpenAIResponsesMessages({
  messages.push({
  role: "user",
  content: content.map((part, index) => {
- var _a2, _b2, _c;
+ var _a2, _b2, _c2;
  switch (part.type) {
  case "text": {
  return { type: "input_text", text: part.text };
@@ -1936,7 +1936,7 @@ async function convertToOpenAIResponsesMessages({
  }
  return {
  type: "input_file",
- filename: (_c = part.filename) != null ? _c : `part-${index}.pdf`,
+ filename: (_c2 = part.filename) != null ? _c2 : `part-${index}.pdf`,
  file_data: `data:application/pdf;base64,${part.data}`
  };
  } else {
@@ -1957,7 +1957,8 @@ async function convertToOpenAIResponsesMessages({
  case "text": {
  messages.push({
  role: "assistant",
- content: [{ type: "output_text", text: part.text }]
+ content: [{ type: "output_text", text: part.text }],
+ id: (_c = (_b = (_a = part.providerOptions) == null ? void 0 : _a.openai) == null ? void 0 : _b.itemId) != null ? _c : void 0
  });
  break;
  }
@@ -1969,7 +1970,8 @@ async function convertToOpenAIResponsesMessages({
  type: "function_call",
  call_id: part.toolCallId,
  name: part.toolName,
- arguments: JSON.stringify(part.input)
+ arguments: JSON.stringify(part.input),
+ id: (_f = (_e = (_d = part.providerOptions) == null ? void 0 : _d.openai) == null ? void 0 : _e.itemId) != null ? _f : void 0
  });
  break;
  }
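The two hunks above are the conversion side of the fix: when an assistant text part or tool-call part carries `providerOptions.openai.itemId`, the converter forwards it as the `id` of the emitted Responses API input item, and leaves it undefined otherwise. A de-minified sketch of the resulting `function_call` item, with invented values:

```ts
// Sketch of the input item produced for a tool-call part (values invented).
const functionCallItem = {
  type: "function_call",
  call_id: "call_abc",
  name: "getWeather",
  arguments: JSON.stringify({ city: "Berlin" }),
  // taken from part.providerOptions?.openai?.itemId, undefined when absent
  id: "fc_456",
};
```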
@@ -1986,7 +1988,7 @@ async function convertToOpenAIResponsesMessages({
  providerOptions: part.providerOptions,
  schema: openaiResponsesReasoningProviderOptionsSchema
  });
- const reasoningId = (_a = providerOptions == null ? void 0 : providerOptions.reasoning) == null ? void 0 : _a.id;
+ const reasoningId = (_g = providerOptions == null ? void 0 : providerOptions.reasoning) == null ? void 0 : _g.id;
  if (reasoningId != null) {
  const existingReasoningMessage = reasoningMessages[reasoningId];
  const summaryParts = [];
@@ -2002,7 +2004,7 @@ async function convertToOpenAIResponsesMessages({
  reasoningMessages[reasoningId] = {
  type: "reasoning",
  id: reasoningId,
- encrypted_content: (_b = providerOptions == null ? void 0 : providerOptions.reasoning) == null ? void 0 : _b.encryptedContent,
+ encrypted_content: (_h = providerOptions == null ? void 0 : providerOptions.reasoning) == null ? void 0 : _h.encryptedContent,
  summary: summaryParts
  };
  messages.push(reasoningMessages[reasoningId]);
@@ -2347,6 +2349,7 @@ var OpenAIResponsesLanguageModel = class {
  import_v414.z.object({
  type: import_v414.z.literal("message"),
  role: import_v414.z.literal("assistant"),
+ id: import_v414.z.string(),
  content: import_v414.z.array(
  import_v414.z.object({
  type: import_v414.z.literal("output_text"),
@@ -2367,7 +2370,8 @@ var OpenAIResponsesLanguageModel = class {
  type: import_v414.z.literal("function_call"),
  call_id: import_v414.z.string(),
  name: import_v414.z.string(),
- arguments: import_v414.z.string()
+ arguments: import_v414.z.string(),
+ id: import_v414.z.string()
  }),
  import_v414.z.object({
  type: import_v414.z.literal("web_search_call"),
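In readable form, the two schema hunks require an `id` string on both the `message` and `function_call` output items, so the parsed response always exposes the item id that the metadata changes below rely on. A sketch of the `function_call` branch, assuming a plain `zod` import in place of the bundled `import_v414` alias:

```ts
import { z } from "zod";

// Readable equivalent of the bundled schema change: the function_call
// output item now includes a required "id" field.
const functionCallOutputItem = z.object({
  type: z.literal("function_call"),
  call_id: z.string(),
  name: z.string(),
  arguments: z.string(),
  id: z.string(),
});
```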
@@ -2437,7 +2441,12 @@ var OpenAIResponsesLanguageModel = class {
  for (const contentPart of part.content) {
  content.push({
  type: "text",
- text: contentPart.text
+ text: contentPart.text,
+ providerMetadata: {
+ openai: {
+ itemId: part.id
+ }
+ }
  });
  for (const annotation of contentPart.annotations) {
  content.push({
@@ -2456,7 +2465,12 @@ var OpenAIResponsesLanguageModel = class {
  type: "tool-call",
  toolCallId: part.call_id,
  toolName: part.name,
- input: part.arguments
+ input: part.arguments,
+ providerMetadata: {
+ openai: {
+ itemId: part.id
+ }
+ }
  });
  break;
  }
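In the non-streaming generate path, text and tool-call content parts now carry the originating output item's id under `providerMetadata.openai.itemId`, mirroring what the converter reads back from `providerOptions`. A sketch of the shape a consumer sees (values invented):

```ts
// Sketch: tool-call content part as returned by the generate path,
// with the Responses API item id attached as provider metadata.
const toolCallPart = {
  type: "tool-call" as const,
  toolCallId: "call_abc",
  toolName: "getWeather",
  input: '{"city":"Berlin"}',
  providerMetadata: {
    openai: { itemId: "fc_456" },
  },
};
```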
@@ -2609,7 +2623,12 @@ var OpenAIResponsesLanguageModel = class {
  } else if (value.item.type === "message") {
  controller.enqueue({
  type: "text-start",
- id: value.item.id
+ id: value.item.id,
+ providerMetadata: {
+ openai: {
+ itemId: value.item.id
+ }
+ }
  });
  } else if (isResponseOutputItemAddedReasoningChunk(value)) {
  activeReasoning[value.item.id] = {
@@ -2641,7 +2660,12 @@ var OpenAIResponsesLanguageModel = class {
  type: "tool-call",
  toolCallId: value.item.call_id,
  toolName: value.item.name,
- input: value.item.arguments
+ input: value.item.arguments,
+ providerMetadata: {
+ openai: {
+ itemId: value.item.id
+ }
+ }
  });
  } else if (value.item.type === "web_search_call") {
  ongoingToolCalls[value.output_index] = void 0;
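The streaming path mirrors this: `text-start` and `tool-call` stream parts now include the same `providerMetadata.openai.itemId`. A small helper along these lines (an assumption, not part of the package) could read it back when persisting messages for the next step:

```ts
// Hypothetical helper: extract the OpenAI Responses item id from a part's
// provider metadata, if present.
function getOpenAIItemId(part: {
  providerMetadata?: { openai?: { itemId?: unknown } };
}): string | undefined {
  const itemId = part.providerMetadata?.openai?.itemId;
  return typeof itemId === "string" ? itemId : undefined;
}
```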