@ai-sdk/openai 2.0.0-beta.5 → 2.0.0-beta.7

package/CHANGELOG.md CHANGED
@@ -1,5 +1,19 @@
  # @ai-sdk/openai
 
+ ## 2.0.0-beta.7
+
+ ### Patch Changes
+
+ - 209256d: Add missing file_search tool support to OpenAI Responses API
+
+ ## 2.0.0-beta.6
+
+ ### Patch Changes
+
+ - 0eee6a8: Fix streaming and reconstruction of reasoning summary parts
+ - b5a0e32: fix (provider/openai): correct default for chat model strict mode
+ - c7d3b2e: fix (provider/openai): push first reasoning chunk in output item added event
+
  ## 2.0.0-beta.5
 
  ### Patch Changes
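The beta.7 entry adds `file_search` to the tools the Responses API path accepts. A minimal usage sketch, assuming a `fileSearch` tool factory is exported next to the existing `webSearchPreview` one and takes the camelCase args visible in this diff (`vectorStoreIds`, `maxResults`); the model id and vector store id are placeholders:

```ts
// Sketch only, not confirmed API: tool factory name and ids are assumptions.
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const result = await generateText({
  model: openai.responses('gpt-4o-mini'),
  prompt: 'What does the onboarding guide say about expense reports?',
  tools: {
    // maps to { type: "file_search", vector_store_ids, max_results } on the wire
    file_search: openai.tools.fileSearch({
      vectorStoreIds: ['vs_123'], // hypothetical vector store id
      maxResults: 5,
    }),
  },
});

console.log(result.text);
```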
package/dist/index.js CHANGED
@@ -306,7 +306,7 @@ var openaiProviderOptions = import_v4.z.object({
  /**
  * Whether to use strict JSON schema validation.
  *
- * @default true
+ * @default false
  */
  strictJsonSchema: import_v4.z.boolean().optional()
  });
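The JSDoc now documents the corrected default: strict JSON schema validation on the chat model is off unless requested. A sketch of opting back in, assuming `strictJsonSchema` is read from per-call provider options under the `openai` key:

```ts
// Sketch, assuming the chat model honors providerOptions.openai.strictJsonSchema.
import { openai } from '@ai-sdk/openai';
import { generateObject } from 'ai';
import { z } from 'zod';

const { object } = await generateObject({
  model: openai.chat('gpt-4o-mini'),
  schema: z.object({ city: z.string(), population: z.number() }),
  prompt: 'Give me a fictional city and its population.',
  providerOptions: {
    openai: { strictJsonSchema: true }, // default is now false
  },
});
```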
@@ -2103,6 +2103,16 @@ function prepareResponsesTools({
  break;
  case "provider-defined":
  switch (tool.id) {
+ case "openai.file_search": {
+ const args = fileSearchArgsSchema.parse(tool.args);
+ openaiTools2.push({
+ type: "file_search",
+ vector_store_ids: args.vectorStoreIds,
+ max_results: args.maxResults,
+ search_type: args.searchType
+ });
+ break;
+ }
  case "openai.web_search_preview":
  openaiTools2.push({
  type: "web_search_preview",
@@ -2132,7 +2142,7 @@ function prepareResponsesTools({
  case "tool":
  return {
  tools: openaiTools2,
- toolChoice: toolChoice.toolName === "web_search_preview" ? { type: "web_search_preview" } : { type: "function", name: toolChoice.toolName },
+ toolChoice: toolChoice.toolName === "file_search" ? { type: "file_search" } : toolChoice.toolName === "web_search_preview" ? { type: "web_search_preview" } : { type: "function", name: toolChoice.toolName },
  toolWarnings
  };
  default: {
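With this change, forcing the provider-defined tool by name sends `tool_choice: { type: "file_search" }` rather than a function tool choice. A sketch under the same assumptions as the earlier file_search example (tool factory name and vector store id are hypothetical):

```ts
// Sketch of forcing the provider tool by name; toolChoice shape assumed.
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const result = await generateText({
  model: openai.responses('gpt-4o-mini'),
  prompt: 'Search the knowledge base for the vacation policy.',
  tools: {
    file_search: openai.tools.fileSearch({ vectorStoreIds: ['vs_123'] }),
  },
  toolChoice: { type: 'tool', toolName: 'file_search' },
});
```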
@@ -2547,6 +2557,7 @@ var OpenAIResponsesLanguageModel = class {
  let responseId = null;
  const ongoingToolCalls = {};
  let hasToolCalls = false;
+ const activeReasoning = {};
  return {
  stream: response.pipeThrough(
  new TransformStream({
@@ -2554,7 +2565,7 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
  if (options.includeRawChunks) {
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
  }
@@ -2600,10 +2611,14 @@ var OpenAIResponsesLanguageModel = class {
  type: "text-start",
  id: value.item.id
  });
- } else if (value.item.type === "reasoning") {
+ } else if (isResponseOutputItemAddedReasoningChunk(value)) {
+ activeReasoning[value.item.id] = {
+ encryptedContent: value.item.encrypted_content,
+ summaryParts: [0]
+ };
  controller.enqueue({
  type: "reasoning-start",
- id: value.item.id,
+ id: `${value.item.id}:0`,
  providerMetadata: {
  openai: {
  reasoning: {
@@ -2681,19 +2696,23 @@ var OpenAIResponsesLanguageModel = class {
  type: "text-end",
  id: value.item.id
  });
- } else if (value.item.type === "reasoning") {
- controller.enqueue({
- type: "reasoning-end",
- id: value.item.id,
- providerMetadata: {
- openai: {
- reasoning: {
- id: value.item.id,
- encryptedContent: (_b = value.item.encrypted_content) != null ? _b : null
+ } else if (isResponseOutputItemDoneReasoningChunk(value)) {
+ const activeReasoningPart = activeReasoning[value.item.id];
+ for (const summaryIndex of activeReasoningPart.summaryParts) {
+ controller.enqueue({
+ type: "reasoning-end",
+ id: `${value.item.id}:${summaryIndex}`,
+ providerMetadata: {
+ openai: {
+ reasoning: {
+ id: value.item.id,
+ encryptedContent: (_b = value.item.encrypted_content) != null ? _b : null
+ }
  }
  }
- }
- });
+ });
+ }
+ delete activeReasoning[value.item.id];
  }
  } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
  const toolCall = ongoingToolCalls[value.output_index];
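A readable restatement (not the source) of the bookkeeping this bundled hunk performs: each reasoning item is tracked in `activeReasoning` until its `output_item.done` arrives, then one `reasoning-end` is emitted per summary part that was opened, keyed as `${itemId}:${summaryIndex}`, and the entry is dropped:

```ts
// Deminified sketch of the flush-on-done logic above; names match the diff,
// types and the null-safe fallback are assumptions.
type ActiveReasoning = Record<
  string, // reasoning item id
  { encryptedContent: string | null | undefined; summaryParts: number[] }
>;

function endReasoningItem(
  activeReasoning: ActiveReasoning,
  itemId: string,
  emit: (part: { type: 'reasoning-end'; id: string }) => void,
) {
  const entry = activeReasoning[itemId];
  for (const summaryIndex of entry?.summaryParts ?? []) {
    emit({ type: 'reasoning-end', id: `${itemId}:${summaryIndex}` });
  }
  delete activeReasoning[itemId];
}
```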
@@ -2718,27 +2737,52 @@ var OpenAIResponsesLanguageModel = class {
  id: value.item_id,
  delta: value.delta
  });
+ } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
+ if (value.summary_index > 0) {
+ (_c = activeReasoning[value.item_id]) == null ? void 0 : _c.summaryParts.push(
+ value.summary_index
+ );
+ controller.enqueue({
+ type: "reasoning-start",
+ id: `${value.item_id}:${value.summary_index}`,
+ providerMetadata: {
+ openai: {
+ reasoning: {
+ id: value.item_id,
+ encryptedContent: (_e = (_d = activeReasoning[value.item_id]) == null ? void 0 : _d.encryptedContent) != null ? _e : null
+ }
+ }
+ }
+ });
+ }
  } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
  controller.enqueue({
  type: "reasoning-delta",
- id: value.item_id,
- delta: value.delta
+ id: `${value.item_id}:${value.summary_index}`,
+ delta: value.delta,
+ providerMetadata: {
+ openai: {
+ reasoning: {
+ id: value.item_id
+ }
+ }
+ }
  });
  } else if (isResponseFinishedChunk(value)) {
  finishReason = mapOpenAIResponseFinishReason({
- finishReason: (_c = value.response.incomplete_details) == null ? void 0 : _c.reason,
+ finishReason: (_f = value.response.incomplete_details) == null ? void 0 : _f.reason,
  hasToolCalls
  });
  usage.inputTokens = value.response.usage.input_tokens;
  usage.outputTokens = value.response.usage.output_tokens;
  usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
- usage.reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
- usage.cachedInputTokens = (_g = (_f = value.response.usage.input_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
+ usage.reasoningTokens = (_h = (_g = value.response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : void 0;
+ usage.cachedInputTokens = (_j = (_i = value.response.usage.input_tokens_details) == null ? void 0 : _i.cached_tokens) != null ? _j : void 0;
  } else if (isResponseAnnotationAddedChunk(value)) {
  controller.enqueue({
  type: "source",
  sourceType: "url",
- id: (_j = (_i = (_h = self.config).generateId) == null ? void 0 : _i.call(_h)) != null ? _j : (0, import_provider_utils11.generateId)(),
+ id: (_m = (_l = (_k = self.config).generateId) == null ? void 0 : _l.call(_k)) != null ? _m : (0, import_provider_utils11.generateId)(),
  url: value.annotation.url,
  title: value.annotation.title
  });
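Since `reasoning_summary_part.added` now opens a new reasoning part per summary index and each delta carries its `summary_index`, a downstream consumer can rebuild one text block per summary part from the composite id. A self-contained sketch against the part shapes emitted in this hunk (not a public API example):

```ts
// Groups reasoning deltas by the `${itemId}:${summaryIndex}` id used above.
type ReasoningPart =
  | { type: 'reasoning-start'; id: string }
  | { type: 'reasoning-delta'; id: string; delta: string }
  | { type: 'reasoning-end'; id: string };

function collectReasoning(parts: ReasoningPart[]): Map<string, string> {
  const summaries = new Map<string, string>(); // key: `${itemId}:${summaryIndex}`
  for (const part of parts) {
    if (part.type === 'reasoning-start') summaries.set(part.id, '');
    if (part.type === 'reasoning-delta') {
      summaries.set(part.id, (summaries.get(part.id) ?? '') + part.delta);
    }
  }
  return summaries;
}
```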
@@ -2809,13 +2853,7 @@ var responseOutputItemAddedSchema = import_v414.z.object({
  import_v414.z.object({
  type: import_v414.z.literal("reasoning"),
  id: import_v414.z.string(),
- encrypted_content: import_v414.z.string().nullish(),
- summary: import_v414.z.array(
- import_v414.z.object({
- type: import_v414.z.literal("summary_text"),
- text: import_v414.z.string()
- })
- )
+ encrypted_content: import_v414.z.string().nullish()
  }),
  import_v414.z.object({
  type: import_v414.z.literal("function_call"),
@@ -2847,13 +2885,7 @@ var responseOutputItemDoneSchema = import_v414.z.object({
  import_v414.z.object({
  type: import_v414.z.literal("reasoning"),
  id: import_v414.z.string(),
- encrypted_content: import_v414.z.string().nullish(),
- summary: import_v414.z.array(
- import_v414.z.object({
- type: import_v414.z.literal("summary_text"),
- text: import_v414.z.string()
- })
- )
+ encrypted_content: import_v414.z.string().nullish()
  }),
  import_v414.z.object({
  type: import_v414.z.literal("function_call"),
@@ -2889,9 +2921,15 @@ var responseAnnotationAddedSchema = import_v414.z.object({
  title: import_v414.z.string()
  })
  });
+ var responseReasoningSummaryPartAddedSchema = import_v414.z.object({
+ type: import_v414.z.literal("response.reasoning_summary_part.added"),
+ item_id: import_v414.z.string(),
+ summary_index: import_v414.z.number()
+ });
  var responseReasoningSummaryTextDeltaSchema = import_v414.z.object({
  type: import_v414.z.literal("response.reasoning_summary_text.delta"),
  item_id: import_v414.z.string(),
+ summary_index: import_v414.z.number(),
  delta: import_v414.z.string()
  });
  var openaiResponsesChunkSchema = import_v414.z.union([
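For reference, example payloads that the two schemas above accept, limited to the fields they declare (values are illustrative, not taken from a real response):

```ts
// Illustrative chunk shapes matching the zod schemas in this hunk.
const summaryPartAdded = {
  type: 'response.reasoning_summary_part.added',
  item_id: 'rs_abc123', // hypothetical reasoning item id
  summary_index: 1,
};

const summaryTextDelta = {
  type: 'response.reasoning_summary_text.delta',
  item_id: 'rs_abc123',
  summary_index: 1,
  delta: 'Considering the constraints...',
};
```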
@@ -2902,6 +2940,7 @@ var openaiResponsesChunkSchema = import_v414.z.union([
  responseOutputItemDoneSchema,
  responseFunctionCallArgumentsDeltaSchema,
  responseAnnotationAddedSchema,
+ responseReasoningSummaryPartAddedSchema,
  responseReasoningSummaryTextDeltaSchema,
  errorChunkSchema,
  import_v414.z.object({ type: import_v414.z.string() }).loose()
@@ -2913,6 +2952,9 @@ function isTextDeltaChunk(chunk) {
  function isResponseOutputItemDoneChunk(chunk) {
  return chunk.type === "response.output_item.done";
  }
+ function isResponseOutputItemDoneReasoningChunk(chunk) {
+ return isResponseOutputItemDoneChunk(chunk) && chunk.item.type === "reasoning";
+ }
  function isResponseFinishedChunk(chunk) {
  return chunk.type === "response.completed" || chunk.type === "response.incomplete";
  }
@@ -2925,9 +2967,15 @@ function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
  function isResponseOutputItemAddedChunk(chunk) {
  return chunk.type === "response.output_item.added";
  }
+ function isResponseOutputItemAddedReasoningChunk(chunk) {
+ return isResponseOutputItemAddedChunk(chunk) && chunk.item.type === "reasoning";
+ }
  function isResponseAnnotationAddedChunk(chunk) {
  return chunk.type === "response.output_text.annotation.added";
  }
+ function isResponseReasoningSummaryPartAddedChunk(chunk) {
+ return chunk.type === "response.reasoning_summary_part.added";
+ }
  function isResponseReasoningSummaryTextDeltaChunk(chunk) {
  return chunk.type === "response.reasoning_summary_text.delta";
  }