@ai-sdk/openai 2.0.0-beta.4 → 2.0.0-beta.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only. It reflects the changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,20 @@
1
1
  # @ai-sdk/openai
2
2
 
3
+ ## 2.0.0-beta.6
4
+
5
+ ### Patch Changes
6
+
7
+ - 0eee6a8: Fix streaming and reconstruction of reasoning summary parts
8
+ - b5a0e32: fix (provider/openai): correct default for chat model strict mode
9
+ - c7d3b2e: fix (provider/openai): push first reasoning chunk in output item added event
10
+
11
+ ## 2.0.0-beta.5
12
+
13
+ ### Patch Changes
14
+
15
+ - 48249c4: Do not warn if empty text is the first part of a reasoning sequence
16
+ - e497698: fix (provider/openai): handle responses api errors
17
+
3
18
  ## 2.0.0-beta.4
4
19
 
5
20
  ### Patch Changes
package/dist/index.js CHANGED
@@ -306,7 +306,7 @@ var openaiProviderOptions = import_v4.z.object({
306
306
  /**
307
307
  * Whether to use strict JSON schema validation.
308
308
  *
309
- * @default true
309
+ * @default false
310
310
  */
311
311
  strictJsonSchema: import_v4.z.boolean().optional()
312
312
  });
@@ -1867,6 +1867,7 @@ var openaiTranscriptionResponseSchema = import_v412.z.object({
1867
1867
  });
1868
1868
 
1869
1869
  // src/responses/openai-responses-language-model.ts
1870
+ var import_provider8 = require("@ai-sdk/provider");
1870
1871
  var import_provider_utils11 = require("@ai-sdk/provider-utils");
1871
1872
  var import_v414 = require("zod/v4");
1872
1873
 
@@ -1991,7 +1992,7 @@ async function convertToOpenAIResponsesMessages({
1991
1992
  const summaryParts = [];
1992
1993
  if (part.text.length > 0) {
1993
1994
  summaryParts.push({ type: "summary_text", text: part.text });
1994
- } else {
1995
+ } else if (existingReasoningMessage !== void 0) {
1995
1996
  warnings.push({
1996
1997
  type: "other",
1997
1998
  message: `Cannot append empty reasoning part to existing reasoning sequence. Skipping reasoning part: ${JSON.stringify(part)}.`
@@ -2309,15 +2310,16 @@ var OpenAIResponsesLanguageModel = class {
2309
2310
  async doGenerate(options) {
2310
2311
  var _a, _b, _c, _d, _e, _f, _g, _h, _i;
2311
2312
  const { args: body, warnings } = await this.getArgs(options);
2313
+ const url = this.config.url({
2314
+ path: "/responses",
2315
+ modelId: this.modelId
2316
+ });
2312
2317
  const {
2313
2318
  responseHeaders,
2314
2319
  value: response,
2315
2320
  rawValue: rawResponse
2316
2321
  } = await (0, import_provider_utils11.postJsonToApi)({
2317
- url: this.config.url({
2318
- path: "/responses",
2319
- modelId: this.modelId
2320
- }),
2322
+ url,
2321
2323
  headers: (0, import_provider_utils11.combineHeaders)(this.config.headers(), options.headers),
2322
2324
  body,
2323
2325
  failedResponseHandler: openaiFailedResponseHandler,
@@ -2325,6 +2327,10 @@ var OpenAIResponsesLanguageModel = class {
2325
2327
  import_v414.z.object({
2326
2328
  id: import_v414.z.string(),
2327
2329
  created_at: import_v414.z.number(),
2330
+ error: import_v414.z.object({
2331
+ code: import_v414.z.string(),
2332
+ message: import_v414.z.string()
2333
+ }).nullish(),
2328
2334
  model: import_v414.z.string(),
2329
2335
  output: import_v414.z.array(
2330
2336
  import_v414.z.discriminatedUnion("type", [
@@ -2383,6 +2389,17 @@ var OpenAIResponsesLanguageModel = class {
2383
2389
  abortSignal: options.abortSignal,
2384
2390
  fetch: this.config.fetch
2385
2391
  });
2392
+ if (response.error) {
2393
+ throw new import_provider8.APICallError({
2394
+ message: response.error.message,
2395
+ url,
2396
+ requestBodyValues: body,
2397
+ statusCode: 400,
2398
+ responseHeaders,
2399
+ responseBody: rawResponse,
2400
+ isRetryable: false
2401
+ });
2402
+ }
2386
2403
  const content = [];
2387
2404
  for (const part of response.output) {
2388
2405
  switch (part.type) {
@@ -2530,6 +2547,7 @@ var OpenAIResponsesLanguageModel = class {
2530
2547
  let responseId = null;
2531
2548
  const ongoingToolCalls = {};
2532
2549
  let hasToolCalls = false;
2550
+ const activeReasoning = {};
2533
2551
  return {
2534
2552
  stream: response.pipeThrough(
2535
2553
  new TransformStream({
@@ -2537,7 +2555,7 @@ var OpenAIResponsesLanguageModel = class {
2537
2555
  controller.enqueue({ type: "stream-start", warnings });
2538
2556
  },
2539
2557
  transform(chunk, controller) {
2540
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
2558
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
2541
2559
  if (options.includeRawChunks) {
2542
2560
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
2543
2561
  }
@@ -2583,10 +2601,14 @@ var OpenAIResponsesLanguageModel = class {
2583
2601
  type: "text-start",
2584
2602
  id: value.item.id
2585
2603
  });
2586
- } else if (value.item.type === "reasoning") {
2604
+ } else if (isResponseOutputItemAddedReasoningChunk(value)) {
2605
+ activeReasoning[value.item.id] = {
2606
+ encryptedContent: value.item.encrypted_content,
2607
+ summaryParts: [0]
2608
+ };
2587
2609
  controller.enqueue({
2588
2610
  type: "reasoning-start",
2589
- id: value.item.id,
2611
+ id: `${value.item.id}:0`,
2590
2612
  providerMetadata: {
2591
2613
  openai: {
2592
2614
  reasoning: {
@@ -2664,19 +2686,23 @@ var OpenAIResponsesLanguageModel = class {
2664
2686
  type: "text-end",
2665
2687
  id: value.item.id
2666
2688
  });
2667
- } else if (value.item.type === "reasoning") {
2668
- controller.enqueue({
2669
- type: "reasoning-end",
2670
- id: value.item.id,
2671
- providerMetadata: {
2672
- openai: {
2673
- reasoning: {
2674
- id: value.item.id,
2675
- encryptedContent: (_b = value.item.encrypted_content) != null ? _b : null
2689
+ } else if (isResponseOutputItemDoneReasoningChunk(value)) {
2690
+ const activeReasoningPart = activeReasoning[value.item.id];
2691
+ for (const summaryIndex of activeReasoningPart.summaryParts) {
2692
+ controller.enqueue({
2693
+ type: "reasoning-end",
2694
+ id: `${value.item.id}:${summaryIndex}`,
2695
+ providerMetadata: {
2696
+ openai: {
2697
+ reasoning: {
2698
+ id: value.item.id,
2699
+ encryptedContent: (_b = value.item.encrypted_content) != null ? _b : null
2700
+ }
2676
2701
  }
2677
2702
  }
2678
- }
2679
- });
2703
+ });
2704
+ }
2705
+ delete activeReasoning[value.item.id];
2680
2706
  }
2681
2707
  } else if (isResponseFunctionCallArgumentsDeltaChunk(value)) {
2682
2708
  const toolCall = ongoingToolCalls[value.output_index];
@@ -2701,30 +2727,57 @@ var OpenAIResponsesLanguageModel = class {
2701
2727
  id: value.item_id,
2702
2728
  delta: value.delta
2703
2729
  });
2730
+ } else if (isResponseReasoningSummaryPartAddedChunk(value)) {
2731
+ if (value.summary_index > 0) {
2732
+ (_c = activeReasoning[value.item_id]) == null ? void 0 : _c.summaryParts.push(
2733
+ value.summary_index
2734
+ );
2735
+ controller.enqueue({
2736
+ type: "reasoning-start",
2737
+ id: `${value.item_id}:${value.summary_index}`,
2738
+ providerMetadata: {
2739
+ openai: {
2740
+ reasoning: {
2741
+ id: value.item_id,
2742
+ encryptedContent: (_e = (_d = activeReasoning[value.item_id]) == null ? void 0 : _d.encryptedContent) != null ? _e : null
2743
+ }
2744
+ }
2745
+ }
2746
+ });
2747
+ }
2704
2748
  } else if (isResponseReasoningSummaryTextDeltaChunk(value)) {
2705
2749
  controller.enqueue({
2706
2750
  type: "reasoning-delta",
2707
- id: value.item_id,
2708
- delta: value.delta
2751
+ id: `${value.item_id}:${value.summary_index}`,
2752
+ delta: value.delta,
2753
+ providerMetadata: {
2754
+ openai: {
2755
+ reasoning: {
2756
+ id: value.item_id
2757
+ }
2758
+ }
2759
+ }
2709
2760
  });
2710
2761
  } else if (isResponseFinishedChunk(value)) {
2711
2762
  finishReason = mapOpenAIResponseFinishReason({
2712
- finishReason: (_c = value.response.incomplete_details) == null ? void 0 : _c.reason,
2763
+ finishReason: (_f = value.response.incomplete_details) == null ? void 0 : _f.reason,
2713
2764
  hasToolCalls
2714
2765
  });
2715
2766
  usage.inputTokens = value.response.usage.input_tokens;
2716
2767
  usage.outputTokens = value.response.usage.output_tokens;
2717
2768
  usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
2718
- usage.reasoningTokens = (_e = (_d = value.response.usage.output_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
2719
- usage.cachedInputTokens = (_g = (_f = value.response.usage.input_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
2769
+ usage.reasoningTokens = (_h = (_g = value.response.usage.output_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : void 0;
2770
+ usage.cachedInputTokens = (_j = (_i = value.response.usage.input_tokens_details) == null ? void 0 : _i.cached_tokens) != null ? _j : void 0;
2720
2771
  } else if (isResponseAnnotationAddedChunk(value)) {
2721
2772
  controller.enqueue({
2722
2773
  type: "source",
2723
2774
  sourceType: "url",
2724
- id: (_j = (_i = (_h = self.config).generateId) == null ? void 0 : _i.call(_h)) != null ? _j : (0, import_provider_utils11.generateId)(),
2775
+ id: (_m = (_l = (_k = self.config).generateId) == null ? void 0 : _l.call(_k)) != null ? _m : (0, import_provider_utils11.generateId)(),
2725
2776
  url: value.annotation.url,
2726
2777
  title: value.annotation.title
2727
2778
  });
2779
+ } else if (isErrorChunk(value)) {
2780
+ controller.enqueue({ type: "error", error: value });
2728
2781
  }
2729
2782
  },
2730
2783
  flush(controller) {
@@ -2757,6 +2810,13 @@ var textDeltaChunkSchema = import_v414.z.object({
2757
2810
  item_id: import_v414.z.string(),
2758
2811
  delta: import_v414.z.string()
2759
2812
  });
2813
+ var errorChunkSchema = import_v414.z.object({
2814
+ type: import_v414.z.literal("error"),
2815
+ code: import_v414.z.string(),
2816
+ message: import_v414.z.string(),
2817
+ param: import_v414.z.string().nullish(),
2818
+ sequence_number: import_v414.z.number()
2819
+ });
2760
2820
  var responseFinishedChunkSchema = import_v414.z.object({
2761
2821
  type: import_v414.z.enum(["response.completed", "response.incomplete"]),
2762
2822
  response: import_v414.z.object({
@@ -2783,13 +2843,7 @@ var responseOutputItemAddedSchema = import_v414.z.object({
2783
2843
  import_v414.z.object({
2784
2844
  type: import_v414.z.literal("reasoning"),
2785
2845
  id: import_v414.z.string(),
2786
- encrypted_content: import_v414.z.string().nullish(),
2787
- summary: import_v414.z.array(
2788
- import_v414.z.object({
2789
- type: import_v414.z.literal("summary_text"),
2790
- text: import_v414.z.string()
2791
- })
2792
- )
2846
+ encrypted_content: import_v414.z.string().nullish()
2793
2847
  }),
2794
2848
  import_v414.z.object({
2795
2849
  type: import_v414.z.literal("function_call"),
@@ -2821,13 +2875,7 @@ var responseOutputItemDoneSchema = import_v414.z.object({
2821
2875
  import_v414.z.object({
2822
2876
  type: import_v414.z.literal("reasoning"),
2823
2877
  id: import_v414.z.string(),
2824
- encrypted_content: import_v414.z.string().nullish(),
2825
- summary: import_v414.z.array(
2826
- import_v414.z.object({
2827
- type: import_v414.z.literal("summary_text"),
2828
- text: import_v414.z.string()
2829
- })
2830
- )
2878
+ encrypted_content: import_v414.z.string().nullish()
2831
2879
  }),
2832
2880
  import_v414.z.object({
2833
2881
  type: import_v414.z.literal("function_call"),
@@ -2863,9 +2911,15 @@ var responseAnnotationAddedSchema = import_v414.z.object({
2863
2911
  title: import_v414.z.string()
2864
2912
  })
2865
2913
  });
2914
+ var responseReasoningSummaryPartAddedSchema = import_v414.z.object({
2915
+ type: import_v414.z.literal("response.reasoning_summary_part.added"),
2916
+ item_id: import_v414.z.string(),
2917
+ summary_index: import_v414.z.number()
2918
+ });
2866
2919
  var responseReasoningSummaryTextDeltaSchema = import_v414.z.object({
2867
2920
  type: import_v414.z.literal("response.reasoning_summary_text.delta"),
2868
2921
  item_id: import_v414.z.string(),
2922
+ summary_index: import_v414.z.number(),
2869
2923
  delta: import_v414.z.string()
2870
2924
  });
2871
2925
  var openaiResponsesChunkSchema = import_v414.z.union([
@@ -2876,8 +2930,10 @@ var openaiResponsesChunkSchema = import_v414.z.union([
2876
2930
  responseOutputItemDoneSchema,
2877
2931
  responseFunctionCallArgumentsDeltaSchema,
2878
2932
  responseAnnotationAddedSchema,
2933
+ responseReasoningSummaryPartAddedSchema,
2879
2934
  responseReasoningSummaryTextDeltaSchema,
2880
- import_v414.z.object({ type: import_v414.z.string() }).passthrough()
2935
+ errorChunkSchema,
2936
+ import_v414.z.object({ type: import_v414.z.string() }).loose()
2881
2937
  // fallback for unknown chunks
2882
2938
  ]);
2883
2939
  function isTextDeltaChunk(chunk) {
@@ -2886,6 +2942,9 @@ function isTextDeltaChunk(chunk) {
2886
2942
  function isResponseOutputItemDoneChunk(chunk) {
2887
2943
  return chunk.type === "response.output_item.done";
2888
2944
  }
2945
+ function isResponseOutputItemDoneReasoningChunk(chunk) {
2946
+ return isResponseOutputItemDoneChunk(chunk) && chunk.item.type === "reasoning";
2947
+ }
2889
2948
  function isResponseFinishedChunk(chunk) {
2890
2949
  return chunk.type === "response.completed" || chunk.type === "response.incomplete";
2891
2950
  }
@@ -2898,12 +2957,21 @@ function isResponseFunctionCallArgumentsDeltaChunk(chunk) {
2898
2957
  function isResponseOutputItemAddedChunk(chunk) {
2899
2958
  return chunk.type === "response.output_item.added";
2900
2959
  }
2960
+ function isResponseOutputItemAddedReasoningChunk(chunk) {
2961
+ return isResponseOutputItemAddedChunk(chunk) && chunk.item.type === "reasoning";
2962
+ }
2901
2963
  function isResponseAnnotationAddedChunk(chunk) {
2902
2964
  return chunk.type === "response.output_text.annotation.added";
2903
2965
  }
2966
+ function isResponseReasoningSummaryPartAddedChunk(chunk) {
2967
+ return chunk.type === "response.reasoning_summary_part.added";
2968
+ }
2904
2969
  function isResponseReasoningSummaryTextDeltaChunk(chunk) {
2905
2970
  return chunk.type === "response.reasoning_summary_text.delta";
2906
2971
  }
2972
+ function isErrorChunk(chunk) {
2973
+ return chunk.type === "error";
2974
+ }
2907
2975
  function getResponsesModelConfig(modelId) {
2908
2976
  if (modelId.startsWith("o") || modelId.startsWith("codex-") || modelId.startsWith("computer-use")) {
2909
2977
  if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {