@ai-sdk/openai 2.0.68 → 2.0.70

package/CHANGELOG.md CHANGED
@@ -1,5 +1,17 @@
 # @ai-sdk/openai
 
+## 2.0.70
+
+### Patch Changes
+
+- dafda29: Set the annotations from the Responses API to doStream
+
+## 2.0.69
+
+### Patch Changes
+
+- 38a9f48: fix: error schema for Responses API
+
 ## 2.0.68
 
 ### Patch Changes
package/dist/index.js CHANGED
@@ -2648,10 +2648,13 @@ var openaiResponsesChunkSchema = (0, import_provider_utils21.lazyValidator)(
       }),
       import_v416.z.object({
         type: import_v416.z.literal("error"),
-        code: import_v416.z.string(),
-        message: import_v416.z.string(),
-        param: import_v416.z.string().nullish(),
-        sequence_number: import_v416.z.number()
+        sequence_number: import_v416.z.number(),
+        error: import_v416.z.object({
+          type: import_v416.z.string(),
+          code: import_v416.z.string(),
+          message: import_v416.z.string(),
+          param: import_v416.z.string().nullish()
+        })
       }),
       import_v416.z.object({ type: import_v416.z.string() }).loose().transform((value) => ({
         type: "unknown_chunk",
@@ -2664,13 +2667,15 @@ var openaiResponsesChunkSchema = (0, import_provider_utils21.lazyValidator)(
 var openaiResponsesResponseSchema = (0, import_provider_utils21.lazyValidator)(
   () => (0, import_provider_utils21.zodSchema)(
     import_v416.z.object({
-      id: import_v416.z.string(),
-      created_at: import_v416.z.number(),
+      id: import_v416.z.string().optional(),
+      created_at: import_v416.z.number().optional(),
       error: import_v416.z.object({
-        code: import_v416.z.string(),
-        message: import_v416.z.string()
+        message: import_v416.z.string(),
+        type: import_v416.z.string(),
+        param: import_v416.z.string().nullish(),
+        code: import_v416.z.string()
       }).nullish(),
-      model: import_v416.z.string(),
+      model: import_v416.z.string().optional(),
       output: import_v416.z.array(
         import_v416.z.discriminatedUnion("type", [
           import_v416.z.object({
@@ -2712,7 +2717,18 @@ var openaiResponsesResponseSchema = (0, import_provider_utils21.lazyValidator)(
             quote: import_v416.z.string().nullish()
           }),
           import_v416.z.object({
-            type: import_v416.z.literal("container_file_citation")
+            type: import_v416.z.literal("container_file_citation"),
+            container_id: import_v416.z.string(),
+            file_id: import_v416.z.string(),
+            filename: import_v416.z.string().nullish(),
+            start_index: import_v416.z.number().nullish(),
+            end_index: import_v416.z.number().nullish(),
+            index: import_v416.z.number().nullish()
+          }),
+          import_v416.z.object({
+            type: import_v416.z.literal("file_path"),
+            file_id: import_v416.z.string(),
+            index: import_v416.z.number().nullish()
           })
         ])
       )
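
For reference, annotation payloads matching the expanded union: `container_file_citation` entries now carry their file coordinates, and `file_path` annotations validate instead of falling out of the discriminated union. Values below are illustrative:

```js
// Illustrative annotation objects; identifiers and offsets are made up.
const containerFileCitation = {
  type: "container_file_citation",
  container_id: "cntr_abc123",
  file_id: "cfile_abc123",
  filename: "report.csv",
  start_index: 120,
  end_index: 142,
  index: 0
};

const filePathAnnotation = {
  type: "file_path",
  file_id: "file-abc123",
  index: 0
};
```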
@@ -2751,7 +2767,10 @@ var openaiResponsesResponseSchema = (0, import_provider_utils21.lazyValidator)(
           queries: import_v416.z.array(import_v416.z.string()),
           results: import_v416.z.array(
             import_v416.z.object({
-              attributes: import_v416.z.record(import_v416.z.string(), import_v416.z.unknown()),
+              attributes: import_v416.z.record(
+                import_v416.z.string(),
+                import_v416.z.union([import_v416.z.string(), import_v416.z.number(), import_v416.z.boolean()])
+              ),
               file_id: import_v416.z.string(),
               filename: import_v416.z.string(),
               score: import_v416.z.number(),
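
File-search result `attributes` are now constrained to primitive values rather than `unknown`. An illustrative result object, showing only the fields visible in this hunk:

```js
// Illustrative file-search result; attribute values must now be
// strings, numbers, or booleans.
const fileSearchResult = {
  attributes: { author: "jdoe", page: 12, draft: false },
  file_id: "file-abc123",
  filename: "notes.md",
  score: 0.87
};
```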
@@ -2813,7 +2832,7 @@ var openaiResponsesResponseSchema = (0, import_provider_utils21.lazyValidator)(
             )
           })
         ])
-      ),
+      ).optional(),
       service_tier: import_v416.z.string().nullish(),
       incomplete_details: import_v416.z.object({ reason: import_v416.z.string() }).nullish(),
       usage: import_v416.z.object({
@@ -2821,7 +2840,7 @@ var openaiResponsesResponseSchema = (0, import_provider_utils21.lazyValidator)(
         input_tokens_details: import_v416.z.object({ cached_tokens: import_v416.z.number().nullish() }).nullish(),
         output_tokens: import_v416.z.number(),
         output_tokens_details: import_v416.z.object({ reasoning_tokens: import_v416.z.number().nullish() }).nullish()
-      })
+      }).optional()
     })
   )
 );
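
Together, the `.optional()` additions (`id`, `created_at`, `model`, `output`, `usage`) let an error-only response body pass validation, so the error can be reported instead of surfacing as a schema-parse failure. A sketch of a body that now validates (values illustrative):

```js
// Illustrative error-only Responses API body: previously rejected
// because id/model/output/usage were required, now accepted so the
// nested error can be surfaced to the caller.
const errorOnlyResponse = {
  error: {
    message: "The requested model does not exist.",
    type: "invalid_request_error",
    param: "model",
    code: "model_not_found"
  }
};
```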
@@ -3558,7 +3577,9 @@ var OpenAIResponsesLanguageModel = class {
       }
     }
     const providerMetadata = {
-      openai: { responseId: response.id }
+      openai: {
+        ...response.id != null ? { responseId: response.id } : {}
+      }
     };
     if (logprobs.length > 0) {
      providerMetadata.openai.logprobs = logprobs;
@@ -3566,6 +3587,7 @@ var OpenAIResponsesLanguageModel = class {
     if (typeof response.service_tier === "string") {
       providerMetadata.openai.serviceTier = response.service_tier;
     }
+    const usage = response.usage;
     return {
       content,
       finishReason: mapOpenAIResponseFinishReason({
@@ -3573,11 +3595,11 @@ var OpenAIResponsesLanguageModel = class {
         hasFunctionCall
       }),
       usage: {
-        inputTokens: response.usage.input_tokens,
-        outputTokens: response.usage.output_tokens,
-        totalTokens: response.usage.input_tokens + response.usage.output_tokens,
-        reasoningTokens: (_q = (_p = response.usage.output_tokens_details) == null ? void 0 : _p.reasoning_tokens) != null ? _q : void 0,
-        cachedInputTokens: (_s = (_r = response.usage.input_tokens_details) == null ? void 0 : _r.cached_tokens) != null ? _s : void 0
+        inputTokens: usage.input_tokens,
+        outputTokens: usage.output_tokens,
+        totalTokens: usage.input_tokens + usage.output_tokens,
+        reasoningTokens: (_q = (_p = usage.output_tokens_details) == null ? void 0 : _p.reasoning_tokens) != null ? _q : void 0,
+        cachedInputTokens: (_s = (_r = usage.input_tokens_details) == null ? void 0 : _r.cached_tokens) != null ? _s : void 0
       },
       request: { body },
       response: {
@@ -3625,6 +3647,7 @@ var OpenAIResponsesLanguageModel = class {
     const logprobs = [];
     let responseId = null;
     const ongoingToolCalls = {};
+    const ongoingAnnotations = [];
     let hasFunctionCall = false;
     const activeReasoning = {};
     let serviceTier;
@@ -3725,6 +3748,7 @@ var OpenAIResponsesLanguageModel = class {
             providerExecuted: true
           });
         } else if (value.item.type === "message") {
+          ongoingAnnotations.splice(0, ongoingAnnotations.length);
           controller.enqueue({
             type: "text-start",
             id: value.item.id,
@@ -3750,7 +3774,7 @@ var OpenAIResponsesLanguageModel = class {
             }
           });
         }
-      } else if (isResponseOutputItemDoneChunk(value)) {
+      } else if (isResponseOutputItemDoneChunk(value) && value.item.type !== "message") {
         if (value.item.type === "function_call") {
           ongoingToolCalls[value.output_index] = void 0;
           hasFunctionCall = true;
@@ -3860,11 +3884,6 @@ var OpenAIResponsesLanguageModel = class {
             openai: { itemId: value.item.id }
           }
         });
-      } else if (value.item.type === "message") {
-        controller.enqueue({
-          type: "text-end",
-          id: value.item.id
-        });
       } else if (value.item.type === "reasoning") {
         const activeReasoningPart = activeReasoning[value.item.id];
         const summaryPartIndices = Object.entries(
@@ -4011,6 +4030,7 @@ var OpenAIResponsesLanguageModel = class {
           serviceTier = value.response.service_tier;
         }
       } else if (isResponseAnnotationAddedChunk(value)) {
+        ongoingAnnotations.push(value.annotation);
         if (value.annotation.type === "url_citation") {
           controller.enqueue({
             type: "source",
@@ -4036,6 +4056,19 @@ var OpenAIResponsesLanguageModel = class {
             } : {}
           });
         }
+      } else if (isResponseOutputItemDoneChunk(value) && value.item.type === "message") {
+        controller.enqueue({
+          type: "text-end",
+          id: value.item.id,
+          providerMetadata: {
+            openai: {
+              itemId: value.item.id,
+              ...ongoingAnnotations.length > 0 && {
+                annotations: ongoingAnnotations
+              }
+            }
+          }
+        });
       } else if (isErrorChunk(value)) {
         controller.enqueue({ type: "error", error: value });
       }
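
This is the 2.0.70 change from the changelog: annotations are buffered per message and emitted on the `text-end` stream part's `openai` provider metadata in `doStream`. A minimal consumer sketch, assuming the AI SDK v5 `streamText`/`fullStream` surface; the model id and prompt are placeholders, and `annotations` only appears when the response actually carried citations:

```js
import { openai } from "@ai-sdk/openai";
import { streamText } from "ai";

// Minimal sketch: annotations that the Responses API attached to a
// message (e.g. url_citation entries) now ride along on the text-end
// part's openai provider metadata.
const result = streamText({
  model: openai.responses("gpt-4.1-mini"),
  prompt: "Summarize today's top headline.",
});

for await (const part of result.fullStream) {
  if (part.type === "text-end") {
    const annotations = part.providerMetadata?.openai?.annotations;
    if (annotations) console.log("annotations:", annotations);
  }
}
```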
@@ -4493,7 +4526,7 @@ var OpenAITranscriptionModel = class {
 };
 
 // src/version.ts
-var VERSION = true ? "2.0.68" : "0.0.0-test";
+var VERSION = true ? "2.0.70" : "0.0.0-test";
 
 // src/openai-provider.ts
 function createOpenAI(options = {}) {