@ai-sdk/openai 2.0.67 → 2.0.69

This diff shows the published contents of the package versions as they appear in their public registries. It is provided for informational purposes only.
@@ -2754,10 +2754,13 @@ var openaiResponsesChunkSchema = lazyValidator11(
       }),
       z14.object({
         type: z14.literal("error"),
-        code: z14.string(),
-        message: z14.string(),
-        param: z14.string().nullish(),
-        sequence_number: z14.number()
+        sequence_number: z14.number(),
+        error: z14.object({
+          type: z14.string(),
+          code: z14.string(),
+          message: z14.string(),
+          param: z14.string().nullish()
+        })
       }),
       z14.object({ type: z14.string() }).loose().transform((value) => ({
         type: "unknown_chunk",
@@ -2770,13 +2773,15 @@ var openaiResponsesChunkSchema = lazyValidator11(
 var openaiResponsesResponseSchema = lazyValidator11(
   () => zodSchema12(
     z14.object({
-      id: z14.string(),
-      created_at: z14.number(),
+      id: z14.string().optional(),
+      created_at: z14.number().optional(),
       error: z14.object({
-        code: z14.string(),
-        message: z14.string()
+        message: z14.string(),
+        type: z14.string(),
+        param: z14.string().nullish(),
+        code: z14.string()
       }).nullish(),
-      model: z14.string(),
+      model: z14.string().optional(),
       output: z14.array(
         z14.discriminatedUnion("type", [
           z14.object({
@@ -2818,7 +2823,18 @@ var openaiResponsesResponseSchema = lazyValidator11(
             quote: z14.string().nullish()
           }),
           z14.object({
-            type: z14.literal("container_file_citation")
+            type: z14.literal("container_file_citation"),
+            container_id: z14.string(),
+            file_id: z14.string(),
+            filename: z14.string().nullish(),
+            start_index: z14.number().nullish(),
+            end_index: z14.number().nullish(),
+            index: z14.number().nullish()
+          }),
+          z14.object({
+            type: z14.literal("file_path"),
+            file_id: z14.string(),
+            index: z14.number().nullish()
           })
         ])
       )
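`container_file_citation` annotations now carry their payload fields, and a new `file_path` annotation variant is accepted. A sketch of objects that validate against the extended union (all ids and values are illustrative):

```ts
// Illustrative annotation payloads matching the extended union.
const containerCitation = {
  type: "container_file_citation" as const,
  container_id: "cntr_abc123", // illustrative id
  file_id: "file-xyz789",      // illustrative id
  filename: "report.pdf",
  start_index: 120,
  end_index: 184,
  index: 0,
};

const filePathAnnotation = {
  type: "file_path" as const,
  file_id: "file-xyz789",      // illustrative id
  index: 1,
};
```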
@@ -2857,7 +2873,10 @@ var openaiResponsesResponseSchema = lazyValidator11(
             queries: z14.array(z14.string()),
             results: z14.array(
               z14.object({
-                attributes: z14.record(z14.string(), z14.unknown()),
+                attributes: z14.record(
+                  z14.string(),
+                  z14.union([z14.string(), z14.number(), z14.boolean()])
+                ),
                 file_id: z14.string(),
                 filename: z14.string(),
                 score: z14.number(),
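File-search result `attributes` are narrowed from arbitrary `unknown` values to string/number/boolean primitives. A sketch of the effect, using a standalone zod schema equivalent to the one in the diff:

```ts
import { z } from "zod";

// Standalone equivalent of the narrowed attributes schema.
const attributes = z.record(
  z.string(),
  z.union([z.string(), z.number(), z.boolean()])
);

attributes.parse({ author: "Ada", year: 1843, draft: false }); // ok
attributes.parse({ tags: ["a", "b"] }); // throws: non-primitive values no longer accepted
```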
@@ -2919,7 +2938,7 @@ var openaiResponsesResponseSchema = lazyValidator11(
           )
         })
       ])
-      ),
+      ).optional(),
       service_tier: z14.string().nullish(),
       incomplete_details: z14.object({ reason: z14.string() }).nullish(),
       usage: z14.object({
@@ -2927,7 +2946,7 @@ var openaiResponsesResponseSchema = lazyValidator11(
         input_tokens_details: z14.object({ cached_tokens: z14.number().nullish() }).nullish(),
         output_tokens: z14.number(),
         output_tokens_details: z14.object({ reasoning_tokens: z14.number().nullish() }).nullish()
-      })
+      }).optional()
     })
   )
 );
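With `id`, `created_at`, `model`, `output`, and `usage` all optional, a parsed response is no longer guaranteed to carry these fields, so code reading them needs guards. A hedged sketch of defensive access (the type is illustrative, not the package's inferred schema type):

```ts
// Illustrative: the parsed response may now omit any of these fields.
type ParsedResponse = {
  id?: string;
  created_at?: number;
  model?: string;
  output?: unknown[];
  usage?: { input_tokens: number; output_tokens: number };
};

function summarize(response: ParsedResponse): string {
  const id = response.id ?? "<missing id>";
  const tokens = response.usage
    ? response.usage.input_tokens + response.usage.output_tokens
    : 0;
  return `${id}: ${tokens} tokens, ${response.output?.length ?? 0} output items`;
}
```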
@@ -3007,6 +3026,7 @@ var openaiResponsesModelIds = [
 var openaiResponsesProviderOptionsSchema = lazyValidator12(
   () => zodSchema13(
     z15.object({
+      conversation: z15.string().nullish(),
       include: z15.array(
         z15.enum([
           "reasoning.encrypted_content",
@@ -3515,6 +3535,13 @@ var OpenAIResponsesLanguageModel = class {
       providerOptions,
       schema: openaiResponsesProviderOptionsSchema
     });
+    if ((openaiOptions == null ? void 0 : openaiOptions.conversation) && (openaiOptions == null ? void 0 : openaiOptions.previousResponseId)) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "conversation",
+        details: "conversation and previousResponseId cannot be used together"
+      });
+    }
    const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
       prompt,
       systemMessageMode: modelConfig.systemMessageMode,
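Because `conversation` and `previousResponseId` are two competing ways to carry conversation state, supplying both now produces an `unsupported-setting` warning (the guard only warns; both values are still forwarded, as the next hunk shows). A sketch of a call that triggers it, with placeholder ids:

```ts
import { openai } from "@ai-sdk/openai";
import { generateText } from "ai";

const result = await generateText({
  model: openai.responses("gpt-4o"),  // placeholder model id
  prompt: "Hello again.",
  providerOptions: {
    openai: {
      conversation: "conv_123",       // placeholder
      previousResponseId: "resp_456", // placeholder
    },
  },
});

// result.warnings should now include:
// { type: "unsupported-setting", setting: "conversation",
//   details: "conversation and previousResponseId cannot be used together" }
```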
@@ -3577,6 +3604,7 @@ var OpenAIResponsesLanguageModel = class {
         }
       },
       // provider options:
+      conversation: openaiOptions == null ? void 0 : openaiOptions.conversation,
       max_tool_calls: openaiOptions == null ? void 0 : openaiOptions.maxToolCalls,
       metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
       parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
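On the wire, the option lands as a top-level `conversation` field in the Responses API request body, next to the other pass-through options. A rough sketch of the serialized body; field values are illustrative and unset options are omitted:

```ts
// Illustrative request body after provider options are applied.
const body = {
  model: "gpt-4o", // placeholder model id
  input: [{ role: "user", content: [{ type: "input_text", text: "Hi" }] }],
  conversation: "conv_123",  // new: from providerOptions.openai.conversation
  parallel_tool_calls: true, // existing pass-through option
};
```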
@@ -3914,7 +3942,9 @@ var OpenAIResponsesLanguageModel = class {
       }
     }
     const providerMetadata = {
-      openai: { responseId: response.id }
+      openai: {
+        ...response.id != null ? { responseId: response.id } : {}
+      }
     };
     if (logprobs.length > 0) {
       providerMetadata.openai.logprobs = logprobs;
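Since `response.id` may now be absent, `providerMetadata.openai.responseId` is only set when the API actually returned an id, so consumers should read it optionally. A hedged sketch of such a reader (the function is illustrative, not part of the package):

```ts
// Illustrative reader: responseId is set only when the API returned an id.
function getResponseId(
  providerMetadata?: Record<string, Record<string, unknown>>
): string | undefined {
  const id = providerMetadata?.openai?.responseId;
  return typeof id === "string" ? id : undefined;
}
```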
@@ -3922,6 +3952,7 @@ var OpenAIResponsesLanguageModel = class {
     if (typeof response.service_tier === "string") {
       providerMetadata.openai.serviceTier = response.service_tier;
     }
+    const usage = response.usage;
     return {
       content,
       finishReason: mapOpenAIResponseFinishReason({
@@ -3929,11 +3960,11 @@ var OpenAIResponsesLanguageModel = class {
         hasFunctionCall
       }),
       usage: {
-        inputTokens: response.usage.input_tokens,
-        outputTokens: response.usage.output_tokens,
-        totalTokens: response.usage.input_tokens + response.usage.output_tokens,
-        reasoningTokens: (_q = (_p = response.usage.output_tokens_details) == null ? void 0 : _p.reasoning_tokens) != null ? _q : void 0,
-        cachedInputTokens: (_s = (_r = response.usage.input_tokens_details) == null ? void 0 : _r.cached_tokens) != null ? _s : void 0
+        inputTokens: usage.input_tokens,
+        outputTokens: usage.output_tokens,
+        totalTokens: usage.input_tokens + usage.output_tokens,
+        reasoningTokens: (_q = (_p = usage.output_tokens_details) == null ? void 0 : _p.reasoning_tokens) != null ? _q : void 0,
+        cachedInputTokens: (_s = (_r = usage.input_tokens_details) == null ? void 0 : _r.cached_tokens) != null ? _s : void 0
       },
       request: { body },
       response: {
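Apart from the `usage` alias, the usage mapping itself is unchanged: `totalTokens` is still the sum of input and output tokens, and the detail counters pass through when present. A standalone sketch of the same mapping, with illustrative type names:

```ts
// Illustrative standalone version of the usage mapping above.
type RawUsage = {
  input_tokens: number;
  output_tokens: number;
  output_tokens_details?: { reasoning_tokens?: number | null } | null;
  input_tokens_details?: { cached_tokens?: number | null } | null;
};

function mapUsage(usage: RawUsage) {
  return {
    inputTokens: usage.input_tokens,
    outputTokens: usage.output_tokens,
    totalTokens: usage.input_tokens + usage.output_tokens,
    reasoningTokens: usage.output_tokens_details?.reasoning_tokens ?? undefined,
    cachedInputTokens: usage.input_tokens_details?.cached_tokens ?? undefined,
  };
}
```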