@ai-sdk/openai 2.0.67 → 2.0.69

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2715,10 +2715,13 @@ var openaiResponsesChunkSchema = (0, import_provider_utils21.lazyValidator)(
   }),
   import_v414.z.object({
     type: import_v414.z.literal("error"),
-    code: import_v414.z.string(),
-    message: import_v414.z.string(),
-    param: import_v414.z.string().nullish(),
-    sequence_number: import_v414.z.number()
+    sequence_number: import_v414.z.number(),
+    error: import_v414.z.object({
+      type: import_v414.z.string(),
+      code: import_v414.z.string(),
+      message: import_v414.z.string(),
+      param: import_v414.z.string().nullish()
+    })
   }),
   import_v414.z.object({ type: import_v414.z.string() }).loose().transform((value) => ({
     type: "unknown_chunk",
@@ -2731,13 +2734,15 @@ var openaiResponsesChunkSchema = (0, import_provider_utils21.lazyValidator)(
 var openaiResponsesResponseSchema = (0, import_provider_utils21.lazyValidator)(
   () => (0, import_provider_utils21.zodSchema)(
     import_v414.z.object({
-      id: import_v414.z.string(),
-      created_at: import_v414.z.number(),
+      id: import_v414.z.string().optional(),
+      created_at: import_v414.z.number().optional(),
       error: import_v414.z.object({
-        code: import_v414.z.string(),
-        message: import_v414.z.string()
+        message: import_v414.z.string(),
+        type: import_v414.z.string(),
+        param: import_v414.z.string().nullish(),
+        code: import_v414.z.string()
       }).nullish(),
-      model: import_v414.z.string(),
+      model: import_v414.z.string().optional(),
       output: import_v414.z.array(
         import_v414.z.discriminatedUnion("type", [
           import_v414.z.object({
@@ -2779,7 +2784,18 @@ var openaiResponsesResponseSchema = (0, import_provider_utils21.lazyValidator)(
             quote: import_v414.z.string().nullish()
           }),
           import_v414.z.object({
-            type: import_v414.z.literal("container_file_citation")
+            type: import_v414.z.literal("container_file_citation"),
+            container_id: import_v414.z.string(),
+            file_id: import_v414.z.string(),
+            filename: import_v414.z.string().nullish(),
+            start_index: import_v414.z.number().nullish(),
+            end_index: import_v414.z.number().nullish(),
+            index: import_v414.z.number().nullish()
+          }),
+          import_v414.z.object({
+            type: import_v414.z.literal("file_path"),
+            file_id: import_v414.z.string(),
+            index: import_v414.z.number().nullish()
           })
         ])
       )
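
The `container_file_citation` annotation previously validated only its `type` tag; it now captures the container and file identifiers plus character offsets, and a new `file_path` annotation variant is accepted. A sketch of the two shapes with hypothetical payloads (plain Zod v4 import):

```ts
import { z } from "zod";

const containerFileCitation = z.object({
  type: z.literal("container_file_citation"),
  container_id: z.string(),
  file_id: z.string(),
  filename: z.string().nullish(),
  start_index: z.number().nullish(),
  end_index: z.number().nullish(),
  index: z.number().nullish(),
});

const filePath = z.object({
  type: z.literal("file_path"),
  file_id: z.string(),
  index: z.number().nullish(),
});

// Both parse: the nullish fields may be omitted entirely.
containerFileCitation.parse({ type: "container_file_citation", container_id: "cntr_abc", file_id: "file_123" });
filePath.parse({ type: "file_path", file_id: "file_123", index: 0 });
```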
@@ -2818,7 +2834,10 @@ var openaiResponsesResponseSchema = (0, import_provider_utils21.lazyValidator)(
             queries: import_v414.z.array(import_v414.z.string()),
             results: import_v414.z.array(
               import_v414.z.object({
-                attributes: import_v414.z.record(import_v414.z.string(), import_v414.z.unknown()),
+                attributes: import_v414.z.record(
+                  import_v414.z.string(),
+                  import_v414.z.union([import_v414.z.string(), import_v414.z.number(), import_v414.z.boolean()])
+                ),
                 file_id: import_v414.z.string(),
                 filename: import_v414.z.string(),
                 score: import_v414.z.number(),
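
File-search result `attributes` were previously typed as a record of `unknown` values; they are now constrained to string, number, or boolean. A before/after sketch:

```ts
import { z } from "zod";

// 2.0.67: any attribute value passed validation.
const before = z.record(z.string(), z.unknown());

// 2.0.69: values are narrowed to scalar types.
const after = z.record(
  z.string(),
  z.union([z.string(), z.number(), z.boolean()]),
);

after.parse({ author: "jane", year: 2024, draft: false });  // ok
console.log(after.safeParse({ nested: { a: 1 } }).success); // false
```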
@@ -2880,7 +2899,7 @@ var openaiResponsesResponseSchema = (0, import_provider_utils21.lazyValidator)(
           )
         })
       ])
-    ),
+    ).optional(),
     service_tier: import_v414.z.string().nullish(),
     incomplete_details: import_v414.z.object({ reason: import_v414.z.string() }).nullish(),
     usage: import_v414.z.object({
@@ -2888,7 +2907,7 @@ var openaiResponsesResponseSchema = (0, import_provider_utils21.lazyValidator)(
       input_tokens_details: import_v414.z.object({ cached_tokens: import_v414.z.number().nullish() }).nullish(),
       output_tokens: import_v414.z.number(),
       output_tokens_details: import_v414.z.object({ reasoning_tokens: import_v414.z.number().nullish() }).nullish()
-    })
+    }).optional()
   })
  )
);
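
Taken together, the response-schema changes make `id`, `created_at`, `model`, `output`, and `usage` optional and extend the `error` object with `type` and `param`, which suggests the intent is to let error-only payloads through validation so the error itself can be surfaced. A hypothetical payload that parses only under the relaxed 2.0.69 schema:

```ts
// No id/created_at/model/output/usage: required in 2.0.67, optional in 2.0.69.
const errorOnlyResponse = {
  error: {
    message: "The requested model does not exist.",
    type: "invalid_request_error",
    param: null,
    code: "model_not_found",
  },
};
```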
@@ -2965,6 +2984,7 @@ var openaiResponsesModelIds = [
 var openaiResponsesProviderOptionsSchema = (0, import_provider_utils22.lazyValidator)(
   () => (0, import_provider_utils22.zodSchema)(
     import_v415.z.object({
+      conversation: import_v415.z.string().nullish(),
       include: import_v415.z.array(
         import_v415.z.enum([
           "reasoning.encrypted_content",
@@ -3451,6 +3471,13 @@ var OpenAIResponsesLanguageModel = class {
       providerOptions,
       schema: openaiResponsesProviderOptionsSchema
     });
+    if ((openaiOptions == null ? void 0 : openaiOptions.conversation) && (openaiOptions == null ? void 0 : openaiOptions.previousResponseId)) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "conversation",
+        details: "conversation and previousResponseId cannot be used together"
+      });
+    }
     const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
       prompt,
       systemMessageMode: modelConfig.systemMessageMode,
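
The guard above emits a call warning rather than throwing: per the details string, `conversation` and `previousResponseId` cannot be used together. A sketch of how the warning would surface to a caller:

```ts
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const conflicting = await generateText({
  model: openai.responses("gpt-4o"),
  prompt: "Hello",
  providerOptions: {
    openai: { conversation: "conv_123", previousResponseId: "resp_456" },
  },
});

// Expect an entry matching the object pushed in the diff:
// { type: "unsupported-setting", setting: "conversation",
//   details: "conversation and previousResponseId cannot be used together" }
console.log(conflicting.warnings);
```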
@@ -3513,6 +3540,7 @@ var OpenAIResponsesLanguageModel = class {
       }
     },
     // provider options:
+    conversation: openaiOptions == null ? void 0 : openaiOptions.conversation,
     max_tool_calls: openaiOptions == null ? void 0 : openaiOptions.maxToolCalls,
     metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
     parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
@@ -3850,7 +3878,9 @@ var OpenAIResponsesLanguageModel = class {
       }
     }
     const providerMetadata = {
-      openai: { responseId: response.id }
+      openai: {
+        ...response.id != null ? { responseId: response.id } : {}
+      }
     };
     if (logprobs.length > 0) {
       providerMetadata.openai.logprobs = logprobs;
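
`responseId` is now included in `providerMetadata.openai` only when `response.id` is non-null, consistent with `id` becoming optional in the response schema. Downstream code should read it defensively; a sketch, given a result from `generateText` as in the earlier examples:

```ts
import type { generateText } from "ai";

function logResponseId(result: Awaited<ReturnType<typeof generateText>>) {
  // responseId may now be absent from the metadata object entirely.
  const responseId = result.providerMetadata?.openai?.responseId as string | undefined;
  if (responseId != null) {
    console.log(`Responses API response id: ${responseId}`);
  }
}
```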
@@ -3858,6 +3888,7 @@ var OpenAIResponsesLanguageModel = class {
     if (typeof response.service_tier === "string") {
       providerMetadata.openai.serviceTier = response.service_tier;
     }
+    const usage = response.usage;
     return {
       content,
       finishReason: mapOpenAIResponseFinishReason({
@@ -3865,11 +3896,11 @@ var OpenAIResponsesLanguageModel = class {
         hasFunctionCall
       }),
       usage: {
-        inputTokens: response.usage.input_tokens,
-        outputTokens: response.usage.output_tokens,
-        totalTokens: response.usage.input_tokens + response.usage.output_tokens,
-        reasoningTokens: (_q = (_p = response.usage.output_tokens_details) == null ? void 0 : _p.reasoning_tokens) != null ? _q : void 0,
-        cachedInputTokens: (_s = (_r = response.usage.input_tokens_details) == null ? void 0 : _r.cached_tokens) != null ? _s : void 0
+        inputTokens: usage.input_tokens,
+        outputTokens: usage.output_tokens,
+        totalTokens: usage.input_tokens + usage.output_tokens,
+        reasoningTokens: (_q = (_p = usage.output_tokens_details) == null ? void 0 : _p.reasoning_tokens) != null ? _q : void 0,
+        cachedInputTokens: (_s = (_r = usage.input_tokens_details) == null ? void 0 : _r.cached_tokens) != null ? _s : void 0
       },
       request: { body },
       response: {