@ai-sdk/openai 2.0.67 → 2.0.69

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -2716,10 +2716,13 @@ var openaiResponsesChunkSchema = lazyValidator8(
   }),
   z16.object({
     type: z16.literal("error"),
-    code: z16.string(),
-    message: z16.string(),
-    param: z16.string().nullish(),
-    sequence_number: z16.number()
+    sequence_number: z16.number(),
+    error: z16.object({
+      type: z16.string(),
+      code: z16.string(),
+      message: z16.string(),
+      param: z16.string().nullish()
+    })
   }),
   z16.object({ type: z16.string() }).loose().transform((value) => ({
     type: "unknown_chunk",
@@ -2732,13 +2735,15 @@ var openaiResponsesChunkSchema = lazyValidator8(
 var openaiResponsesResponseSchema = lazyValidator8(
   () => zodSchema14(
     z16.object({
-      id: z16.string(),
-      created_at: z16.number(),
+      id: z16.string().optional(),
+      created_at: z16.number().optional(),
       error: z16.object({
-        code: z16.string(),
-        message: z16.string()
+        message: z16.string(),
+        type: z16.string(),
+        param: z16.string().nullish(),
+        code: z16.string()
       }).nullish(),
-      model: z16.string(),
+      model: z16.string().optional(),
       output: z16.array(
         z16.discriminatedUnion("type", [
           z16.object({
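
The hunk above makes id, created_at, and model optional and widens the nested error object with type and param, so a failure body that carries only an error now validates instead of failing the response parse. An invented example of such a body:

    // Illustrative error-only response body the relaxed schema now accepts;
    // note the absence of id, created_at, and model (all values invented).
    const failedResponse = {
      error: {
        message: "The server had an error while processing your request.",
        type: "server_error",
        param: null,
        code: "server_error"
      }
    };
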
@@ -2780,7 +2785,18 @@ var openaiResponsesResponseSchema = lazyValidator8(
             quote: z16.string().nullish()
           }),
           z16.object({
-            type: z16.literal("container_file_citation")
+            type: z16.literal("container_file_citation"),
+            container_id: z16.string(),
+            file_id: z16.string(),
+            filename: z16.string().nullish(),
+            start_index: z16.number().nullish(),
+            end_index: z16.number().nullish(),
+            index: z16.number().nullish()
+          }),
+          z16.object({
+            type: z16.literal("file_path"),
+            file_id: z16.string(),
+            index: z16.number().nullish()
           })
         ])
       )
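
The hunk above fleshes out container_file_citation annotations with their id and offset fields and adds a new file_path annotation variant to the union. Sketches of payloads the extended union accepts (ids, filename, and offsets are placeholders):

    // Hypothetical annotation payloads; identifiers are placeholders.
    const containerFileCitation = {
      type: "container_file_citation",
      container_id: "cntr_abc",
      file_id: "file_123",
      filename: "report.csv",
      start_index: 10,
      end_index: 42,
      index: 0
    };
    const filePathAnnotation = {
      type: "file_path",
      file_id: "file_456",
      index: 1
    };
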
@@ -2819,7 +2835,10 @@ var openaiResponsesResponseSchema = lazyValidator8(
             queries: z16.array(z16.string()),
             results: z16.array(
               z16.object({
-                attributes: z16.record(z16.string(), z16.unknown()),
+                attributes: z16.record(
+                  z16.string(),
+                  z16.union([z16.string(), z16.number(), z16.boolean()])
+                ),
                 file_id: z16.string(),
                 filename: z16.string(),
                 score: z16.number(),
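
The attributes map on file search results is narrowed here from unknown values to string | number | boolean. An invented result that satisfies the narrowed schema:

    // Illustrative file_search result under the narrowed schema; data invented.
    const searchResult = {
      attributes: { author: "jane", year: 2024, draft: false },
      file_id: "file_789",
      filename: "notes.md",
      score: 0.87
    };
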
@@ -2881,7 +2900,7 @@ var openaiResponsesResponseSchema = lazyValidator8(
             )
           })
         ])
-      ),
+      ).optional(),
       service_tier: z16.string().nullish(),
       incomplete_details: z16.object({ reason: z16.string() }).nullish(),
       usage: z16.object({
@@ -2889,7 +2908,7 @@ var openaiResponsesResponseSchema = lazyValidator8(
         input_tokens_details: z16.object({ cached_tokens: z16.number().nullish() }).nullish(),
         output_tokens: z16.number(),
         output_tokens_details: z16.object({ reasoning_tokens: z16.number().nullish() }).nullish()
-      })
+      }).optional()
     })
   )
 );
@@ -2969,6 +2988,7 @@ var openaiResponsesModelIds = [
 var openaiResponsesProviderOptionsSchema = lazyValidator9(
   () => zodSchema15(
     z17.object({
+      conversation: z17.string().nullish(),
       include: z17.array(
         z17.enum([
           "reasoning.encrypted_content",
@@ -3223,6 +3243,13 @@ var OpenAIResponsesLanguageModel = class {
       providerOptions,
       schema: openaiResponsesProviderOptionsSchema
     });
+    if ((openaiOptions == null ? void 0 : openaiOptions.conversation) && (openaiOptions == null ? void 0 : openaiOptions.previousResponseId)) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "conversation",
+        details: "conversation and previousResponseId cannot be used together"
+      });
+    }
     const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
       prompt,
       systemMessageMode: modelConfig.systemMessageMode,
@@ -3285,6 +3312,7 @@ var OpenAIResponsesLanguageModel = class {
         }
       },
       // provider options:
+      conversation: openaiOptions == null ? void 0 : openaiOptions.conversation,
       max_tool_calls: openaiOptions == null ? void 0 : openaiOptions.maxToolCalls,
       metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
       parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
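
Taken together, the three hunks above add a conversation provider option that is forwarded to the Responses API request body, and emit an "unsupported-setting" warning when it is combined with previousResponseId. A minimal usage sketch, assuming an AI SDK 5 setup; the conversation id is a placeholder:

    import { generateText } from "ai";
    import { openai } from "@ai-sdk/openai";

    // Pass the new `conversation` option through providerOptions. Do not set
    // previousResponseId at the same time: the provider now warns that the
    // two settings cannot be used together. "conv_123" is a placeholder id.
    const { text } = await generateText({
      model: openai.responses("gpt-4o"),
      prompt: "Continue where we left off.",
      providerOptions: {
        openai: { conversation: "conv_123" }
      }
    });
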
@@ -3622,7 +3650,9 @@ var OpenAIResponsesLanguageModel = class {
       }
     }
     const providerMetadata = {
-      openai: { responseId: response.id }
+      openai: {
+        ...response.id != null ? { responseId: response.id } : {}
+      }
     };
     if (logprobs.length > 0) {
       providerMetadata.openai.logprobs = logprobs;
@@ -3630,6 +3660,7 @@ var OpenAIResponsesLanguageModel = class {
     if (typeof response.service_tier === "string") {
       providerMetadata.openai.serviceTier = response.service_tier;
     }
+    const usage = response.usage;
     return {
       content,
       finishReason: mapOpenAIResponseFinishReason({
@@ -3637,11 +3668,11 @@ var OpenAIResponsesLanguageModel = class {
         hasFunctionCall
       }),
       usage: {
-        inputTokens: response.usage.input_tokens,
-        outputTokens: response.usage.output_tokens,
-        totalTokens: response.usage.input_tokens + response.usage.output_tokens,
-        reasoningTokens: (_q = (_p = response.usage.output_tokens_details) == null ? void 0 : _p.reasoning_tokens) != null ? _q : void 0,
-        cachedInputTokens: (_s = (_r = response.usage.input_tokens_details) == null ? void 0 : _r.cached_tokens) != null ? _s : void 0
+        inputTokens: usage.input_tokens,
+        outputTokens: usage.output_tokens,
+        totalTokens: usage.input_tokens + usage.output_tokens,
+        reasoningTokens: (_q = (_p = usage.output_tokens_details) == null ? void 0 : _p.reasoning_tokens) != null ? _q : void 0,
+        cachedInputTokens: (_s = (_r = usage.input_tokens_details) == null ? void 0 : _r.cached_tokens) != null ? _s : void 0
       },
       request: { body },
       response: {
@@ -4575,7 +4606,7 @@ var OpenAITranscriptionModel = class {
 };

 // src/version.ts
-var VERSION = true ? "2.0.67" : "0.0.0-test";
+var VERSION = true ? "2.0.69" : "0.0.0-test";

 // src/openai-provider.ts
 function createOpenAI(options = {}) {