@ai-sdk/openai 2.0.67 → 2.0.69

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,17 @@
  # @ai-sdk/openai

+ ## 2.0.69
+
+ ### Patch Changes
+
+ - 38a9f48: fix: error schema for Responses API
+
+ ## 2.0.68
+
+ ### Patch Changes
+
+ - a4c6a12: feat(provider/openai): support conversations api
+
  ## 2.0.67

  ### Patch Changes
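The conversations api entry maps to a new `conversation` provider option (see the `dist` changes below). A minimal usage sketch, assuming the standard AI SDK `providerOptions.openai` pass-through; the conversation id is a placeholder:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Sketch: attach the request to an existing Conversations API conversation.
// 'conv_123' is a placeholder id, not a real value.
const result = await generateText({
  model: openai.responses('gpt-4o'),
  prompt: 'Continue where we left off.',
  providerOptions: {
    openai: { conversation: 'conv_123' },
  },
});
```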
package/dist/index.d.mts CHANGED
@@ -325,6 +325,7 @@ declare const openaiTools: {

  type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
  declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazyValidator<{
+ conversation?: string | null | undefined;
  include?: ("file_search_call.results" | "message.output_text.logprobs" | "reasoning.encrypted_content")[] | null | undefined;
  instructions?: string | null | undefined;
  logprobs?: number | boolean | undefined;
package/dist/index.d.ts CHANGED
@@ -325,6 +325,7 @@ declare const openaiTools: {

  type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
  declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazyValidator<{
+ conversation?: string | null | undefined;
  include?: ("file_search_call.results" | "message.output_text.logprobs" | "reasoning.encrypted_content")[] | null | undefined;
  instructions?: string | null | undefined;
  logprobs?: number | boolean | undefined;
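Both declaration files gain the same optional key; in outline, the updated provider-options shape is (a sketch that abbreviates the unchanged keys):

```ts
// Outline of the updated provider options type (abbreviated sketch):
type OpenAIResponsesProviderOptions = {
  conversation?: string | null | undefined; // new in 2.0.68
  include?: ('file_search_call.results' | 'message.output_text.logprobs' | 'reasoning.encrypted_content')[] | null | undefined;
  instructions?: string | null | undefined;
  logprobs?: number | boolean | undefined;
  // ...remaining options unchanged
};
```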
package/dist/index.js CHANGED
@@ -2648,10 +2648,13 @@ var openaiResponsesChunkSchema = (0, import_provider_utils21.lazyValidator)(
  }),
  import_v416.z.object({
  type: import_v416.z.literal("error"),
- code: import_v416.z.string(),
- message: import_v416.z.string(),
- param: import_v416.z.string().nullish(),
- sequence_number: import_v416.z.number()
+ sequence_number: import_v416.z.number(),
+ error: import_v416.z.object({
+ type: import_v416.z.string(),
+ code: import_v416.z.string(),
+ message: import_v416.z.string(),
+ param: import_v416.z.string().nullish()
+ })
  }),
  import_v416.z.object({ type: import_v416.z.string() }).loose().transform((value) => ({
  type: "unknown_chunk",
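This is the 2.0.69 fix: the streamed error chunk's details are now nested under an `error` object rather than flattened onto the chunk itself. An illustrative chunk that the corrected schema accepts (field values made up):

```ts
// Illustrative payload matching the updated error-chunk schema:
const errorChunk = {
  type: 'error',
  sequence_number: 5,
  error: {
    type: 'invalid_request_error', // made-up values for illustration
    code: 'invalid_value',
    message: 'Invalid value for parameter.',
    param: 'input',
  },
};
```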
@@ -2664,13 +2667,15 @@ var openaiResponsesChunkSchema = (0, import_provider_utils21.lazyValidator)(
  var openaiResponsesResponseSchema = (0, import_provider_utils21.lazyValidator)(
  () => (0, import_provider_utils21.zodSchema)(
  import_v416.z.object({
- id: import_v416.z.string(),
- created_at: import_v416.z.number(),
+ id: import_v416.z.string().optional(),
+ created_at: import_v416.z.number().optional(),
  error: import_v416.z.object({
- code: import_v416.z.string(),
- message: import_v416.z.string()
+ message: import_v416.z.string(),
+ type: import_v416.z.string(),
+ param: import_v416.z.string().nullish(),
+ code: import_v416.z.string()
  }).nullish(),
- model: import_v416.z.string(),
+ model: import_v416.z.string().optional(),
  output: import_v416.z.array(
  import_v416.z.discriminatedUnion("type", [
  import_v416.z.object({
@@ -2712,7 +2717,18 @@ var openaiResponsesResponseSchema = (0, import_provider_utils21.lazyValidator)(
  quote: import_v416.z.string().nullish()
  }),
  import_v416.z.object({
- type: import_v416.z.literal("container_file_citation")
+ type: import_v416.z.literal("container_file_citation"),
+ container_id: import_v416.z.string(),
+ file_id: import_v416.z.string(),
+ filename: import_v416.z.string().nullish(),
+ start_index: import_v416.z.number().nullish(),
+ end_index: import_v416.z.number().nullish(),
+ index: import_v416.z.number().nullish()
+ }),
+ import_v416.z.object({
+ type: import_v416.z.literal("file_path"),
+ file_id: import_v416.z.string(),
+ index: import_v416.z.number().nullish()
  })
  ])
  )
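`container_file_citation` annotations are now fully modeled, and a `file_path` variant is added to the union. Illustrative objects that the extended union parses (all ids and offsets made up):

```ts
// container_file_citation with its newly modeled fields:
const containerCitation = {
  type: 'container_file_citation',
  container_id: 'cntr_abc', // made-up id
  file_id: 'file_123',
  filename: 'report.csv',
  start_index: 10,
  end_index: 42,
  index: 0,
};

// The new file_path annotation variant:
const filePathAnnotation = {
  type: 'file_path',
  file_id: 'file_123', // made-up id
  index: 0,
};
```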
@@ -2751,7 +2767,10 @@ var openaiResponsesResponseSchema = (0, import_provider_utils21.lazyValidator)(
  queries: import_v416.z.array(import_v416.z.string()),
  results: import_v416.z.array(
  import_v416.z.object({
- attributes: import_v416.z.record(import_v416.z.string(), import_v416.z.unknown()),
+ attributes: import_v416.z.record(
+ import_v416.z.string(),
+ import_v416.z.union([import_v416.z.string(), import_v416.z.number(), import_v416.z.boolean()])
+ ),
  file_id: import_v416.z.string(),
  filename: import_v416.z.string(),
  score: import_v416.z.number(),
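File-search result `attributes` values are narrowed from `unknown` to primitive values; an illustrative record under the narrowed schema (values made up):

```ts
// Illustrative attributes record: values must be string, number, or boolean.
const attributes: Record<string, string | number | boolean> = {
  author: 'Jane Doe', // made-up values
  page: 3,
  draft: false,
};
```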
@@ -2813,7 +2832,7 @@ var openaiResponsesResponseSchema = (0, import_provider_utils21.lazyValidator)(
  )
  })
  ])
- ),
+ ).optional(),
  service_tier: import_v416.z.string().nullish(),
  incomplete_details: import_v416.z.object({ reason: import_v416.z.string() }).nullish(),
  usage: import_v416.z.object({
@@ -2821,7 +2840,7 @@ var openaiResponsesResponseSchema = (0, import_provider_utils21.lazyValidator)(
  input_tokens_details: import_v416.z.object({ cached_tokens: import_v416.z.number().nullish() }).nullish(),
  output_tokens: import_v416.z.number(),
  output_tokens_details: import_v416.z.object({ reasoning_tokens: import_v416.z.number().nullish() }).nullish()
- })
+ }).optional()
  })
  )
  );
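With `id`, `created_at`, `model`, `output`, and `usage` now optional, a body that carries only an `error` object (the case the 2.0.69 changelog entry addresses) validates instead of failing schema parsing. An illustrative minimal body:

```ts
// Minimal error-only response body that the relaxed schema now accepts:
const errorOnlyResponse = {
  error: {
    message: 'The server had an error processing your request.', // illustrative
    type: 'server_error',
    param: null,
    code: 'server_error',
  },
};
```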
@@ -2898,6 +2917,7 @@ var openaiResponsesModelIds = [
  var openaiResponsesProviderOptionsSchema = (0, import_provider_utils22.lazyValidator)(
  () => (0, import_provider_utils22.zodSchema)(
  import_v417.z.object({
+ conversation: import_v417.z.string().nullish(),
  include: import_v417.z.array(
  import_v417.z.enum([
  "reasoning.encrypted_content",
@@ -3150,6 +3170,13 @@ var OpenAIResponsesLanguageModel = class {
  providerOptions,
  schema: openaiResponsesProviderOptionsSchema
  });
+ if ((openaiOptions == null ? void 0 : openaiOptions.conversation) && (openaiOptions == null ? void 0 : openaiOptions.previousResponseId)) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "conversation",
+ details: "conversation and previousResponseId cannot be used together"
+ });
+ }
  const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
  prompt,
  systemMessageMode: modelConfig.systemMessageMode,
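A sketch of that guard's effect, assuming both options arrive via `providerOptions.openai` (ids are placeholders): the call still proceeds, but a warning is surfaced on the result.

```ts
// Supplying both options triggers the 'unsupported-setting' warning above.
const { warnings } = await generateText({
  model: openai.responses('gpt-4o'),
  prompt: 'Hello',
  providerOptions: {
    openai: {
      conversation: 'conv_123', // placeholder
      previousResponseId: 'resp_456', // placeholder
    },
  },
});
// warnings includes:
// { type: 'unsupported-setting', setting: 'conversation',
//   details: 'conversation and previousResponseId cannot be used together' }
```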
@@ -3212,6 +3239,7 @@ var OpenAIResponsesLanguageModel = class {
  }
  },
  // provider options:
+ conversation: openaiOptions == null ? void 0 : openaiOptions.conversation,
  max_tool_calls: openaiOptions == null ? void 0 : openaiOptions.maxToolCalls,
  metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
  parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
@@ -3549,7 +3577,9 @@ var OpenAIResponsesLanguageModel = class {
  }
  }
  const providerMetadata = {
- openai: { responseId: response.id }
+ openai: {
+ ...response.id != null ? { responseId: response.id } : {}
+ }
  };
  if (logprobs.length > 0) {
  providerMetadata.openai.logprobs = logprobs;
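`responseId` is now spread in conditionally, so provider metadata omits the key entirely when the response carries no `id` (possible now that `id` is optional in the schema). The pattern in isolation:

```ts
// Conditional-spread pattern: include the key only when the value exists.
const id: string | undefined = undefined;
const meta = { ...(id != null ? { responseId: id } : {}) };
// meta is {}, not { responseId: undefined }
```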
@@ -3557,6 +3587,7 @@ var OpenAIResponsesLanguageModel = class {
  if (typeof response.service_tier === "string") {
  providerMetadata.openai.serviceTier = response.service_tier;
  }
+ const usage = response.usage;
  return {
  content,
  finishReason: mapOpenAIResponseFinishReason({
@@ -3564,11 +3595,11 @@ var OpenAIResponsesLanguageModel = class {
  hasFunctionCall
  }),
  usage: {
- inputTokens: response.usage.input_tokens,
- outputTokens: response.usage.output_tokens,
- totalTokens: response.usage.input_tokens + response.usage.output_tokens,
- reasoningTokens: (_q = (_p = response.usage.output_tokens_details) == null ? void 0 : _p.reasoning_tokens) != null ? _q : void 0,
- cachedInputTokens: (_s = (_r = response.usage.input_tokens_details) == null ? void 0 : _r.cached_tokens) != null ? _s : void 0
+ inputTokens: usage.input_tokens,
+ outputTokens: usage.output_tokens,
+ totalTokens: usage.input_tokens + usage.output_tokens,
+ reasoningTokens: (_q = (_p = usage.output_tokens_details) == null ? void 0 : _p.reasoning_tokens) != null ? _q : void 0,
+ cachedInputTokens: (_s = (_r = usage.input_tokens_details) == null ? void 0 : _r.cached_tokens) != null ? _s : void 0
  },
  request: { body },
  response: {
@@ -4484,7 +4515,7 @@ var OpenAITranscriptionModel = class {
  };

  // src/version.ts
- var VERSION = true ? "2.0.67" : "0.0.0-test";
+ var VERSION = true ? "2.0.69" : "0.0.0-test";

  // src/openai-provider.ts
  function createOpenAI(options = {}) {