@ai-sdk/openai 3.0.0-beta.59 → 3.0.0-beta.61

This diff represents the content of publicly released package versions as they appear in their public registry. It is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,17 @@
  # @ai-sdk/openai
 
+ ## 3.0.0-beta.61
+
+ ### Patch Changes
+
+ - 23f132b: fix: error schema for Responses API
+
+ ## 3.0.0-beta.60
+
+ ### Patch Changes
+
+ - 0877683: feat(provider/openai): support conversations api
+
  ## 3.0.0-beta.59
 
  ### Patch Changes
package/dist/index.d.mts CHANGED
@@ -382,6 +382,7 @@ declare const openaiTools: {
 
  type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
  declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazySchema<{
+ conversation?: string | null | undefined;
  include?: ("file_search_call.results" | "message.output_text.logprobs" | "reasoning.encrypted_content")[] | null | undefined;
  instructions?: string | null | undefined;
  logprobs?: number | boolean | undefined;
package/dist/index.d.ts CHANGED
@@ -382,6 +382,7 @@ declare const openaiTools: {
 
  type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
  declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazySchema<{
+ conversation?: string | null | undefined;
  include?: ("file_search_call.results" | "message.output_text.logprobs" | "reasoning.encrypted_content")[] | null | undefined;
  instructions?: string | null | undefined;
  logprobs?: number | boolean | undefined;
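
Both declaration files gain the same optional `conversation` field, matching the beta.60 changelog entry "support conversations api". A minimal usage sketch, assuming the option is passed through `providerOptions.openai` the way the other Responses provider options are (the conversation id is a placeholder):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await generateText({
  model: openai.responses('gpt-5'),
  prompt: 'Continue where we left off.',
  providerOptions: {
    openai: {
      // Placeholder id: attaches the request to an existing server-side
      // conversation instead of replaying prior messages from the client.
      conversation: 'conv_123',
    },
  },
});

console.log(result.text);
```

The request-body mapping further down (`conversation: openaiOptions?.conversation`) shows the value is forwarded verbatim to the Responses API.
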
package/dist/index.js CHANGED
@@ -2847,10 +2847,13 @@ var openaiResponsesChunkSchema = (0, import_provider_utils22.lazySchema)(
  }),
  import_v417.z.object({
  type: import_v417.z.literal("error"),
- code: import_v417.z.string(),
- message: import_v417.z.string(),
- param: import_v417.z.string().nullish(),
- sequence_number: import_v417.z.number()
+ sequence_number: import_v417.z.number(),
+ error: import_v417.z.object({
+ type: import_v417.z.string(),
+ code: import_v417.z.string(),
+ message: import_v417.z.string(),
+ param: import_v417.z.string().nullish()
+ })
  }),
  import_v417.z.object({ type: import_v417.z.string() }).loose().transform((value) => ({
  type: "unknown_chunk",
@@ -2863,13 +2866,15 @@ var openaiResponsesChunkSchema = (0, import_provider_utils22.lazySchema)(
  var openaiResponsesResponseSchema = (0, import_provider_utils22.lazySchema)(
  () => (0, import_provider_utils22.zodSchema)(
  import_v417.z.object({
- id: import_v417.z.string(),
- created_at: import_v417.z.number(),
+ id: import_v417.z.string().optional(),
+ created_at: import_v417.z.number().optional(),
  error: import_v417.z.object({
- code: import_v417.z.string(),
- message: import_v417.z.string()
+ message: import_v417.z.string(),
+ type: import_v417.z.string(),
+ param: import_v417.z.string().nullish(),
+ code: import_v417.z.string()
  }).nullish(),
- model: import_v417.z.string(),
+ model: import_v417.z.string().optional(),
  output: import_v417.z.array(
  import_v417.z.discriminatedUnion("type", [
  import_v417.z.object({
@@ -3072,7 +3077,7 @@ var openaiResponsesResponseSchema = (0, import_provider_utils22.lazySchema)(
  approval_request_id: import_v417.z.string()
  })
  ])
- ),
+ ).optional(),
  service_tier: import_v417.z.string().nullish(),
  incomplete_details: import_v417.z.object({ reason: import_v417.z.string() }).nullish(),
  usage: import_v417.z.object({
@@ -3080,7 +3085,7 @@ var openaiResponsesResponseSchema = (0, import_provider_utils22.lazySchema)(
  input_tokens_details: import_v417.z.object({ cached_tokens: import_v417.z.number().nullish() }).nullish(),
  output_tokens: import_v417.z.number(),
  output_tokens_details: import_v417.z.object({ reasoning_tokens: import_v417.z.number().nullish() }).nullish()
- })
+ }).optional()
  })
  )
  );
@@ -3157,6 +3162,7 @@ var openaiResponsesModelIds = [
  var openaiResponsesProviderOptionsSchema = (0, import_provider_utils23.lazySchema)(
  () => (0, import_provider_utils23.zodSchema)(
  import_v418.z.object({
+ conversation: import_v418.z.string().nullish(),
  include: import_v418.z.array(
  import_v418.z.enum([
  "reasoning.encrypted_content",
@@ -3440,6 +3446,13 @@ var OpenAIResponsesLanguageModel = class {
  providerOptions,
  schema: openaiResponsesProviderOptionsSchema
  });
+ if ((openaiOptions == null ? void 0 : openaiOptions.conversation) && (openaiOptions == null ? void 0 : openaiOptions.previousResponseId)) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "conversation",
+ details: "conversation and previousResponseId cannot be used together"
+ });
+ }
  const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
  prompt,
  systemMessageMode: modelConfig.systemMessageMode,
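
The new guard warns when `conversation` is combined with `previousResponseId`, since per the message above they cannot be used together. A sketch of a call that would trigger it (placeholder ids; assuming, as in recent AI SDK versions, that call warnings surface on the result rather than being thrown):

```ts
const result = await generateText({
  model: openai.responses('gpt-5'),
  prompt: 'Hello again.',
  providerOptions: {
    openai: {
      conversation: 'conv_123',       // placeholder id
      previousResponseId: 'resp_456', // placeholder id; combining both
    },                                // triggers the warning above
  },
});

// The 'unsupported-setting' warning is reported, not thrown:
console.log(result.warnings);
```
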
@@ -3502,6 +3515,7 @@ var OpenAIResponsesLanguageModel = class {
  }
  },
  // provider options:
+ conversation: openaiOptions == null ? void 0 : openaiOptions.conversation,
  max_tool_calls: openaiOptions == null ? void 0 : openaiOptions.maxToolCalls,
  metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
  parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
@@ -3951,6 +3965,7 @@ var OpenAIResponsesLanguageModel = class {
  if (typeof response.service_tier === "string") {
  providerMetadata.openai.serviceTier = response.service_tier;
  }
+ const usage = response.usage;
  return {
  content,
  finishReason: mapOpenAIResponseFinishReason({
@@ -3958,11 +3973,11 @@ var OpenAIResponsesLanguageModel = class {
  hasFunctionCall
  }),
  usage: {
- inputTokens: response.usage.input_tokens,
- outputTokens: response.usage.output_tokens,
- totalTokens: response.usage.input_tokens + response.usage.output_tokens,
- reasoningTokens: (_z = (_y = response.usage.output_tokens_details) == null ? void 0 : _y.reasoning_tokens) != null ? _z : void 0,
- cachedInputTokens: (_B = (_A = response.usage.input_tokens_details) == null ? void 0 : _A.cached_tokens) != null ? _B : void 0
+ inputTokens: usage.input_tokens,
+ outputTokens: usage.output_tokens,
+ totalTokens: usage.input_tokens + usage.output_tokens,
+ reasoningTokens: (_z = (_y = usage.output_tokens_details) == null ? void 0 : _y.reasoning_tokens) != null ? _z : void 0,
+ cachedInputTokens: (_B = (_A = usage.input_tokens_details) == null ? void 0 : _A.cached_tokens) != null ? _B : void 0
  },
  request: { body },
  response: {
@@ -4961,7 +4976,7 @@ var OpenAITranscriptionModel = class {
  };
 
  // src/version.ts
- var VERSION = true ? "3.0.0-beta.59" : "0.0.0-test";
+ var VERSION = true ? "3.0.0-beta.61" : "0.0.0-test";
 
  // src/openai-provider.ts
  function createOpenAI(options = {}) {