@ai-sdk/openai 3.0.0-beta.59 → 3.0.0-beta.61

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2821,10 +2821,13 @@ var openaiResponsesChunkSchema = lazySchema12(
       }),
       z14.object({
         type: z14.literal("error"),
-        code: z14.string(),
-        message: z14.string(),
-        param: z14.string().nullish(),
-        sequence_number: z14.number()
+        sequence_number: z14.number(),
+        error: z14.object({
+          type: z14.string(),
+          code: z14.string(),
+          message: z14.string(),
+          param: z14.string().nullish()
+        })
       }),
       z14.object({ type: z14.string() }).loose().transform((value) => ({
         type: "unknown_chunk",
@@ -2837,13 +2840,15 @@ var openaiResponsesChunkSchema = lazySchema12(
 var openaiResponsesResponseSchema = lazySchema12(
   () => zodSchema12(
     z14.object({
-      id: z14.string(),
-      created_at: z14.number(),
+      id: z14.string().optional(),
+      created_at: z14.number().optional(),
       error: z14.object({
-        code: z14.string(),
-        message: z14.string()
+        message: z14.string(),
+        type: z14.string(),
+        param: z14.string().nullish(),
+        code: z14.string()
       }).nullish(),
-      model: z14.string(),
+      model: z14.string().optional(),
       output: z14.array(
         z14.discriminatedUnion("type", [
           z14.object({
@@ -3046,7 +3051,7 @@ var openaiResponsesResponseSchema = lazySchema12(
             approval_request_id: z14.string()
           })
         ])
-      ),
+      ).optional(),
       service_tier: z14.string().nullish(),
       incomplete_details: z14.object({ reason: z14.string() }).nullish(),
       usage: z14.object({
@@ -3054,7 +3059,7 @@ var openaiResponsesResponseSchema = lazySchema12(
         input_tokens_details: z14.object({ cached_tokens: z14.number().nullish() }).nullish(),
         output_tokens: z14.number(),
         output_tokens_details: z14.object({ reasoning_tokens: z14.number().nullish() }).nullish()
-      })
+      }).optional()
     })
   )
 );
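
Taken together, the three hunks above loosen openaiResponsesResponseSchema: `id`, `created_at`, `model`, `output`, and `usage` all become optional, and the nullish `error` object gains `type` and `param` fields. A plausible motivation — stated as an assumption, since the diff itself does not say — is that error-only response bodies, which omit the usual metadata, no longer fail schema validation:

// Illustrative only: a body carrying nothing but an error object would now
// parse, since every other top-level field is optional. Values are invented.
const errorOnlyBody = {
  error: {
    message: "The requested model does not exist.",
    type: "invalid_request_error",
    param: "model",
    code: "model_not_found"
  }
};
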
@@ -3131,6 +3136,7 @@ var openaiResponsesModelIds = [
 var openaiResponsesProviderOptionsSchema = lazySchema13(
   () => zodSchema13(
     z15.object({
+      conversation: z15.string().nullish(),
       include: z15.array(
         z15.enum([
           "reasoning.encrypted_content",
@@ -3761,6 +3767,13 @@ var OpenAIResponsesLanguageModel = class {
       providerOptions,
       schema: openaiResponsesProviderOptionsSchema
     });
+    if ((openaiOptions == null ? void 0 : openaiOptions.conversation) && (openaiOptions == null ? void 0 : openaiOptions.previousResponseId)) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "conversation",
+        details: "conversation and previousResponseId cannot be used together"
+      });
+    }
     const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
       prompt,
       systemMessageMode: modelConfig.systemMessageMode,
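
The Responses API treats `conversation` and `previous_response_id` as mutually exclusive ways of threading state, and this guard surfaces the conflict as a call warning rather than throwing. Continuing the sketch above, and assuming generateText exposes the provider warnings on its result (ids again illustrative):

const { warnings } = await generateText({
  model: openai.responses('gpt-4o'),
  prompt: 'Hello',
  providerOptions: {
    openai: {
      conversation: 'conv_abc123',      // illustrative
      previousResponseId: 'resp_def456' // illustrative; conflicts with conversation
    }
  }
});
// warnings would then include an entry like:
// {
//   type: 'unsupported-setting',
//   setting: 'conversation',
//   details: 'conversation and previousResponseId cannot be used together'
// }
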
@@ -3823,6 +3836,7 @@ var OpenAIResponsesLanguageModel = class {
         }
       },
       // provider options:
+      conversation: openaiOptions == null ? void 0 : openaiOptions.conversation,
       max_tool_calls: openaiOptions == null ? void 0 : openaiOptions.maxToolCalls,
       metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
       parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
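
Here the new option is forwarded into the request body under the same `conversation` key. As far as this diff shows, the forwarding is unconditional: the guard above only emits a warning, so both values would still be passed through when they conflict. An illustrative fragment of the resulting body:

// Illustrative request body fragment (field values invented):
// {
//   "model": "gpt-4o",
//   "conversation": "conv_abc123",
//   ... /* other request fields */
// }
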
@@ -4272,6 +4286,7 @@ var OpenAIResponsesLanguageModel = class {
     if (typeof response.service_tier === "string") {
       providerMetadata.openai.serviceTier = response.service_tier;
     }
+    const usage = response.usage;
     return {
       content,
       finishReason: mapOpenAIResponseFinishReason({
@@ -4279,11 +4294,11 @@ var OpenAIResponsesLanguageModel = class {
         hasFunctionCall
       }),
       usage: {
-        inputTokens: response.usage.input_tokens,
-        outputTokens: response.usage.output_tokens,
-        totalTokens: response.usage.input_tokens + response.usage.output_tokens,
-        reasoningTokens: (_z = (_y = response.usage.output_tokens_details) == null ? void 0 : _y.reasoning_tokens) != null ? _z : void 0,
-        cachedInputTokens: (_B = (_A = response.usage.input_tokens_details) == null ? void 0 : _A.cached_tokens) != null ? _B : void 0
+        inputTokens: usage.input_tokens,
+        outputTokens: usage.output_tokens,
+        totalTokens: usage.input_tokens + usage.output_tokens,
+        reasoningTokens: (_z = (_y = usage.output_tokens_details) == null ? void 0 : _y.reasoning_tokens) != null ? _z : void 0,
+        cachedInputTokens: (_B = (_A = usage.input_tokens_details) == null ? void 0 : _A.cached_tokens) != null ? _B : void 0
       },
       request: { body },
       response: {
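
The usage mapping itself is unchanged; the repeated `response.usage` accesses are simply shortened through the `usage` alias introduced in the previous hunk. De-transpiled, the `(_z = ...)` chains are plain optional chaining with a nullish fallback — a minimal restatement with illustrative numbers:

// Illustrative payload; values invented.
const usage = {
  input_tokens: 1200,
  output_tokens: 345,
  input_tokens_details: { cached_tokens: 1000 },
  output_tokens_details: { reasoning_tokens: 128 }
};

const mapped = {
  inputTokens: usage.input_tokens,                       // 1200
  outputTokens: usage.output_tokens,                     // 345
  totalTokens: usage.input_tokens + usage.output_tokens, // 1545
  reasoningTokens: usage.output_tokens_details?.reasoning_tokens ?? undefined,  // 128
  cachedInputTokens: usage.input_tokens_details?.cached_tokens ?? undefined     // 1000
};

Worth noting: the response schema now marks `usage` as optional, yet this code still dereferences `usage.input_tokens` without a guard, so a response that genuinely omitted `usage` would throw here — presumably that path is expected to be unreachable for successful responses.
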