@ai-sdk/openai 3.0.0-beta.60 → 3.0.0-beta.61

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2805,10 +2805,13 @@ var openaiResponsesChunkSchema = (0, import_provider_utils21.lazySchema)(
  }),
  import_v414.z.object({
  type: import_v414.z.literal("error"),
- code: import_v414.z.string(),
- message: import_v414.z.string(),
- param: import_v414.z.string().nullish(),
- sequence_number: import_v414.z.number()
+ sequence_number: import_v414.z.number(),
+ error: import_v414.z.object({
+ type: import_v414.z.string(),
+ code: import_v414.z.string(),
+ message: import_v414.z.string(),
+ param: import_v414.z.string().nullish()
+ })
  }),
  import_v414.z.object({ type: import_v414.z.string() }).loose().transform((value) => ({
  type: "unknown_chunk",
@@ -2821,13 +2824,15 @@ var openaiResponsesChunkSchema = (0, import_provider_utils21.lazySchema)(
  var openaiResponsesResponseSchema = (0, import_provider_utils21.lazySchema)(
  () => (0, import_provider_utils21.zodSchema)(
  import_v414.z.object({
- id: import_v414.z.string(),
- created_at: import_v414.z.number(),
+ id: import_v414.z.string().optional(),
+ created_at: import_v414.z.number().optional(),
  error: import_v414.z.object({
- code: import_v414.z.string(),
- message: import_v414.z.string()
+ message: import_v414.z.string(),
+ type: import_v414.z.string(),
+ param: import_v414.z.string().nullish(),
+ code: import_v414.z.string()
  }).nullish(),
- model: import_v414.z.string(),
+ model: import_v414.z.string().optional(),
  output: import_v414.z.array(
  import_v414.z.discriminatedUnion("type", [
  import_v414.z.object({
@@ -3030,7 +3035,7 @@ var openaiResponsesResponseSchema = (0, import_provider_utils21.lazySchema)(
  approval_request_id: import_v414.z.string()
  })
  ])
- ),
+ ).optional(),
  service_tier: import_v414.z.string().nullish(),
  incomplete_details: import_v414.z.object({ reason: import_v414.z.string() }).nullish(),
  usage: import_v414.z.object({
@@ -3038,7 +3043,7 @@ var openaiResponsesResponseSchema = (0, import_provider_utils21.lazySchema)(
  input_tokens_details: import_v414.z.object({ cached_tokens: import_v414.z.number().nullish() }).nullish(),
  output_tokens: import_v414.z.number(),
  output_tokens_details: import_v414.z.object({ reasoning_tokens: import_v414.z.number().nullish() }).nullish()
- })
+ }).optional()
  })
  )
  );
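Taken together, the response-schema changes relax several previously required fields: `id`, `created_at`, `model`, the `output` array, and `usage` are now optional, and the nested `error` object gains `type` and `param`. That means a bare error payload, which beta.60's schema would have rejected for the missing fields, now validates. A sketch of such a payload (field values are illustrative):

  // Hypothetical error-only response body: no id, created_at, model,
  // output, or usage, only the expanded error object.
  const errorOnlyResponse = {
    error: {
      message: "You exceeded your current quota.",
      type: "insufficient_quota",
      param: null,
      code: "insufficient_quota"
    }
  };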
@@ -4239,6 +4244,7 @@ var OpenAIResponsesLanguageModel = class {
  if (typeof response.service_tier === "string") {
  providerMetadata.openai.serviceTier = response.service_tier;
  }
+ const usage = response.usage;
  return {
  content,
  finishReason: mapOpenAIResponseFinishReason({
@@ -4246,11 +4252,11 @@ var OpenAIResponsesLanguageModel = class {
  hasFunctionCall
  }),
  usage: {
- inputTokens: response.usage.input_tokens,
- outputTokens: response.usage.output_tokens,
- totalTokens: response.usage.input_tokens + response.usage.output_tokens,
- reasoningTokens: (_z = (_y = response.usage.output_tokens_details) == null ? void 0 : _y.reasoning_tokens) != null ? _z : void 0,
- cachedInputTokens: (_B = (_A = response.usage.input_tokens_details) == null ? void 0 : _A.cached_tokens) != null ? _B : void 0
+ inputTokens: usage.input_tokens,
+ outputTokens: usage.output_tokens,
+ totalTokens: usage.input_tokens + usage.output_tokens,
+ reasoningTokens: (_z = (_y = usage.output_tokens_details) == null ? void 0 : _y.reasoning_tokens) != null ? _z : void 0,
+ cachedInputTokens: (_B = (_A = usage.input_tokens_details) == null ? void 0 : _A.cached_tokens) != null ? _B : void 0
  },
  request: { body },
  response: {
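Because the schema now marks `usage` as optional, code that mirrors this mapping outside the package may want to guard before reading token counts. A defensive variant, written as an assumption about how a consumer could handle a missing `usage`, not as what the package itself does:

  // Hypothetical defensive usage mapping; `response` is assumed to match the
  // relaxed beta.61 response schema, where `usage` may be undefined.
  const usage = response.usage;
  const mappedUsage = {
    inputTokens: usage?.input_tokens,
    outputTokens: usage?.output_tokens,
    totalTokens: usage != null ? usage.input_tokens + usage.output_tokens : undefined,
    reasoningTokens: usage?.output_tokens_details?.reasoning_tokens ?? undefined,
    cachedInputTokens: usage?.input_tokens_details?.cached_tokens ?? undefined
  };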