@ai-sdk/openai 3.0.0-beta.60 → 3.0.0-beta.62

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2775,6 +2775,8 @@ var openaiResponsesChunkSchema = lazySchema12(
  annotation: z14.discriminatedUnion("type", [
  z14.object({
  type: z14.literal("url_citation"),
+ start_index: z14.number(),
+ end_index: z14.number(),
  url: z14.string(),
  title: z14.string()
  }),
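The `url_citation` annotation now also requires `start_index` and `end_index`. A minimal sketch of an annotation the updated chunk schema accepts (field values below are illustrative, not taken from the diff):

```ts
// Illustrative url_citation annotation; values are made up.
const urlCitationAnnotation = {
  type: "url_citation",
  start_index: 10, // new required field: start offset of the cited span
  end_index: 42,   // new required field: end offset of the cited span
  url: "https://example.com/source",
  title: "Example source",
};
```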
@@ -2821,10 +2823,13 @@ var openaiResponsesChunkSchema = lazySchema12(
  }),
  z14.object({
  type: z14.literal("error"),
- code: z14.string(),
- message: z14.string(),
- param: z14.string().nullish(),
- sequence_number: z14.number()
+ sequence_number: z14.number(),
+ error: z14.object({
+ type: z14.string(),
+ code: z14.string(),
+ message: z14.string(),
+ param: z14.string().nullish()
+ })
  }),
  z14.object({ type: z14.string() }).loose().transform((value) => ({
  type: "unknown_chunk",
@@ -2837,13 +2842,15 @@ var openaiResponsesChunkSchema = lazySchema12(
  var openaiResponsesResponseSchema = lazySchema12(
  () => zodSchema12(
  z14.object({
- id: z14.string(),
- created_at: z14.number(),
+ id: z14.string().optional(),
+ created_at: z14.number().optional(),
  error: z14.object({
- code: z14.string(),
- message: z14.string()
+ message: z14.string(),
+ type: z14.string(),
+ param: z14.string().nullish(),
+ code: z14.string()
  }).nullish(),
- model: z14.string(),
+ model: z14.string().optional(),
  output: z14.array(
  z14.discriminatedUnion("type", [
  z14.object({
@@ -3046,7 +3053,7 @@ var openaiResponsesResponseSchema = lazySchema12(
  approval_request_id: z14.string()
  })
  ])
- ),
+ ).optional(),
  service_tier: z14.string().nullish(),
  incomplete_details: z14.object({ reason: z14.string() }).nullish(),
  usage: z14.object({
@@ -3054,7 +3061,7 @@ var openaiResponsesResponseSchema = lazySchema12(
  input_tokens_details: z14.object({ cached_tokens: z14.number().nullish() }).nullish(),
  output_tokens: z14.number(),
  output_tokens_details: z14.object({ reasoning_tokens: z14.number().nullish() }).nullish()
- })
+ }).optional()
  })
  )
  );
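With `id`, `created_at`, `model`, `output`, and `usage` now optional, a response body that carries only an error object can still pass validation. A sketch of such a payload under the relaxed schema (values are illustrative):

```ts
// Illustrative error-only response that the relaxed schema now accepts.
const errorOnlyResponse = {
  error: {
    message: "The server had an error processing your request.",
    type: "server_error",
    param: null,
    code: "internal_error",
  },
};
```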
@@ -4281,6 +4288,7 @@ var OpenAIResponsesLanguageModel = class {
  if (typeof response.service_tier === "string") {
  providerMetadata.openai.serviceTier = response.service_tier;
  }
+ const usage = response.usage;
  return {
  content,
  finishReason: mapOpenAIResponseFinishReason({
@@ -4288,11 +4296,11 @@ var OpenAIResponsesLanguageModel = class {
  hasFunctionCall
  }),
  usage: {
- inputTokens: response.usage.input_tokens,
- outputTokens: response.usage.output_tokens,
- totalTokens: response.usage.input_tokens + response.usage.output_tokens,
- reasoningTokens: (_z = (_y = response.usage.output_tokens_details) == null ? void 0 : _y.reasoning_tokens) != null ? _z : void 0,
- cachedInputTokens: (_B = (_A = response.usage.input_tokens_details) == null ? void 0 : _A.cached_tokens) != null ? _B : void 0
+ inputTokens: usage.input_tokens,
+ outputTokens: usage.output_tokens,
+ totalTokens: usage.input_tokens + usage.output_tokens,
+ reasoningTokens: (_z = (_y = usage.output_tokens_details) == null ? void 0 : _y.reasoning_tokens) != null ? _z : void 0,
+ cachedInputTokens: (_B = (_A = usage.input_tokens_details) == null ? void 0 : _A.cached_tokens) != null ? _B : void 0
  },
  request: { body },
  response: {
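The token accounting itself is unchanged; the mapping now just reads from the local `usage` alias. For illustration, given usage details like the ones below, the mapped usage would be:

```ts
// Illustrative mapping from OpenAI response usage to the returned usage shape.
const responseUsage = {
  input_tokens: 120,
  output_tokens: 48,
  input_tokens_details: { cached_tokens: 32 },
  output_tokens_details: { reasoning_tokens: 16 },
};

const mappedUsage = {
  inputTokens: 120,
  outputTokens: 48,
  totalTokens: 168,      // input_tokens + output_tokens
  reasoningTokens: 16,   // from output_tokens_details.reasoning_tokens
  cachedInputTokens: 32, // from input_tokens_details.cached_tokens
};
```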
@@ -4389,7 +4397,7 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({
  type: "tool-call",
  toolCallId: value.item.id,
- toolName: "web_search",
+ toolName: webSearchToolName != null ? webSearchToolName : "web_search",
  input: JSON.stringify({}),
  providerExecuted: true
  });
@@ -4497,7 +4505,7 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({
  type: "tool-result",
  toolCallId: value.item.id,
- toolName: "web_search",
+ toolName: webSearchToolName != null ? webSearchToolName : "web_search",
  result: mapWebSearchOutput(value.item.action)
  });
  } else if (value.item.type === "computer_call") {
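Both the `tool-call` and `tool-result` parts for web search now prefer the configured tool name and only fall back to the literal `"web_search"`. A hypothetical helper sketching that resolution (the surrounding code inlines the same expression):

```ts
// Hypothetical helper; mirrors the inline fallback used for both stream parts.
function resolveWebSearchToolName(webSearchToolName: string | undefined): string {
  return webSearchToolName != null ? webSearchToolName : "web_search";
}

resolveWebSearchToolName(undefined);            // "web_search"
resolveWebSearchToolName("web_search_preview"); // "web_search_preview"
```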
@@ -4962,6 +4970,9 @@ export {
  openaiChatLanguageModelOptions,
  openaiCompletionProviderOptions,
  openaiEmbeddingProviderOptions,
- openaiSpeechProviderOptionsSchema
+ openaiSpeechProviderOptionsSchema,
+ webSearchPreview,
+ webSearchPreviewArgsSchema,
+ webSearchPreviewInputSchema
  };
  //# sourceMappingURL=index.mjs.map
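The web search preview tool factory and its schemas are added to the module's exports. Assuming this bundle is the package's internal entry point (the exact import path is not shown in this diff), importing them could look like:

```ts
// Import path is an assumption; only the export names are confirmed by this diff.
import {
  webSearchPreview,
  webSearchPreviewArgsSchema,
  webSearchPreviewInputSchema,
} from "@ai-sdk/openai/internal";
```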