@ai-sdk/openai 3.0.0-beta.64 → 3.0.0-beta.66

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -3284,6 +3284,7 @@ var import_v418 = require("zod/v4");
 var webSearchArgsSchema = (0, import_provider_utils25.lazySchema)(
   () => (0, import_provider_utils25.zodSchema)(
     import_v418.z.object({
+      externalWebAccess: import_v418.z.boolean().optional(),
       filters: import_v418.z.object({ allowedDomains: import_v418.z.array(import_v418.z.string()).optional() }).optional(),
       searchContextSize: import_v418.z.enum(["low", "medium", "high"]).optional(),
       userLocation: import_v418.z.object({
@@ -3571,6 +3572,7 @@ async function prepareResponsesTools({
       openaiTools.push({
         type: "web_search",
         filters: args.filters != null ? { allowed_domains: args.filters.allowedDomains } : void 0,
+        external_web_access: args.externalWebAccess,
         search_context_size: args.searchContextSize,
         user_location: args.userLocation
       });
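The two hunks above add an optional externalWebAccess flag to the web search tool arguments and forward it to the Responses API as external_web_access. For reference, a minimal usage sketch in TypeScript, assuming the tool factory is exposed as openai.tools.webSearch as in earlier betas (the model id, prompt, and domain filter are illustrative):

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = await generateText({
  model: openai.responses("gpt-4.1-mini"), // illustrative model id
  prompt: "What changed in the latest TypeScript release?",
  tools: {
    web_search: openai.tools.webSearch({
      externalWebAccess: false, // new optional boolean, sent as external_web_access
      searchContextSize: "medium",
      filters: { allowedDomains: ["typescriptlang.org"] },
    }),
  },
});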
@@ -3906,6 +3908,7 @@ var OpenAIResponsesLanguageModel = class {
       path: "/responses",
       modelId: this.modelId
     });
+    const providerKey = this.config.provider.replace(".responses", "");
     const {
       responseHeaders,
       value: response,
@@ -3946,7 +3949,7 @@ var OpenAIResponsesLanguageModel = class {
           type: "reasoning",
           text: summary.text,
           providerMetadata: {
-            openai: {
+            [providerKey]: {
               itemId: part.id,
               reasoningEncryptedContent: (_a = part.encrypted_content) != null ? _a : null
             }
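Starting with the hunk above, every providerMetadata object emitted by OpenAIResponsesLanguageModel is keyed by providerKey (the configured provider name with the ".responses" suffix stripped) rather than the literal openai. For the stock provider this still resolves to openai, so existing reads keep working; a provider that wraps this model under another name surfaces the same fields under its own key. A minimal read-side sketch, assuming the default provider name:

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const { providerMetadata } = await generateText({
  model: openai.responses("gpt-4.1-mini"), // illustrative model id
  prompt: "Hello",
});

// "openai" here because the default provider name is "openai.responses";
// a wrapping provider named e.g. "acme.responses" (hypothetical) would
// expose the same fields under providerMetadata?.acme instead.
console.log(providerMetadata?.openai?.responseId);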
@@ -3982,7 +3985,7 @@ var OpenAIResponsesLanguageModel = class {
             action: part.action
           }),
           providerMetadata: {
-            openai: {
+            [providerKey]: {
               itemId: part.id
             }
           }
@@ -4004,7 +4007,7 @@ var OpenAIResponsesLanguageModel = class {
           type: "text",
           text: contentPart.text,
           providerMetadata: {
-            openai: providerMetadata2
+            [providerKey]: providerMetadata2
           }
         });
         for (const annotation of contentPart.annotations) {
@@ -4026,7 +4029,7 @@ var OpenAIResponsesLanguageModel = class {
             filename: (_l = annotation.filename) != null ? _l : annotation.file_id,
             ...annotation.file_id ? {
               providerMetadata: {
-                openai: {
+                [providerKey]: {
                   fileId: annotation.file_id
                 }
               }
@@ -4041,7 +4044,7 @@ var OpenAIResponsesLanguageModel = class {
             title: (_q = (_p = annotation.filename) != null ? _p : annotation.file_id) != null ? _q : "Document",
             filename: (_r = annotation.filename) != null ? _r : annotation.file_id,
             providerMetadata: {
-              openai: {
+              [providerKey]: {
                 fileId: annotation.file_id,
                 containerId: annotation.container_id,
                 ...annotation.index != null ? { index: annotation.index } : {}
@@ -4057,7 +4060,7 @@ var OpenAIResponsesLanguageModel = class {
             title: annotation.file_id,
             filename: annotation.file_id,
             providerMetadata: {
-              openai: {
+              [providerKey]: {
                 fileId: annotation.file_id,
                 ...annotation.index != null ? { index: annotation.index } : {}
               }
@@ -4076,7 +4079,7 @@ var OpenAIResponsesLanguageModel = class {
           toolName: part.name,
           input: part.arguments,
           providerMetadata: {
-            openai: {
+            [providerKey]: {
               itemId: part.id
             }
           }
@@ -4241,13 +4244,13 @@ var OpenAIResponsesLanguageModel = class {
       }
     }
     const providerMetadata = {
-      openai: { responseId: response.id }
+      [providerKey]: { responseId: response.id }
     };
     if (logprobs.length > 0) {
-      providerMetadata.openai.logprobs = logprobs;
+      providerMetadata[providerKey].logprobs = logprobs;
     }
     if (typeof response.service_tier === "string") {
-      providerMetadata.openai.serviceTier = response.service_tier;
+      providerMetadata[providerKey].serviceTier = response.service_tier;
     }
     const usage = response.usage;
     return {
@@ -4300,6 +4303,7 @@ var OpenAIResponsesLanguageModel = class {
       fetch: this.config.fetch
     });
     const self = this;
+    const providerKey = this.config.provider.replace(".responses", "");
     let finishReason = "unknown";
     const usage = {
       inputTokens: void 0,
@@ -4423,7 +4427,7 @@ var OpenAIResponsesLanguageModel = class {
               type: "text-start",
               id: value.item.id,
               providerMetadata: {
-                openai: {
+                [providerKey]: {
                   itemId: value.item.id
                 }
               }
@@ -4437,7 +4441,7 @@ var OpenAIResponsesLanguageModel = class {
              type: "reasoning-start",
              id: `${value.item.id}:0`,
              providerMetadata: {
-                openai: {
+                [providerKey]: {
                  itemId: value.item.id,
                  reasoningEncryptedContent: (_a = value.item.encrypted_content) != null ? _a : null
                }
@@ -4458,7 +4462,7 @@ var OpenAIResponsesLanguageModel = class {
              toolName: value.item.name,
              input: value.item.arguments,
              providerMetadata: {
-                openai: {
+                [providerKey]: {
                  itemId: value.item.id
                }
              }
@@ -4596,7 +4600,7 @@ var OpenAIResponsesLanguageModel = class {
                }
              }),
              providerMetadata: {
-                openai: { itemId: value.item.id }
+                [providerKey]: { itemId: value.item.id }
              }
            });
          } else if (value.item.type === "reasoning") {
@@ -4611,7 +4615,7 @@ var OpenAIResponsesLanguageModel = class {
                type: "reasoning-end",
                id: `${value.item.id}:${summaryIndex}`,
                providerMetadata: {
-                  openai: {
+                  [providerKey]: {
                    itemId: value.item.id,
                    reasoningEncryptedContent: (_d = value.item.encrypted_content) != null ? _d : null
                  }
@@ -4701,7 +4705,9 @@ var OpenAIResponsesLanguageModel = class {
              controller.enqueue({
                type: "reasoning-end",
                id: `${value.item_id}:${summaryIndex}`,
-                providerMetadata: { openai: { itemId: value.item_id } }
+                providerMetadata: {
+                  [providerKey]: { itemId: value.item_id }
+                }
              });
              activeReasoningPart.summaryParts[summaryIndex] = "concluded";
            }
@@ -4710,7 +4716,7 @@ var OpenAIResponsesLanguageModel = class {
              type: "reasoning-start",
              id: `${value.item_id}:${value.summary_index}`,
              providerMetadata: {
-                openai: {
+                [providerKey]: {
                  itemId: value.item_id,
                  reasoningEncryptedContent: (_h = (_g = activeReasoning[value.item_id]) == null ? void 0 : _g.encryptedContent) != null ? _h : null
                }
@@ -4723,7 +4729,7 @@ var OpenAIResponsesLanguageModel = class {
              id: `${value.item_id}:${value.summary_index}`,
              delta: value.delta,
              providerMetadata: {
-                openai: {
+                [providerKey]: {
                  itemId: value.item_id
                }
              }
@@ -4734,7 +4740,7 @@ var OpenAIResponsesLanguageModel = class {
              type: "reasoning-end",
              id: `${value.item_id}:${value.summary_index}`,
              providerMetadata: {
-                openai: { itemId: value.item_id }
+                [providerKey]: { itemId: value.item_id }
              }
            });
            activeReasoning[value.item_id].summaryParts[value.summary_index] = "concluded";
@@ -4774,7 +4780,7 @@ var OpenAIResponsesLanguageModel = class {
              filename: (_v = value.annotation.filename) != null ? _v : value.annotation.file_id,
              ...value.annotation.file_id ? {
                providerMetadata: {
-                  openai: {
+                  [providerKey]: {
                    fileId: value.annotation.file_id
                  }
                }
@@ -4789,7 +4795,7 @@ var OpenAIResponsesLanguageModel = class {
              title: (_A = (_z = value.annotation.filename) != null ? _z : value.annotation.file_id) != null ? _A : "Document",
              filename: (_B = value.annotation.filename) != null ? _B : value.annotation.file_id,
              providerMetadata: {
-                openai: {
+                [providerKey]: {
                  fileId: value.annotation.file_id,
                  containerId: value.annotation.container_id,
                  ...value.annotation.index != null ? { index: value.annotation.index } : {}
@@ -4805,7 +4811,7 @@ var OpenAIResponsesLanguageModel = class {
              title: value.annotation.file_id,
              filename: value.annotation.file_id,
              providerMetadata: {
-                openai: {
+                [providerKey]: {
                  fileId: value.annotation.file_id,
                  ...value.annotation.index != null ? { index: value.annotation.index } : {}
                }
@@ -4817,7 +4823,7 @@ var OpenAIResponsesLanguageModel = class {
              type: "text-end",
              id: value.item.id,
              providerMetadata: {
-                openai: {
+                [providerKey]: {
                  itemId: value.item.id,
                  ...ongoingAnnotations.length > 0 && {
                    annotations: ongoingAnnotations
@@ -4831,15 +4837,15 @@ var OpenAIResponsesLanguageModel = class {
          },
          flush(controller) {
            const providerMetadata = {
-              openai: {
+              [providerKey]: {
                responseId
              }
            };
            if (logprobs.length > 0) {
-              providerMetadata.openai.logprobs = logprobs;
+              providerMetadata[providerKey].logprobs = logprobs;
            }
            if (serviceTier !== void 0) {
-              providerMetadata.openai.serviceTier = serviceTier;
+              providerMetadata[providerKey].serviceTier = serviceTier;
            }
            controller.enqueue({
              type: "finish",