@ai-sdk/openai 3.0.0-beta.69 → 3.0.0-beta.70

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -567,7 +567,10 @@ function prepareChatTools({
  });
  break;
  default:
- toolWarnings.push({ type: "unsupported-tool", tool });
+ toolWarnings.push({
+ type: "unsupported",
+ feature: `tool type: ${tool.type}`
+ });
  break;
  }
  }
@@ -637,15 +640,12 @@ var OpenAIChatLanguageModel = class {
  })) != null ? _a : {};
  const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
  if (topK != null) {
- warnings.push({
- type: "unsupported-setting",
- setting: "topK"
- });
+ warnings.push({ type: "unsupported", feature: "topK" });
  }
  if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && !structuredOutputs) {
  warnings.push({
- type: "unsupported-setting",
- setting: "responseFormat",
+ type: "unsupported",
+ feature: "responseFormat",
  details: "JSON response format schema is only supported with structuredOutputs"
  });
  }
@@ -702,32 +702,32 @@ var OpenAIChatLanguageModel = class {
  if (baseArgs.temperature != null) {
  baseArgs.temperature = void 0;
  warnings.push({
- type: "unsupported-setting",
- setting: "temperature",
+ type: "unsupported",
+ feature: "temperature",
  details: "temperature is not supported for reasoning models"
  });
  }
  if (baseArgs.top_p != null) {
  baseArgs.top_p = void 0;
  warnings.push({
- type: "unsupported-setting",
- setting: "topP",
+ type: "unsupported",
+ feature: "topP",
  details: "topP is not supported for reasoning models"
  });
  }
  if (baseArgs.frequency_penalty != null) {
  baseArgs.frequency_penalty = void 0;
  warnings.push({
- type: "unsupported-setting",
- setting: "frequencyPenalty",
+ type: "unsupported",
+ feature: "frequencyPenalty",
  details: "frequencyPenalty is not supported for reasoning models"
  });
  }
  if (baseArgs.presence_penalty != null) {
  baseArgs.presence_penalty = void 0;
  warnings.push({
- type: "unsupported-setting",
- setting: "presencePenalty",
+ type: "unsupported",
+ feature: "presencePenalty",
  details: "presencePenalty is not supported for reasoning models"
  });
  }
@@ -762,24 +762,24 @@ var OpenAIChatLanguageModel = class {
  if (baseArgs.temperature != null) {
  baseArgs.temperature = void 0;
  warnings.push({
- type: "unsupported-setting",
- setting: "temperature",
+ type: "unsupported",
+ feature: "temperature",
  details: "temperature is not supported for the search preview models and has been removed."
  });
  }
  }
  if (openaiOptions.serviceTier === "flex" && !supportsFlexProcessing(this.modelId)) {
  warnings.push({
- type: "unsupported-setting",
- setting: "serviceTier",
+ type: "unsupported",
+ feature: "serviceTier",
  details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
  });
  baseArgs.service_tier = void 0;
  }
  if (openaiOptions.serviceTier === "priority" && !supportsPriorityProcessing(this.modelId)) {
  warnings.push({
- type: "unsupported-setting",
- setting: "serviceTier",
+ type: "unsupported",
+ feature: "serviceTier",
  details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
  });
  baseArgs.service_tier = void 0;
@@ -1366,18 +1366,18 @@ var OpenAICompletionLanguageModel = class {
  })
  };
  if (topK != null) {
- warnings.push({ type: "unsupported-setting", setting: "topK" });
+ warnings.push({ type: "unsupported", feature: "topK" });
  }
  if (tools == null ? void 0 : tools.length) {
- warnings.push({ type: "unsupported-setting", setting: "tools" });
+ warnings.push({ type: "unsupported", feature: "tools" });
  }
  if (toolChoice != null) {
- warnings.push({ type: "unsupported-setting", setting: "toolChoice" });
+ warnings.push({ type: "unsupported", feature: "toolChoice" });
  }
  if (responseFormat != null && responseFormat.type !== "text") {
  warnings.push({
- type: "unsupported-setting",
- setting: "responseFormat",
+ type: "unsupported",
+ feature: "responseFormat",
  details: "JSON response format is not supported."
  });
  }
@@ -1719,13 +1719,13 @@ var OpenAIImageModel = class {
  const warnings = [];
  if (aspectRatio != null) {
  warnings.push({
- type: "unsupported-setting",
- setting: "aspectRatio",
+ type: "unsupported",
+ feature: "aspectRatio",
  details: "This model does not support aspect ratio. Use `size` instead."
  });
  }
  if (seed != null) {
- warnings.push({ type: "unsupported-setting", setting: "seed" });
+ warnings.push({ type: "unsupported", feature: "seed" });
  }
  const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
  const { value: response, responseHeaders } = await (0, import_provider_utils13.postJsonToApi)({
@@ -2064,8 +2064,8 @@ var OpenAISpeechModel = class {
  requestBody.response_format = outputFormat;
  } else {
  warnings.push({
- type: "unsupported-setting",
- setting: "outputFormat",
+ type: "unsupported",
+ feature: "outputFormat",
  details: `Unsupported output format: ${outputFormat}. Using mp3 instead.`
  });
  }
@@ -2081,8 +2081,8 @@ var OpenAISpeechModel = class {
  }
  if (language) {
  warnings.push({
- type: "unsupported-setting",
- setting: "language",
+ type: "unsupported",
+ feature: "language",
  details: `OpenAI speech models do not support language selection. Language parameter "${language}" was ignored.`
  });
  }
@@ -3646,7 +3646,10 @@ async function prepareResponsesTools({
  break;
  }
  default:
- toolWarnings.push({ type: "unsupported-tool", tool });
+ toolWarnings.push({
+ type: "unsupported",
+ feature: `function tool ${tool}`
+ });
  break;
  }
  }
@@ -3707,25 +3710,19 @@ var OpenAIResponsesLanguageModel = class {
  const warnings = [];
  const modelConfig = getResponsesModelConfig(this.modelId);
  if (topK != null) {
- warnings.push({ type: "unsupported-setting", setting: "topK" });
+ warnings.push({ type: "unsupported", feature: "topK" });
  }
  if (seed != null) {
- warnings.push({ type: "unsupported-setting", setting: "seed" });
+ warnings.push({ type: "unsupported", feature: "seed" });
  }
  if (presencePenalty != null) {
- warnings.push({
- type: "unsupported-setting",
- setting: "presencePenalty"
- });
+ warnings.push({ type: "unsupported", feature: "presencePenalty" });
  }
  if (frequencyPenalty != null) {
- warnings.push({
- type: "unsupported-setting",
- setting: "frequencyPenalty"
- });
+ warnings.push({ type: "unsupported", feature: "frequencyPenalty" });
  }
  if (stopSequences != null) {
- warnings.push({ type: "unsupported-setting", setting: "stopSequences" });
+ warnings.push({ type: "unsupported", feature: "stopSequences" });
  }
  const openaiOptions = await (0, import_provider_utils30.parseProviderOptions)({
  provider: "openai",
@@ -3734,8 +3731,8 @@ var OpenAIResponsesLanguageModel = class {
  });
  if ((openaiOptions == null ? void 0 : openaiOptions.conversation) && (openaiOptions == null ? void 0 : openaiOptions.previousResponseId)) {
  warnings.push({
- type: "unsupported-setting",
- setting: "conversation",
+ type: "unsupported",
+ feature: "conversation",
  details: "conversation and previousResponseId cannot be used together"
  });
  }
@@ -3832,47 +3829,47 @@ var OpenAIResponsesLanguageModel = class {
  if (baseArgs.temperature != null) {
  baseArgs.temperature = void 0;
  warnings.push({
- type: "unsupported-setting",
- setting: "temperature",
+ type: "unsupported",
+ feature: "temperature",
  details: "temperature is not supported for reasoning models"
  });
  }
  if (baseArgs.top_p != null) {
  baseArgs.top_p = void 0;
  warnings.push({
- type: "unsupported-setting",
- setting: "topP",
+ type: "unsupported",
+ feature: "topP",
  details: "topP is not supported for reasoning models"
  });
  }
  } else {
  if ((openaiOptions == null ? void 0 : openaiOptions.reasoningEffort) != null) {
  warnings.push({
- type: "unsupported-setting",
- setting: "reasoningEffort",
+ type: "unsupported",
+ feature: "reasoningEffort",
  details: "reasoningEffort is not supported for non-reasoning models"
  });
  }
  if ((openaiOptions == null ? void 0 : openaiOptions.reasoningSummary) != null) {
  warnings.push({
- type: "unsupported-setting",
- setting: "reasoningSummary",
+ type: "unsupported",
+ feature: "reasoningSummary",
  details: "reasoningSummary is not supported for non-reasoning models"
  });
  }
  }
  if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "flex" && !modelConfig.supportsFlexProcessing) {
  warnings.push({
- type: "unsupported-setting",
- setting: "serviceTier",
+ type: "unsupported",
+ feature: "serviceTier",
  details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
  });
  delete baseArgs.service_tier;
  }
  if ((openaiOptions == null ? void 0 : openaiOptions.serviceTier) === "priority" && !modelConfig.supportsPriorityProcessing) {
  warnings.push({
- type: "unsupported-setting",
- setting: "serviceTier",
+ type: "unsupported",
+ feature: "serviceTier",
  details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
  });
  delete baseArgs.service_tier;
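Across all of the hunks above, the warning objects change shape: the old "unsupported-setting" (with a `setting` field) and "unsupported-tool" (with a `tool` field) variants are replaced by a single "unsupported" type that carries a `feature` string and an optional `details` string. A minimal sketch of how downstream code might log these warnings after this change; the `logCallWarnings` helper is hypothetical and not part of the package:

// Hypothetical helper, assuming only the warning shape visible in the diff above.
function logCallWarnings(warnings) {
  for (const warning of warnings) {
    if (warning.type === "unsupported") {
      // `feature` names the rejected capability (e.g. "topK", "serviceTier");
      // `details`, when present, explains why it was rejected.
      const suffix = warning.details ? ` (${warning.details})` : "";
      console.warn(`Unsupported: ${warning.feature}${suffix}`);
    } else {
      // Any other warning variants are passed through unchanged.
      console.warn(warning);
    }
  }
}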