@ai-sdk/amazon-bedrock 4.0.0-beta.78 → 4.0.0-beta.80

package/dist/index.mjs CHANGED
@@ -76,7 +76,8 @@ var bedrockProviderOptions = z.object({
   additionalModelRequestFields: z.record(z.string(), z.any()).optional(),
   reasoningConfig: z.object({
     type: z.union([z.literal("enabled"), z.literal("disabled")]).optional(),
-    budgetTokens: z.number().optional()
+    budgetTokens: z.number().optional(),
+    maxReasoningEffort: z.enum(["low", "medium", "high"]).optional()
   }).optional(),
   /**
    * Anthropic beta features to enable
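
The schema change above adds a `maxReasoningEffort` level alongside the existing `budgetTokens` under `reasoningConfig`. A minimal sketch of how these options reach that schema from application code, assuming the AI SDK's `generateText` and this package's default `bedrock` provider instance (the model ID and prompt are illustrative):

```ts
import { bedrock } from "@ai-sdk/amazon-bedrock";
import { generateText } from "ai";

const { text } = await generateText({
  model: bedrock("amazon.nova-pro-v1:0"),
  prompt: "Explain the Doppler effect in one short paragraph.",
  providerOptions: {
    bedrock: {
      reasoningConfig: {
        type: "enabled",
        // New alongside budgetTokens: an effort level for non-Anthropic models.
        maxReasoningEffort: "medium",
        // For Anthropic models you would set budgetTokens instead, e.g.:
        // budgetTokens: 2048,
      },
    },
  },
});

console.log(text);
```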
@@ -230,7 +231,8 @@ async function prepareTools({
     betas: anthropicBetas
   } = await prepareAnthropicTools({
     tools: ProviderTools,
-    toolChoice
+    toolChoice,
+    supportsStructuredOutput: false
   });
   toolWarnings.push(...anthropicToolWarnings);
   anthropicBetas.forEach((beta) => betas.add(beta));
@@ -691,7 +693,7 @@ var BedrockChatLanguageModel = class {
     toolChoice,
     providerOptions
   }) {
-    var _a, _b, _c, _d, _e, _f, _g;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
     const bedrockOptions = (_a = await parseProviderOptions2({
       provider: "bedrock",
       providerOptions,
@@ -764,8 +766,10 @@ var BedrockChatLanguageModel = class {
         anthropic_beta: mergedBetas
       };
     }
-    const isThinking = ((_c = bedrockOptions.reasoningConfig) == null ? void 0 : _c.type) === "enabled";
+    const isAnthropicModel = this.modelId.includes("anthropic");
+    const isThinkingRequested = ((_c = bedrockOptions.reasoningConfig) == null ? void 0 : _c.type) === "enabled";
     const thinkingBudget = (_d = bedrockOptions.reasoningConfig) == null ? void 0 : _d.budgetTokens;
+    const isAnthropicThinkingEnabled = isAnthropicModel && isThinkingRequested;
     const inferenceConfig = {
       ...maxOutputTokens != null && { maxTokens: maxOutputTokens },
       ...temperature != null && { temperature },
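
The hunk above replaces the single `isThinking` flag with a model-family check: Anthropic-style "thinking" settings now apply only when the model ID contains `"anthropic"` and reasoning is explicitly enabled. An illustrative restatement of that predicate (the helper below is not part of the package's API):

```ts
// Illustrative restatement of the gating introduced above; not exported by the package.
type ReasoningConfig = {
  type?: "enabled" | "disabled";
  budgetTokens?: number;
  maxReasoningEffort?: "low" | "medium" | "high";
};

function isAnthropicThinkingEnabled(
  modelId: string,
  reasoningConfig?: ReasoningConfig,
): boolean {
  // Anthropic-style "thinking" only applies to Anthropic model IDs...
  const isAnthropicModel = modelId.includes("anthropic");
  // ...and only when the caller explicitly enabled reasoning.
  const isThinkingRequested = reasoningConfig?.type === "enabled";
  return isAnthropicModel && isThinkingRequested;
}

// true for an "anthropic.*" model ID with { type: "enabled" };
// false for e.g. "amazon.nova-pro-v1:0" regardless of the other settings.
```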
@@ -773,7 +777,7 @@ var BedrockChatLanguageModel = class {
       ...topK != null && { topK },
       ...stopSequences != null && { stopSequences }
     };
-    if (isThinking && thinkingBudget != null) {
+    if (isAnthropicThinkingEnabled && thinkingBudget != null) {
       if (inferenceConfig.maxTokens != null) {
         inferenceConfig.maxTokens += thinkingBudget;
       } else {
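
When Anthropic thinking is enabled and a `budgetTokens` value is supplied, the budget is added on top of the caller's token limit so reasoning tokens do not eat into the visible output. A worked example of that arithmetic, with assumed values:

```ts
// Assumed values illustrating the `inferenceConfig.maxTokens += thinkingBudget` branch above.
const requestedMaxTokens = 1024; // caller's maxOutputTokens
const thinkingBudget = 4096;     // reasoningConfig.budgetTokens
const effectiveMaxTokens = requestedMaxTokens + thinkingBudget; // 5120 sent as inferenceConfig.maxTokens
```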
@@ -786,8 +790,32 @@ var BedrockChatLanguageModel = class {
           budget_tokens: thinkingBudget
         }
       };
+    } else if (!isAnthropicModel && thinkingBudget != null) {
+      warnings.push({
+        type: "unsupported",
+        feature: "budgetTokens",
+        details: "budgetTokens applies only to Anthropic models on Bedrock and will be ignored for this model."
+      });
+    }
+    const maxReasoningEffort = (_f = bedrockOptions.reasoningConfig) == null ? void 0 : _f.maxReasoningEffort;
+    if (maxReasoningEffort != null && !isAnthropicModel) {
+      bedrockOptions.additionalModelRequestFields = {
+        ...bedrockOptions.additionalModelRequestFields,
+        reasoningConfig: {
+          ...((_g = bedrockOptions.reasoningConfig) == null ? void 0 : _g.type) != null && {
+            type: bedrockOptions.reasoningConfig.type
+          },
+          maxReasoningEffort
+        }
+      };
+    } else if (maxReasoningEffort != null && isAnthropicModel) {
+      warnings.push({
+        type: "unsupported",
+        feature: "maxReasoningEffort",
+        details: "maxReasoningEffort applies only to Amazon Nova models on Bedrock and will be ignored for this model."
+      });
     }
-    if (isThinking && inferenceConfig.temperature != null) {
+    if (isAnthropicThinkingEnabled && inferenceConfig.temperature != null) {
       delete inferenceConfig.temperature;
       warnings.push({
         type: "unsupported",
@@ -795,7 +823,7 @@ var BedrockChatLanguageModel = class {
         details: "temperature is not supported when thinking is enabled"
       });
     }
-    if (isThinking && inferenceConfig.topP != null) {
+    if (isAnthropicThinkingEnabled && inferenceConfig.topP != null) {
       delete inferenceConfig.topP;
       warnings.push({
         type: "unsupported",
@@ -803,7 +831,7 @@ var BedrockChatLanguageModel = class {
         details: "topP is not supported when thinking is enabled"
       });
     }
-    if (isThinking && inferenceConfig.topK != null) {
+    if (isAnthropicThinkingEnabled && inferenceConfig.topK != null) {
       delete inferenceConfig.topK;
       warnings.push({
         type: "unsupported",
@@ -811,7 +839,7 @@ var BedrockChatLanguageModel = class {
         details: "topK is not supported when thinking is enabled"
       });
     }
-    const hasAnyTools = ((_g = (_f = toolConfig.tools) == null ? void 0 : _f.length) != null ? _g : 0) > 0 || additionalTools;
+    const hasAnyTools = ((_i = (_h = toolConfig.tools) == null ? void 0 : _h.length) != null ? _i : 0) > 0 || additionalTools;
     let filteredPrompt = prompt;
     if (!hasAnyTools) {
       const hasToolContent = prompt.some(
@@ -1550,7 +1578,7 @@ import {
 import { AwsV4Signer } from "aws4fetch";

 // src/version.ts
-var VERSION = true ? "4.0.0-beta.78" : "0.0.0-test";
+var VERSION = true ? "4.0.0-beta.80" : "0.0.0-test";

 // src/bedrock-sigv4-fetch.ts
 function createSigV4FetchFunction(getCredentials, fetch = globalThis.fetch) {