@ai-sdk/amazon-bedrock 3.0.65 → 3.0.67

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,17 @@
  # @ai-sdk/amazon-bedrock
 
+ ## 3.0.67
+
+ ### Patch Changes
+
+ - 1887f82: fix(bedrock): send {} as tool input when streaming tool calls without arguments
+
+ ## 3.0.66
+
+ ### Patch Changes
+
+ - c36a1ad: feat(provider/bedrock): Support Nova 2 extended reasoning `maxReasoningEffort` field
+
  ## 3.0.65
 
  ### Patch Changes
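
Taken together, the two patch releases add a reasoning-effort knob for Nova 2 and fix empty tool-call inputs. Below is a minimal usage sketch of the new `maxReasoningEffort` option, assuming the standard `generateText` call from the AI SDK and a placeholder Nova model id; the `bedrock` provider-options key and the `reasoningConfig` shape are taken from the schema changes further down in this diff.

```ts
import { generateText } from 'ai';
import { bedrock } from '@ai-sdk/amazon-bedrock';

const { text } = await generateText({
  // Placeholder model id: substitute the Nova 2 reasoning model you actually use.
  model: bedrock('amazon.nova-2-lite-v1:0'),
  prompt: 'Walk through the migration plan step by step.',
  providerOptions: {
    bedrock: {
      reasoningConfig: {
        type: 'enabled',
        // New in 3.0.66: 'low' | 'medium' | 'high'
        maxReasoningEffort: 'medium',
      },
    },
  },
});
```
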
package/dist/index.d.mts CHANGED
@@ -9,6 +9,11 @@ declare const bedrockProviderOptions: z.ZodObject<{
  reasoningConfig: z.ZodOptional<z.ZodObject<{
  type: z.ZodOptional<z.ZodUnion<readonly [z.ZodLiteral<"enabled">, z.ZodLiteral<"disabled">]>>;
  budgetTokens: z.ZodOptional<z.ZodNumber>;
+ maxReasoningEffort: z.ZodOptional<z.ZodEnum<{
+ low: "low";
+ medium: "medium";
+ high: "high";
+ }>>;
  }, z.core.$strip>>;
  anthropicBeta: z.ZodOptional<z.ZodArray<z.ZodString>>;
  }, z.core.$strip>;
package/dist/index.d.ts CHANGED
@@ -9,6 +9,11 @@ declare const bedrockProviderOptions: z.ZodObject<{
  reasoningConfig: z.ZodOptional<z.ZodObject<{
  type: z.ZodOptional<z.ZodUnion<readonly [z.ZodLiteral<"enabled">, z.ZodLiteral<"disabled">]>>;
  budgetTokens: z.ZodOptional<z.ZodNumber>;
+ maxReasoningEffort: z.ZodOptional<z.ZodEnum<{
+ low: "low";
+ medium: "medium";
+ high: "high";
+ }>>;
  }, z.core.$strip>>;
  anthropicBeta: z.ZodOptional<z.ZodArray<z.ZodString>>;
  }, z.core.$strip>;
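
Read as a standalone schema, the declaration change in both `.d.mts` and `.d.ts` amounts to the following sketch of the `reasoningConfig` slice; the zod source is inferred from the compiled output in `dist/index.js` below.

```ts
import { z } from 'zod';

// Sketch of the reasoningConfig slice of bedrockProviderOptions after this release.
const reasoningConfig = z
  .object({
    type: z.union([z.literal('enabled'), z.literal('disabled')]).optional(),
    budgetTokens: z.number().optional(),
    // New: requested effort level for Nova 2 extended reasoning.
    maxReasoningEffort: z.enum(['low', 'medium', 'high']).optional(),
  })
  .optional();

type ReasoningConfig = z.infer<typeof reasoningConfig>;
// => { type?: 'enabled' | 'disabled'; budgetTokens?: number;
//      maxReasoningEffort?: 'low' | 'medium' | 'high' } | undefined
```
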
package/dist/index.js CHANGED
@@ -30,7 +30,7 @@ module.exports = __toCommonJS(src_exports);
  var import_provider_utils8 = require("@ai-sdk/provider-utils");
 
  // src/version.ts
- var VERSION = true ? "3.0.65" : "0.0.0-test";
+ var VERSION = true ? "3.0.67" : "0.0.0-test";
 
  // src/bedrock-provider.ts
  var import_internal2 = require("@ai-sdk/anthropic/internal");
@@ -96,7 +96,8 @@ var bedrockProviderOptions = import_v4.z.object({
  additionalModelRequestFields: import_v4.z.record(import_v4.z.string(), import_v4.z.any()).optional(),
  reasoningConfig: import_v4.z.object({
  type: import_v4.z.union([import_v4.z.literal("enabled"), import_v4.z.literal("disabled")]).optional(),
- budgetTokens: import_v4.z.number().optional()
+ budgetTokens: import_v4.z.number().optional(),
+ maxReasoningEffort: import_v4.z.enum(["low", "medium", "high"]).optional()
  }).optional(),
  /**
  * Anthropic beta features to enable
@@ -692,7 +693,7 @@ var BedrockChatLanguageModel = class {
  toolChoice,
  providerOptions
  }) {
- var _a, _b, _c, _d, _e, _f, _g;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i;
  const bedrockOptions = (_a = await (0, import_provider_utils4.parseProviderOptions)({
  provider: "bedrock",
  providerOptions,
@@ -765,8 +766,10 @@ var BedrockChatLanguageModel = class {
  anthropic_beta: mergedBetas
  };
  }
- const isThinking = ((_c = bedrockOptions.reasoningConfig) == null ? void 0 : _c.type) === "enabled";
+ const isAnthropicModel = this.modelId.includes("anthropic");
+ const isThinkingRequested = ((_c = bedrockOptions.reasoningConfig) == null ? void 0 : _c.type) === "enabled";
  const thinkingBudget = (_d = bedrockOptions.reasoningConfig) == null ? void 0 : _d.budgetTokens;
+ const isAnthropicThinkingEnabled = isAnthropicModel && isThinkingRequested;
  const inferenceConfig = {
  ...maxOutputTokens != null && { maxTokens: maxOutputTokens },
  ...temperature != null && { temperature },
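
The new `isAnthropicModel` gate is a plain substring check on the Bedrock model id; the ids in this sketch are illustrative examples, not an exhaustive list.

```ts
// Mirrors the gate added above: any Bedrock model id containing "anthropic"
// (including region-prefixed inference profiles) is treated as an Anthropic
// model for thinking/budgetTokens purposes.
const isAnthropicModel = (modelId: string) => modelId.includes('anthropic');

isAnthropicModel('anthropic.claude-sonnet-4-20250514-v1:0');    // true
isAnthropicModel('us.anthropic.claude-sonnet-4-20250514-v1:0'); // true (inference profile)
isAnthropicModel('amazon.nova-pro-v1:0');                       // false → eligible for maxReasoningEffort
```
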
@@ -774,7 +777,7 @@ var BedrockChatLanguageModel = class {
  ...topK != null && { topK },
  ...stopSequences != null && { stopSequences }
  };
- if (isThinking && thinkingBudget != null) {
+ if (isAnthropicThinkingEnabled && thinkingBudget != null) {
  if (inferenceConfig.maxTokens != null) {
  inferenceConfig.maxTokens += thinkingBudget;
  } else {
@@ -787,8 +790,32 @@ var BedrockChatLanguageModel = class {
  budget_tokens: thinkingBudget
  }
  };
+ } else if (!isAnthropicModel && thinkingBudget != null) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "providerOptions",
+ details: "budgetTokens applies only to Anthropic models on Bedrock and will be ignored for this model."
+ });
+ }
+ const maxReasoningEffort = (_f = bedrockOptions.reasoningConfig) == null ? void 0 : _f.maxReasoningEffort;
+ if (maxReasoningEffort != null && !isAnthropicModel) {
+ bedrockOptions.additionalModelRequestFields = {
+ ...bedrockOptions.additionalModelRequestFields,
+ reasoningConfig: {
+ ...((_g = bedrockOptions.reasoningConfig) == null ? void 0 : _g.type) != null && {
+ type: bedrockOptions.reasoningConfig.type
+ },
+ maxReasoningEffort
+ }
+ };
+ } else if (maxReasoningEffort != null && isAnthropicModel) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "providerOptions",
+ details: "maxReasoningEffort applies only to Amazon Nova models on Bedrock and will be ignored for this model."
+ });
  }
- if (isThinking && inferenceConfig.temperature != null) {
+ if (isAnthropicThinkingEnabled && inferenceConfig.temperature != null) {
  delete inferenceConfig.temperature;
  warnings.push({
  type: "unsupported-setting",
@@ -796,7 +823,7 @@ var BedrockChatLanguageModel = class {
  details: "temperature is not supported when thinking is enabled"
  });
  }
- if (isThinking && inferenceConfig.topP != null) {
+ if (isAnthropicThinkingEnabled && inferenceConfig.topP != null) {
  delete inferenceConfig.topP;
  warnings.push({
  type: "unsupported-setting",
@@ -804,7 +831,7 @@ var BedrockChatLanguageModel = class {
  details: "topP is not supported when thinking is enabled"
  });
  }
- if (isThinking && inferenceConfig.topK != null) {
+ if (isAnthropicThinkingEnabled && inferenceConfig.topK != null) {
  delete inferenceConfig.topK;
  warnings.push({
  type: "unsupported-setting",
@@ -812,7 +839,7 @@ var BedrockChatLanguageModel = class {
  details: "topK is not supported when thinking is enabled"
  });
  }
- const hasAnyTools = ((_g = (_f = toolConfig.tools) == null ? void 0 : _f.length) != null ? _g : 0) > 0 || additionalTools;
+ const hasAnyTools = ((_i = (_h = toolConfig.tools) == null ? void 0 : _h.length) != null ? _i : 0) > 0 || additionalTools;
  let filteredPrompt = prompt;
  if (!hasAnyTools) {
  const hasToolContent = prompt.some(
@@ -935,7 +962,7 @@ var BedrockChatLanguageModel = class {
  type: "tool-call",
  toolCallId: (_c = (_b = part.toolUse) == null ? void 0 : _b.toolUseId) != null ? _c : this.config.generateId(),
  toolName: (_e = (_d = part.toolUse) == null ? void 0 : _d.name) != null ? _e : `tool-${this.config.generateId()}`,
- input: JSON.stringify((_g = (_f = part.toolUse) == null ? void 0 : _f.input) != null ? _g : {})
+ input: JSON.stringify((_g = (_f = part.toolUse) == null ? void 0 : _f.input) != null ? _g : {})
  });
  }
  }
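
The non-streaming fix matters because the old fallback did not serialize to a JSON object:

```ts
// Old fallback: a missing toolUse.input became the JSON string '""', which is
// not an object and breaks downstream tool-argument parsing.
JSON.stringify(''); // => '""'

// New fallback: an empty object serializes to a valid empty-arguments payload.
JSON.stringify({}); // => '{}'
```
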
@@ -1125,7 +1152,7 @@ var BedrockChatLanguageModel = class {
  type: "tool-call",
  toolCallId: contentBlock.toolCallId,
  toolName: contentBlock.toolName,
- input: contentBlock.jsonText
+ input: contentBlock.jsonText === "" ? "{}" : contentBlock.jsonText
  });
  }
  }
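
The streaming path needs the same guard because an argument-less tool call produces no input deltas, leaving the accumulated `jsonText` empty:

```ts
// Without the guard, the emitted tool-call input would be '', which is not valid JSON:
JSON.parse('');   // throws SyntaxError: Unexpected end of JSON input
JSON.parse('{}'); // => {} — the empty-arguments shape tool executors expect
```
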