@ai-sdk/amazon-bedrock 3.0.3 → 3.0.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,17 @@
  # @ai-sdk/amazon-bedrock
 
+ ## 3.0.5
+
+ ### Patch Changes
+
+ - c2871e6: fix(provider/amazon-bedrock): resolve opus 4.1 reasoning mode validation error
+
+ ## 3.0.4
+
+ ### Patch Changes
+
+ - 9aa06a7: filter out blank text blocks
+
  ## 3.0.3
 
  ### Patch Changes
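
The 3.0.4 entry ("filter out blank text blocks") corresponds to the convertToBedrockChatMessages change shown in the dist/index.js diff below: text parts whose content is empty or whitespace-only are now skipped instead of being turned into Converse content blocks. A minimal sketch of that behavior, using a simplified TextPart shape rather than the provider's internal types:

```ts
// Illustrative only: a simplified stand-in for the provider's message
// conversion step, not the actual @ai-sdk/amazon-bedrock internals.
interface TextPart {
  type: "text";
  text: string;
}

// Skip text parts that are empty or whitespace-only so they never become
// blank text content blocks in the Bedrock request.
function toBedrockTextBlocks(parts: TextPart[]): Array<{ text: string }> {
  return parts
    .filter((part) => part.text.trim().length > 0)
    .map((part) => ({ text: part.text }));
}

// Example: the whitespace-only middle part is dropped before the request is built.
console.log(
  toBedrockTextBlocks([
    { type: "text", text: "Hello" },
    { type: "text", text: "   " },
    { type: "text", text: "world" },
  ]),
); // [{ text: "Hello" }, { text: "world" }]
```

In the actual patch this check sits inside the existing switch statement, so a blank part simply breaks out of the "text" case without pushing a content block.
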
package/dist/index.js CHANGED
@@ -452,6 +452,9 @@ async function convertToBedrockChatMessages(prompt) {
  const isLastContentPart = k === content.length - 1;
  switch (part.type) {
    case "text": {
+     if (!part.text.trim()) {
+       break;
+     }
      bedrockContent.push({
        text: (
          // trim the last text part if it's the last message in the block
@@ -711,17 +714,17 @@ var BedrockChatLanguageModel = class {
  const isThinking = ((_b = bedrockOptions.reasoningConfig) == null ? void 0 : _b.type) === "enabled";
  const thinkingBudget = (_c = bedrockOptions.reasoningConfig) == null ? void 0 : _c.budgetTokens;
  const inferenceConfig = {
-   ...maxOutputTokens != null && { maxOutputTokens },
+   ...maxOutputTokens != null && { maxTokens: maxOutputTokens },
    ...temperature != null && { temperature },
    ...topP != null && { topP },
    ...topK != null && { topK },
    ...stopSequences != null && { stopSequences }
  };
  if (isThinking && thinkingBudget != null) {
-   if (inferenceConfig.maxOutputTokens != null) {
-     inferenceConfig.maxOutputTokens += thinkingBudget;
+   if (inferenceConfig.maxTokens != null) {
+     inferenceConfig.maxTokens += thinkingBudget;
    } else {
-     inferenceConfig.maxOutputTokens = thinkingBudget + 4096;
+     inferenceConfig.maxTokens = thinkingBudget + 4096;
    }
    bedrockOptions.additionalModelRequestFields = {
      ...bedrockOptions.additionalModelRequestFields,
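
The 3.0.5 fix for the Opus 4.1 reasoning-mode validation error comes down to the rename above: the Bedrock Converse inferenceConfig field is named maxTokens, so the earlier maxOutputTokens key was presumably rejected or ignored, and with extended thinking enabled the request could fail validation. After the fix the thinking budget is added on top of the requested output limit, with a 4096-token default when no limit is given. A rough sketch of that arithmetic, using hypothetical variable names rather than the provider's internals:

```ts
// Illustrative sketch of the token accounting after the fix; the names
// `requestedMaxOutputTokens` and `budgetTokens` are stand-ins, not the
// provider's actual variables.
function resolveMaxTokens(
  requestedMaxOutputTokens: number | undefined,
  budgetTokens: number | undefined,
  reasoningEnabled: boolean,
): number | undefined {
  // Base value: map the AI SDK's maxOutputTokens onto the Converse API's
  // `maxTokens` field (the rename shown in the diff above).
  let maxTokens = requestedMaxOutputTokens;

  if (reasoningEnabled && budgetTokens != null) {
    // With reasoning enabled, the ceiling must also cover the thinking
    // budget, so the budget is added on top; when no explicit limit was
    // given, 4096 tokens are reserved for the visible output.
    maxTokens =
      maxTokens != null ? maxTokens + budgetTokens : budgetTokens + 4096;
  }

  return maxTokens;
}

// Example: maxOutputTokens 1000 with a 2000-token thinking budget yields
// maxTokens: 3000 in the Converse inferenceConfig.
console.log(resolveMaxTokens(1000, 2000, true)); // 3000
console.log(resolveMaxTokens(undefined, 2000, true)); // 6096
```
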