@ai-sdk/amazon-bedrock 3.0.55 → 3.0.56

This diff shows the content of publicly released package versions as published to a supported public registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
package/dist/index.mjs CHANGED
@@ -8,7 +8,7 @@ import {
  } from "@ai-sdk/provider-utils";
 
  // src/version.ts
- var VERSION = true ? "3.0.55" : "0.0.0-test";
+ var VERSION = true ? "3.0.56" : "0.0.0-test";
 
  // src/bedrock-provider.ts
  import { anthropicTools as anthropicTools2 } from "@ai-sdk/anthropic/internal";
@@ -82,7 +82,11 @@ var bedrockProviderOptions = z.object({
  reasoningConfig: z.object({
  type: z.union([z.literal("enabled"), z.literal("disabled")]).optional(),
  budgetTokens: z.number().optional()
- }).optional()
+ }).optional(),
+ /**
+  * Anthropic beta features to enable
+  */
+ anthropicBeta: z.array(z.string()).optional()
  });
 
  // src/bedrock-error.ts
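The hunk above adds an anthropicBeta entry to the Bedrock provider options schema, letting callers list Anthropic beta features to enable for a request. A rough usage sketch follows; it assumes the option is passed through providerOptions under the "bedrock" key (matching the provider name used by parseProviderOptions2 later in this file), and the model ID and beta flag are placeholders rather than values taken from this diff:

import { generateText } from "ai";
import { bedrock } from "@ai-sdk/amazon-bedrock";

const result = await generateText({
  // Placeholder model ID; use an Anthropic model available in your Bedrock region.
  model: bedrock("anthropic.claude-3-5-sonnet-20240620-v1:0"),
  prompt: "Hello",
  providerOptions: {
    bedrock: {
      // Placeholder flag name; supply a real Anthropic beta feature name here.
      anthropicBeta: ["example-beta-flag"],
    },
  },
});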
@@ -684,7 +688,7 @@ var BedrockChatLanguageModel = class {
  toolChoice,
  providerOptions
  }) {
- var _a, _b, _c, _d, _e, _f;
+ var _a, _b, _c, _d, _e, _f, _g;
  const bedrockOptions = (_a = await parseProviderOptions2({
  provider: "bedrock",
  providerOptions,
@@ -709,6 +713,21 @@ var BedrockChatLanguageModel = class {
  setting: "seed"
  });
  }
+ if (temperature != null && temperature > 1) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "temperature",
+ details: `${temperature} exceeds bedrock maximum of 1.0. clamped to 1.0`
+ });
+ temperature = 1;
+ } else if (temperature != null && temperature < 0) {
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "temperature",
+ details: `${temperature} is below bedrock minimum of 0. clamped to 0`
+ });
+ temperature = 0;
+ }
  if (responseFormat != null && responseFormat.type !== "text" && responseFormat.type !== "json") {
  warnings.push({
  type: "unsupported-setting",
@@ -742,8 +761,16 @@ var BedrockChatLanguageModel = class {
  ...additionalTools
  };
  }
- const isThinking = ((_b = bedrockOptions.reasoningConfig) == null ? void 0 : _b.type) === "enabled";
- const thinkingBudget = (_c = bedrockOptions.reasoningConfig) == null ? void 0 : _c.budgetTokens;
+ if (betas.size > 0 || bedrockOptions.anthropicBeta) {
+ const existingBetas = (_b = bedrockOptions.anthropicBeta) != null ? _b : [];
+ const mergedBetas = betas.size > 0 ? [...existingBetas, ...Array.from(betas)] : existingBetas;
+ bedrockOptions.additionalModelRequestFields = {
+ ...bedrockOptions.additionalModelRequestFields,
+ anthropic_beta: mergedBetas
+ };
+ }
+ const isThinking = ((_c = bedrockOptions.reasoningConfig) == null ? void 0 : _c.type) === "enabled";
+ const thinkingBudget = (_d = bedrockOptions.reasoningConfig) == null ? void 0 : _d.budgetTokens;
  const inferenceConfig = {
  ...maxOutputTokens != null && { maxTokens: maxOutputTokens },
  ...temperature != null && { temperature },
@@ -760,7 +787,7 @@ var BedrockChatLanguageModel = class {
  bedrockOptions.additionalModelRequestFields = {
  ...bedrockOptions.additionalModelRequestFields,
  thinking: {
- type: (_d = bedrockOptions.reasoningConfig) == null ? void 0 : _d.type,
+ type: (_e = bedrockOptions.reasoningConfig) == null ? void 0 : _e.type,
  budget_tokens: thinkingBudget
  }
  };
@@ -789,7 +816,7 @@ var BedrockChatLanguageModel = class {
  details: "topK is not supported when thinking is enabled"
  });
  }
- const hasAnyTools = ((_f = (_e = toolConfig.tools) == null ? void 0 : _e.length) != null ? _f : 0) > 0 || additionalTools;
+ const hasAnyTools = ((_g = (_f = toolConfig.tools) == null ? void 0 : _f.length) != null ? _g : 0) > 0 || additionalTools;
  let filteredPrompt = prompt;
  if (!hasAnyTools) {
  const hasToolContent = prompt.some(
@@ -838,27 +865,21 @@ var BedrockChatLanguageModel = class {
  };
  }
  async getHeaders({
- betas,
  headers
  }) {
- return combineHeaders(
- await resolve(this.config.headers),
- betas.size > 0 ? { "anthropic-beta": Array.from(betas).join(",") } : {},
- headers
- );
+ return combineHeaders(await resolve(this.config.headers), headers);
  }
  async doGenerate(options) {
  var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
  const {
  command: args,
  warnings,
- usesJsonResponseTool,
- betas
+ usesJsonResponseTool
  } = await this.getArgs(options);
  const url = `${this.getUrl(this.modelId)}/converse`;
  const { value: response, responseHeaders } = await postJsonToApi({
  url,
- headers: await this.getHeaders({ betas, headers: options.headers }),
+ headers: await this.getHeaders({ headers: options.headers }),
  body: args,
  failedResponseHandler: createJsonErrorResponseHandler({
  errorSchema: BedrockErrorSchema,
@@ -955,13 +976,12 @@ var BedrockChatLanguageModel = class {
  const {
  command: args,
  warnings,
- usesJsonResponseTool,
- betas
+ usesJsonResponseTool
  } = await this.getArgs(options);
  const url = `${this.getUrl(this.modelId)}/converse-stream`;
  const { value: response, responseHeaders } = await postJsonToApi({
  url,
- headers: await this.getHeaders({ betas, headers: options.headers }),
+ headers: await this.getHeaders({ headers: options.headers }),
  body: args,
  failedResponseHandler: createJsonErrorResponseHandler({
  errorSchema: BedrockErrorSchema,
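Taken together, the getHeaders, doGenerate, and doStream hunks above stop sending Anthropic beta flags as an "anthropic-beta" HTTP header; the betas collected while building the request are instead merged with any anthropicBeta provider option and placed in the request body via additionalModelRequestFields, as shown in the earlier hunk. A condensed sketch of that merge, restating the added code with placeholder values:

// Betas gathered while building the request (e.g. from Anthropic tools); placeholder value.
const betas = new Set(["example-beta-flag-from-tools"]);
// Betas passed explicitly via the new provider option; placeholder value.
const bedrockOptions = { anthropicBeta: ["example-beta-flag"] };

const existingBetas = bedrockOptions.anthropicBeta ?? [];
const mergedBetas = betas.size > 0 ? [...existingBetas, ...Array.from(betas)] : existingBetas;
const additionalModelRequestFields = {
  anthropic_beta: mergedBetas,
};
// -> { anthropic_beta: ["example-beta-flag", "example-beta-flag-from-tools"] }
//    carried in the Converse request body rather than in an HTTP header.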