@ai-sdk/amazon-bedrock 3.1.0-beta.9 → 4.0.0-beta.100

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -1,4 +1,5 @@
  // src/bedrock-provider.ts
+ import { anthropicTools as anthropicTools2 } from "@ai-sdk/anthropic/internal";
  import {
  generateId,
  loadOptionalSetting,
@@ -7,12 +8,6 @@ import {
  withUserAgentSuffix as withUserAgentSuffix2
  } from "@ai-sdk/provider-utils";

- // src/version.ts
- var VERSION = true ? "3.1.0-beta.9" : "0.0.0-test";
-
- // src/bedrock-provider.ts
- import { anthropicTools as anthropicTools2 } from "@ai-sdk/anthropic/internal";
-
  // src/bedrock-chat-language-model.ts
  import {
  combineHeaders,
@@ -60,6 +55,18 @@ var BEDROCK_DOCUMENT_MIME_TYPES = {

  // src/bedrock-chat-options.ts
  import { z } from "zod/v4";
+ var bedrockFilePartProviderOptions = z.object({
+ /**
+ * Citation configuration for this document.
+ * When enabled, this document will generate citations in the response.
+ */
+ citations: z.object({
+ /**
+ * Enable citations for this document
+ */
+ enabled: z.boolean()
+ }).optional()
+ });
  var bedrockProviderOptions = z.object({
  /**
  * Additional inference parameters that the model supports,
@@ -69,8 +76,13 @@ var bedrockProviderOptions = z.object({
  additionalModelRequestFields: z.record(z.string(), z.any()).optional(),
  reasoningConfig: z.object({
  type: z.union([z.literal("enabled"), z.literal("disabled")]).optional(),
- budgetTokens: z.number().optional()
- }).optional()
+ budgetTokens: z.number().optional(),
+ maxReasoningEffort: z.enum(["low", "medium", "high"]).optional()
+ }).optional(),
+ /**
+ * Anthropic beta features to enable
+ */
+ anthropicBeta: z.array(z.string()).optional()
  });

  // src/bedrock-error.ts
@@ -80,6 +92,45 @@ var BedrockErrorSchema = z2.object({
  type: z2.string().nullish()
  });

+ // src/convert-bedrock-usage.ts
+ function convertBedrockUsage(usage) {
+ var _a, _b;
+ if (usage == null) {
+ return {
+ inputTokens: {
+ total: void 0,
+ noCache: void 0,
+ cacheRead: void 0,
+ cacheWrite: void 0
+ },
+ outputTokens: {
+ total: void 0,
+ text: void 0,
+ reasoning: void 0
+ },
+ raw: void 0
+ };
+ }
+ const inputTokens = usage.inputTokens;
+ const outputTokens = usage.outputTokens;
+ const cacheReadTokens = (_a = usage.cacheReadInputTokens) != null ? _a : 0;
+ const cacheWriteTokens = (_b = usage.cacheWriteInputTokens) != null ? _b : 0;
+ return {
+ inputTokens: {
+ total: inputTokens,
+ noCache: inputTokens - cacheReadTokens,
+ cacheRead: cacheReadTokens,
+ cacheWrite: cacheWriteTokens
+ },
+ outputTokens: {
+ total: outputTokens,
+ text: outputTokens,
+ reasoning: void 0
+ },
+ raw: usage
+ };
+ }
+
  // src/bedrock-event-stream-response-handler.ts
  import { EmptyResponseBodyError } from "@ai-sdk/provider";
  import {
@@ -164,11 +215,12 @@ import {
  anthropicTools,
  prepareTools as prepareAnthropicTools
  } from "@ai-sdk/anthropic/internal";
- function prepareTools({
+ async function prepareTools({
  tools,
  toolChoice,
  modelId
  }) {
+ var _a;
  const toolWarnings = [];
  const betas = /* @__PURE__ */ new Set();
  if (tools == null || tools.length === 0) {
@@ -180,10 +232,10 @@ function prepareTools({
  };
  }
  const supportedTools = tools.filter((tool) => {
- if (tool.type === "provider-defined" && tool.id === "anthropic.web_search_20250305") {
+ if (tool.type === "provider" && tool.id === "anthropic.web_search_20250305") {
  toolWarnings.push({
- type: "unsupported-tool",
- tool,
+ type: "unsupported",
+ feature: "web_search_20250305 tool",
  details: "The web_search_20250305 tool is not supported on Amazon Bedrock."
  });
  return false;
@@ -199,18 +251,16 @@ function prepareTools({
  };
  }
  const isAnthropicModel = modelId.includes("anthropic.");
- const providerDefinedTools = supportedTools.filter(
- (t) => t.type === "provider-defined"
- );
+ const ProviderTools = supportedTools.filter((t) => t.type === "provider");
  const functionTools = supportedTools.filter((t) => t.type === "function");
  let additionalTools = void 0;
  const bedrockTools = [];
- const usingAnthropicTools = isAnthropicModel && providerDefinedTools.length > 0;
+ const usingAnthropicTools = isAnthropicModel && ProviderTools.length > 0;
  if (usingAnthropicTools) {
  if (functionTools.length > 0) {
  toolWarnings.push({
- type: "unsupported-setting",
- setting: "tools",
+ type: "unsupported",
+ feature: "mixing Anthropic provider-defined tools and standard function tools",
  details: "Mixed Anthropic provider-defined tools and standard function tools are not supported in a single call to Bedrock. Only Anthropic tools will be used."
  });
  }
@@ -218,9 +268,10 @@ function prepareTools({
  toolChoice: preparedAnthropicToolChoice,
  toolWarnings: anthropicToolWarnings,
  betas: anthropicBetas
- } = prepareAnthropicTools({
- tools: providerDefinedTools,
- toolChoice
+ } = await prepareAnthropicTools({
+ tools: ProviderTools,
+ toolChoice,
+ supportsStructuredOutput: false
  });
  toolWarnings.push(...anthropicToolWarnings);
  anthropicBetas.forEach((beta) => betas.add(beta));
@@ -229,7 +280,7 @@ function prepareTools({
  tool_choice: preparedAnthropicToolChoice
  };
  }
- for (const tool of providerDefinedTools) {
+ for (const tool of ProviderTools) {
  const toolFactory = Object.values(anthropicTools).find((factory) => {
  const instance = factory({});
  return instance.id === tool.id;
@@ -240,24 +291,24 @@ function prepareTools({
  toolSpec: {
  name: tool.name,
  inputSchema: {
- json: asSchema(fullToolDefinition.inputSchema).jsonSchema
+ json: await asSchema(fullToolDefinition.inputSchema).jsonSchema
  }
  }
  });
  } else {
- toolWarnings.push({ type: "unsupported-tool", tool });
+ toolWarnings.push({ type: "unsupported", feature: "tool ${tool.id}" });
  }
  }
  } else {
- for (const tool of providerDefinedTools) {
- toolWarnings.push({ type: "unsupported-tool", tool });
+ for (const tool of ProviderTools) {
+ toolWarnings.push({ type: "unsupported", feature: `tool ${tool.id}` });
  }
  }
  for (const tool of functionTools) {
  bedrockTools.push({
  toolSpec: {
  name: tool.name,
- description: tool.description,
+ ...((_a = tool.description) == null ? void 0 : _a.trim()) !== "" ? { description: tool.description } : {},
  inputSchema: {
  json: tool.inputSchema
  }
@@ -307,7 +358,17 @@ function getCachePoint(providerMetadata) {
  var _a;
  return (_a = providerMetadata == null ? void 0 : providerMetadata.bedrock) == null ? void 0 : _a.cachePoint;
  }
+ async function shouldEnableCitations(providerMetadata) {
+ var _a, _b;
+ const bedrockOptions = await parseProviderOptions({
+ provider: "bedrock",
+ providerOptions: providerMetadata,
+ schema: bedrockFilePartProviderOptions
+ });
+ return (_b = (_a = bedrockOptions == null ? void 0 : bedrockOptions.citations) == null ? void 0 : _a.enabled) != null ? _b : false;
+ }
  async function convertToBedrockChatMessages(prompt) {
+ var _a, _b;
  const blocks = groupIntoBlocks(prompt);
  let system = [];
  const messages = [];
@@ -367,11 +428,17 @@ async function convertToBedrockChatMessages(prompt) {
  message: "File mime type is required in user message part content"
  });
  }
+ const enableCitations = await shouldEnableCitations(
+ part.providerOptions
+ );
  bedrockContent.push({
  document: {
  format: getBedrockDocumentFormat(part.mediaType),
- name: generateDocumentName(),
- source: { bytes: convertToBase64(part.data) }
+ name: (_a = part.filename) != null ? _a : generateDocumentName(),
+ source: { bytes: convertToBase64(part.data) },
+ ...enableCitations && {
+ citations: { enabled: true }
+ }
  }
  });
  }
@@ -391,7 +458,7 @@ async function convertToBedrockChatMessages(prompt) {
  switch (contentPart.type) {
  case "text":
  return { text: contentPart.text };
- case "media":
+ case "image-data":
  if (!contentPart.mediaType.startsWith("image/")) {
  throw new UnsupportedFunctionalityError2({
  functionality: `media type: ${contentPart.mediaType}`
@@ -406,6 +473,11 @@ async function convertToBedrockChatMessages(prompt) {
  source: { bytes: contentPart.data }
  }
  };
+ default: {
+ throw new UnsupportedFunctionalityError2({
+ functionality: `unsupported tool content part type: ${contentPart.type}`
+ });
+ }
  }
  });
  break;
@@ -414,6 +486,11 @@ async function convertToBedrockChatMessages(prompt) {
  case "error-text":
  toolResultContent = [{ text: output.value }];
  break;
+ case "execution-denied":
+ toolResultContent = [
+ { text: (_b = output.reason) != null ? _b : "Tool execution denied." }
+ ];
+ break;
  case "json":
  case "error-json":
  default:
@@ -612,7 +689,7 @@ function groupIntoBlocks(prompt) {
  }

  // src/map-bedrock-finish-reason.ts
- function mapBedrockFinishReason(finishReason) {
+ function mapBedrockFinishReason(finishReason, isJsonResponseFromTool) {
  switch (finishReason) {
  case "stop_sequence":
  case "end_turn":
@@ -623,7 +700,7 @@ function mapBedrockFinishReason(finishReason) {
  case "guardrail_intervened":
  return "content-filter";
  case "tool_use":
- return "tool-calls";
+ return isJsonResponseFromTool ? "stop" : "tool-calls";
  default:
  return "unknown";
  }
@@ -655,7 +732,7 @@ var BedrockChatLanguageModel = class {
  toolChoice,
  providerOptions
  }) {
- var _a, _b, _c, _d, _e, _f;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i;
  const bedrockOptions = (_a = await parseProviderOptions2({
  provider: "bedrock",
  providerOptions,
@@ -664,46 +741,53 @@ var BedrockChatLanguageModel = class {
  const warnings = [];
  if (frequencyPenalty != null) {
  warnings.push({
- type: "unsupported-setting",
- setting: "frequencyPenalty"
+ type: "unsupported",
+ feature: "frequencyPenalty"
  });
  }
  if (presencePenalty != null) {
  warnings.push({
- type: "unsupported-setting",
- setting: "presencePenalty"
+ type: "unsupported",
+ feature: "presencePenalty"
  });
  }
  if (seed != null) {
  warnings.push({
- type: "unsupported-setting",
- setting: "seed"
+ type: "unsupported",
+ feature: "seed"
+ });
+ }
+ if (temperature != null && temperature > 1) {
+ warnings.push({
+ type: "unsupported",
+ feature: "temperature",
+ details: `${temperature} exceeds bedrock maximum of 1.0. clamped to 1.0`
+ });
+ temperature = 1;
+ } else if (temperature != null && temperature < 0) {
+ warnings.push({
+ type: "unsupported",
+ feature: "temperature",
+ details: `${temperature} is below bedrock minimum of 0. clamped to 0`
  });
+ temperature = 0;
  }
  if (responseFormat != null && responseFormat.type !== "text" && responseFormat.type !== "json") {
  warnings.push({
- type: "unsupported-setting",
- setting: "responseFormat",
+ type: "unsupported",
+ feature: "responseFormat",
  details: "Only text and json response formats are supported."
  });
  }
- if (tools != null && (responseFormat == null ? void 0 : responseFormat.type) === "json") {
- if (tools.length > 0) {
- warnings.push({
- type: "other",
- message: "JSON response format does not support tools. The provided tools are ignored."
- });
- }
- }
  const jsonResponseTool = (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null ? {
  type: "function",
  name: "json",
  description: "Respond with a JSON object.",
  inputSchema: responseFormat.schema
  } : void 0;
- const { toolConfig, additionalTools, toolWarnings, betas } = prepareTools({
- tools: jsonResponseTool ? [jsonResponseTool, ...tools != null ? tools : []] : tools,
- toolChoice: jsonResponseTool != null ? { type: "tool", toolName: jsonResponseTool.name } : toolChoice,
+ const { toolConfig, additionalTools, toolWarnings, betas } = await prepareTools({
+ tools: jsonResponseTool ? [...tools != null ? tools : [], jsonResponseTool] : tools,
+ toolChoice: jsonResponseTool != null ? { type: "required" } : toolChoice,
  modelId: this.modelId
  });
  warnings.push(...toolWarnings);
@@ -713,8 +797,18 @@ var BedrockChatLanguageModel = class {
  ...additionalTools
  };
  }
- const isThinking = ((_b = bedrockOptions.reasoningConfig) == null ? void 0 : _b.type) === "enabled";
- const thinkingBudget = (_c = bedrockOptions.reasoningConfig) == null ? void 0 : _c.budgetTokens;
+ if (betas.size > 0 || bedrockOptions.anthropicBeta) {
+ const existingBetas = (_b = bedrockOptions.anthropicBeta) != null ? _b : [];
+ const mergedBetas = betas.size > 0 ? [...existingBetas, ...Array.from(betas)] : existingBetas;
+ bedrockOptions.additionalModelRequestFields = {
+ ...bedrockOptions.additionalModelRequestFields,
+ anthropic_beta: mergedBetas
+ };
+ }
+ const isAnthropicModel = this.modelId.includes("anthropic");
+ const isThinkingRequested = ((_c = bedrockOptions.reasoningConfig) == null ? void 0 : _c.type) === "enabled";
+ const thinkingBudget = (_d = bedrockOptions.reasoningConfig) == null ? void 0 : _d.budgetTokens;
+ const isAnthropicThinkingEnabled = isAnthropicModel && isThinkingRequested;
  const inferenceConfig = {
  ...maxOutputTokens != null && { maxTokens: maxOutputTokens },
  ...temperature != null && { temperature },
@@ -722,7 +816,7 @@ var BedrockChatLanguageModel = class {
  ...topK != null && { topK },
  ...stopSequences != null && { stopSequences }
  };
- if (isThinking && thinkingBudget != null) {
+ if (isAnthropicThinkingEnabled && thinkingBudget != null) {
  if (inferenceConfig.maxTokens != null) {
  inferenceConfig.maxTokens += thinkingBudget;
  } else {
@@ -731,36 +825,60 @@ var BedrockChatLanguageModel = class {
  bedrockOptions.additionalModelRequestFields = {
  ...bedrockOptions.additionalModelRequestFields,
  thinking: {
- type: (_d = bedrockOptions.reasoningConfig) == null ? void 0 : _d.type,
+ type: (_e = bedrockOptions.reasoningConfig) == null ? void 0 : _e.type,
  budget_tokens: thinkingBudget
  }
  };
+ } else if (!isAnthropicModel && thinkingBudget != null) {
+ warnings.push({
+ type: "unsupported",
+ feature: "budgetTokens",
+ details: "budgetTokens applies only to Anthropic models on Bedrock and will be ignored for this model."
+ });
+ }
+ const maxReasoningEffort = (_f = bedrockOptions.reasoningConfig) == null ? void 0 : _f.maxReasoningEffort;
+ if (maxReasoningEffort != null && !isAnthropicModel) {
+ bedrockOptions.additionalModelRequestFields = {
+ ...bedrockOptions.additionalModelRequestFields,
+ reasoningConfig: {
+ ...((_g = bedrockOptions.reasoningConfig) == null ? void 0 : _g.type) != null && {
+ type: bedrockOptions.reasoningConfig.type
+ },
+ maxReasoningEffort
+ }
+ };
+ } else if (maxReasoningEffort != null && isAnthropicModel) {
+ warnings.push({
+ type: "unsupported",
+ feature: "maxReasoningEffort",
+ details: "maxReasoningEffort applies only to Amazon Nova models on Bedrock and will be ignored for this model."
+ });
  }
- if (isThinking && inferenceConfig.temperature != null) {
+ if (isAnthropicThinkingEnabled && inferenceConfig.temperature != null) {
  delete inferenceConfig.temperature;
  warnings.push({
- type: "unsupported-setting",
- setting: "temperature",
+ type: "unsupported",
+ feature: "temperature",
  details: "temperature is not supported when thinking is enabled"
  });
  }
- if (isThinking && inferenceConfig.topP != null) {
+ if (isAnthropicThinkingEnabled && inferenceConfig.topP != null) {
  delete inferenceConfig.topP;
  warnings.push({
- type: "unsupported-setting",
- setting: "topP",
+ type: "unsupported",
+ feature: "topP",
  details: "topP is not supported when thinking is enabled"
  });
  }
- if (isThinking && inferenceConfig.topK != null) {
+ if (isAnthropicThinkingEnabled && inferenceConfig.topK != null) {
  delete inferenceConfig.topK;
  warnings.push({
- type: "unsupported-setting",
- setting: "topK",
+ type: "unsupported",
+ feature: "topK",
  details: "topK is not supported when thinking is enabled"
  });
  }
- const hasAnyTools = ((_f = (_e = toolConfig.tools) == null ? void 0 : _e.length) != null ? _f : 0) > 0 || additionalTools;
+ const hasAnyTools = ((_i = (_h = toolConfig.tools) == null ? void 0 : _h.length) != null ? _i : 0) > 0 || additionalTools;
  let filteredPrompt = prompt;
  if (!hasAnyTools) {
  const hasToolContent = prompt.some(
@@ -780,14 +898,18 @@ var BedrockChatLanguageModel = class {
  (message) => message.role === "system" || message.content.length > 0
  );
  warnings.push({
- type: "unsupported-setting",
- setting: "toolContent",
+ type: "unsupported",
+ feature: "toolContent",
  details: "Tool calls and results removed from conversation because Bedrock does not support tool content without active tools."
  });
  }
  }
  const { system, messages } = await convertToBedrockChatMessages(filteredPrompt);
- const { reasoningConfig: _, ...filteredBedrockOptions } = (providerOptions == null ? void 0 : providerOptions.bedrock) || {};
+ const {
+ reasoningConfig: _,
+ additionalModelRequestFields: __,
+ ...filteredBedrockOptions
+ } = (providerOptions == null ? void 0 : providerOptions.bedrock) || {};
  return {
  command: {
  system,
@@ -805,27 +927,21 @@ var BedrockChatLanguageModel = class {
  };
  }
  async getHeaders({
- betas,
  headers
  }) {
- return combineHeaders(
- await resolve(this.config.headers),
- betas.size > 0 ? { "anthropic-beta": Array.from(betas).join(",") } : {},
- headers
- );
+ return combineHeaders(await resolve(this.config.headers), headers);
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
+ var _a, _b, _c, _d, _e, _f, _g, _h;
  const {
  command: args,
  warnings,
- usesJsonResponseTool,
- betas
+ usesJsonResponseTool
  } = await this.getArgs(options);
  const url = `${this.getUrl(this.modelId)}/converse`;
  const { value: response, responseHeaders } = await postJsonToApi({
  url,
- headers: await this.getHeaders({ betas, headers: options.headers }),
+ headers: await this.getHeaders({ headers: options.headers }),
  body: args,
  failedResponseHandler: createJsonErrorResponseHandler({
  errorSchema: BedrockErrorSchema,
@@ -841,11 +957,10 @@ var BedrockChatLanguageModel = class {
  fetch: this.config.fetch
  });
  const content = [];
+ let isJsonResponseFromTool = false;
  for (const part of response.output.message.content) {
  if (part.text) {
- if (!usesJsonResponseTool) {
- content.push({ type: "text", text: part.text });
- }
+ content.push({ type: "text", text: part.text });
  }
  if (part.reasoningContent) {
  if ("reasoningText" in part.reasoningContent) {
@@ -874,21 +989,24 @@ var BedrockChatLanguageModel = class {
  }
  }
  if (part.toolUse) {
- content.push(
- // when a json response tool is used, the tool call becomes the text:
- usesJsonResponseTool ? {
+ const isJsonResponseTool = usesJsonResponseTool && part.toolUse.name === "json";
+ if (isJsonResponseTool) {
+ isJsonResponseFromTool = true;
+ content.push({
  type: "text",
  text: JSON.stringify(part.toolUse.input)
- } : {
+ });
+ } else {
+ content.push({
  type: "tool-call",
  toolCallId: (_c = (_b = part.toolUse) == null ? void 0 : _b.toolUseId) != null ? _c : this.config.generateId(),
  toolName: (_e = (_d = part.toolUse) == null ? void 0 : _d.name) != null ? _e : `tool-${this.config.generateId()}`,
- input: JSON.stringify((_g = (_f = part.toolUse) == null ? void 0 : _f.input) != null ? _g : "")
- }
- );
+ input: JSON.stringify((_g = (_f = part.toolUse) == null ? void 0 : _f.input) != null ? _g : {})
+ });
+ }
  }
  }
- const providerMetadata = response.trace || response.usage || usesJsonResponseTool ? {
+ const providerMetadata = response.trace || response.usage || isJsonResponseFromTool ? {
  bedrock: {
  ...response.trace && typeof response.trace === "object" ? { trace: response.trace } : {},
  ...((_h = response.usage) == null ? void 0 : _h.cacheWriteInputTokens) != null && {
@@ -896,20 +1014,16 @@ var BedrockChatLanguageModel = class {
  cacheWriteInputTokens: response.usage.cacheWriteInputTokens
  }
  },
- ...usesJsonResponseTool && { isJsonResponseFromTool: true }
+ ...isJsonResponseFromTool && { isJsonResponseFromTool: true }
  }
  } : void 0;
  return {
  content,
  finishReason: mapBedrockFinishReason(
- response.stopReason
+ response.stopReason,
+ isJsonResponseFromTool
  ),
- usage: {
- inputTokens: (_i = response.usage) == null ? void 0 : _i.inputTokens,
- outputTokens: (_j = response.usage) == null ? void 0 : _j.outputTokens,
- totalTokens: ((_k = response.usage) == null ? void 0 : _k.inputTokens) + ((_l = response.usage) == null ? void 0 : _l.outputTokens),
- cachedInputTokens: (_n = (_m = response.usage) == null ? void 0 : _m.cacheReadInputTokens) != null ? _n : void 0
- },
+ usage: convertBedrockUsage(response.usage),
  response: {
  // TODO add id, timestamp, etc
  headers: responseHeaders
@@ -922,13 +1036,12 @@ var BedrockChatLanguageModel = class {
  const {
  command: args,
  warnings,
- usesJsonResponseTool,
- betas
+ usesJsonResponseTool
  } = await this.getArgs(options);
  const url = `${this.getUrl(this.modelId)}/converse-stream`;
  const { value: response, responseHeaders } = await postJsonToApi({
  url,
- headers: await this.getHeaders({ betas, headers: options.headers }),
+ headers: await this.getHeaders({ headers: options.headers }),
  body: args,
  failedResponseHandler: createJsonErrorResponseHandler({
  errorSchema: BedrockErrorSchema,
@@ -939,12 +1052,9 @@ var BedrockChatLanguageModel = class {
  fetch: this.config.fetch
  });
  let finishReason = "unknown";
- const usage = {
- inputTokens: void 0,
- outputTokens: void 0,
- totalTokens: void 0
- };
+ let usage = void 0;
  let providerMetadata = void 0;
+ let isJsonResponseFromTool = false;
  const contentBlocks = {};
  return {
  stream: response.pipeThrough(
@@ -953,7 +1063,7 @@ var BedrockChatLanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i;
  function enqueueError(bedrockError) {
  finishReason = "error";
  controller.enqueue({ type: "error", error: bedrockError });
@@ -984,15 +1094,15 @@ var BedrockChatLanguageModel = class {
  }
  if (value.messageStop) {
  finishReason = mapBedrockFinishReason(
- value.messageStop.stopReason
+ value.messageStop.stopReason,
+ isJsonResponseFromTool
  );
  }
  if (value.metadata) {
- usage.inputTokens = (_b = (_a = value.metadata.usage) == null ? void 0 : _a.inputTokens) != null ? _b : usage.inputTokens;
- usage.outputTokens = (_d = (_c = value.metadata.usage) == null ? void 0 : _c.outputTokens) != null ? _d : usage.outputTokens;
- usage.totalTokens = ((_e = usage.inputTokens) != null ? _e : 0) + ((_f = usage.outputTokens) != null ? _f : 0);
- usage.cachedInputTokens = (_h = (_g = value.metadata.usage) == null ? void 0 : _g.cacheReadInputTokens) != null ? _h : usage.cachedInputTokens;
- const cacheUsage = ((_i = value.metadata.usage) == null ? void 0 : _i.cacheWriteInputTokens) != null ? {
+ if (value.metadata.usage) {
+ usage = value.metadata.usage;
+ }
+ const cacheUsage = ((_a = value.metadata.usage) == null ? void 0 : _a.cacheWriteInputTokens) != null ? {
  usage: {
  cacheWriteInputTokens: value.metadata.usage.cacheWriteInputTokens
  }
@@ -1000,19 +1110,16 @@ var BedrockChatLanguageModel = class {
  const trace = value.metadata.trace ? {
  trace: value.metadata.trace
  } : void 0;
- if (cacheUsage || trace || usesJsonResponseTool) {
+ if (cacheUsage || trace) {
  providerMetadata = {
  bedrock: {
  ...cacheUsage,
- ...trace,
- ...usesJsonResponseTool && {
- isJsonResponseFromTool: true
- }
+ ...trace
  }
  };
  }
  }
- if (((_j = value.contentBlockStart) == null ? void 0 : _j.contentBlockIndex) != null && !((_l = (_k = value.contentBlockStart) == null ? void 0 : _k.start) == null ? void 0 : _l.toolUse)) {
+ if (((_b = value.contentBlockStart) == null ? void 0 : _b.contentBlockIndex) != null && !((_d = (_c = value.contentBlockStart) == null ? void 0 : _c.start) == null ? void 0 : _d.toolUse)) {
  const blockIndex = value.contentBlockStart.contentBlockIndex;
  contentBlocks[blockIndex] = { type: "text" };
  controller.enqueue({
@@ -1020,26 +1127,22 @@ var BedrockChatLanguageModel = class {
  id: String(blockIndex)
  });
  }
- if (((_m = value.contentBlockDelta) == null ? void 0 : _m.delta) && "text" in value.contentBlockDelta.delta && value.contentBlockDelta.delta.text) {
+ if (((_e = value.contentBlockDelta) == null ? void 0 : _e.delta) && "text" in value.contentBlockDelta.delta && value.contentBlockDelta.delta.text) {
  const blockIndex = value.contentBlockDelta.contentBlockIndex || 0;
  if (contentBlocks[blockIndex] == null) {
  contentBlocks[blockIndex] = { type: "text" };
- if (!usesJsonResponseTool) {
- controller.enqueue({
- type: "text-start",
- id: String(blockIndex)
- });
- }
- }
- if (!usesJsonResponseTool) {
  controller.enqueue({
- type: "text-delta",
- id: String(blockIndex),
- delta: value.contentBlockDelta.delta.text
+ type: "text-start",
+ id: String(blockIndex)
  });
  }
+ controller.enqueue({
+ type: "text-delta",
+ id: String(blockIndex),
+ delta: value.contentBlockDelta.delta.text
+ });
  }
- if (((_n = value.contentBlockStop) == null ? void 0 : _n.contentBlockIndex) != null) {
+ if (((_f = value.contentBlockStop) == null ? void 0 : _f.contentBlockIndex) != null) {
  const blockIndex = value.contentBlockStop.contentBlockIndex;
  const contentBlock = contentBlocks[blockIndex];
  if (contentBlock != null) {
@@ -1049,14 +1152,13 @@ var BedrockChatLanguageModel = class {
  id: String(blockIndex)
  });
  } else if (contentBlock.type === "text") {
- if (!usesJsonResponseTool) {
- controller.enqueue({
- type: "text-end",
- id: String(blockIndex)
- });
- }
+ controller.enqueue({
+ type: "text-end",
+ id: String(blockIndex)
+ });
  } else if (contentBlock.type === "tool-call") {
- if (usesJsonResponseTool) {
+ if (contentBlock.isJsonResponseTool) {
+ isJsonResponseFromTool = true;
  controller.enqueue({
  type: "text-start",
  id: String(blockIndex)
@@ -1079,14 +1181,14 @@ var BedrockChatLanguageModel = class {
  type: "tool-call",
  toolCallId: contentBlock.toolCallId,
  toolName: contentBlock.toolName,
- input: contentBlock.jsonText
+ input: contentBlock.jsonText === "" ? "{}" : contentBlock.jsonText
  });
  }
  }
  delete contentBlocks[blockIndex];
  }
  }
- if (((_o = value.contentBlockDelta) == null ? void 0 : _o.delta) && "reasoningContent" in value.contentBlockDelta.delta && value.contentBlockDelta.delta.reasoningContent) {
+ if (((_g = value.contentBlockDelta) == null ? void 0 : _g.delta) && "reasoningContent" in value.contentBlockDelta.delta && value.contentBlockDelta.delta.reasoningContent) {
  const blockIndex = value.contentBlockDelta.contentBlockIndex || 0;
  const reasoningContent = value.contentBlockDelta.delta.reasoningContent;
  if ("text" in reasoningContent && reasoningContent.text) {
@@ -1127,16 +1229,18 @@ var BedrockChatLanguageModel = class {
  }
  }
  const contentBlockStart = value.contentBlockStart;
- if (((_p = contentBlockStart == null ? void 0 : contentBlockStart.start) == null ? void 0 : _p.toolUse) != null) {
+ if (((_h = contentBlockStart == null ? void 0 : contentBlockStart.start) == null ? void 0 : _h.toolUse) != null) {
  const toolUse = contentBlockStart.start.toolUse;
  const blockIndex = contentBlockStart.contentBlockIndex;
+ const isJsonResponseTool = usesJsonResponseTool && toolUse.name === "json";
  contentBlocks[blockIndex] = {
  type: "tool-call",
  toolCallId: toolUse.toolUseId,
  toolName: toolUse.name,
- jsonText: ""
+ jsonText: "",
+ isJsonResponseTool
  };
- if (!usesJsonResponseTool) {
+ if (!isJsonResponseTool) {
  controller.enqueue({
  type: "tool-input-start",
  id: toolUse.toolUseId,
@@ -1149,8 +1253,8 @@ var BedrockChatLanguageModel = class {
  const blockIndex = contentBlockDelta.contentBlockIndex;
  const contentBlock = contentBlocks[blockIndex];
  if ((contentBlock == null ? void 0 : contentBlock.type) === "tool-call") {
- const delta = (_q = contentBlockDelta.delta.toolUse.input) != null ? _q : "";
- if (!usesJsonResponseTool) {
+ const delta = (_i = contentBlockDelta.delta.toolUse.input) != null ? _i : "";
+ if (!contentBlock.isJsonResponseTool) {
  controller.enqueue({
  type: "tool-input-delta",
  id: contentBlock.toolCallId,
@@ -1162,10 +1266,24 @@ var BedrockChatLanguageModel = class {
  }
  }
  },
  flush(controller) {
+ if (isJsonResponseFromTool) {
+ if (providerMetadata) {
+ providerMetadata.bedrock = {
+ ...providerMetadata.bedrock,
+ isJsonResponseFromTool: true
+ };
+ } else {
+ providerMetadata = {
+ bedrock: {
+ isJsonResponseFromTool: true
+ }
+ };
+ }
+ }
  controller.enqueue({
  type: "finish",
  finishReason,
- usage,
+ usage: convertBedrockUsage(usage),
  ...providerMetadata && { providerMetadata }
  });
  }
@@ -1366,6 +1484,7 @@ var BedrockEmbeddingModel = class {
  abortSignal
  });
  return {
+ warnings: [],
  embeddings: [response.embedding],
  usage: { tokens: response.inputTextTokenCount }
  };
@@ -1442,8 +1561,8 @@ var BedrockImageModel = class {
  };
  if (aspectRatio != void 0) {
  warnings.push({
- type: "unsupported-setting",
- setting: "aspectRatio",
+ type: "unsupported",
+ feature: "aspectRatio",
  details: "This model does not support aspect ratio. Use `size` instead."
  });
  }
@@ -1479,42 +1598,26 @@ var bedrockImageResponseSchema = z6.object({
  images: z6.array(z6.string())
  });

- // src/headers-utils.ts
- function extractHeaders(headers) {
- let originalHeaders = {};
- if (headers) {
- if (headers instanceof Headers) {
- originalHeaders = convertHeadersToRecord(headers);
- } else if (Array.isArray(headers)) {
- for (const [k, v] of headers) {
- originalHeaders[k.toLowerCase()] = v;
- }
- } else {
- originalHeaders = Object.fromEntries(
- Object.entries(headers).map(([k, v]) => [k.toLowerCase(), v])
- );
- }
- }
- return originalHeaders;
- }
- function convertHeadersToRecord(headers) {
- return Object.fromEntries([...headers]);
- }
-
  // src/bedrock-sigv4-fetch.ts
  import {
  combineHeaders as combineHeaders4,
+ normalizeHeaders,
  withUserAgentSuffix,
  getRuntimeEnvironmentUserAgent
  } from "@ai-sdk/provider-utils";
  import { AwsV4Signer } from "aws4fetch";
+
+ // src/version.ts
+ var VERSION = true ? "4.0.0-beta.100" : "0.0.0-test";
+
+ // src/bedrock-sigv4-fetch.ts
  function createSigV4FetchFunction(getCredentials, fetch = globalThis.fetch) {
  return async (input, init) => {
  var _a, _b;
  const request = input instanceof Request ? input : void 0;
  const originalHeaders = combineHeaders4(
- extractHeaders(request == null ? void 0 : request.headers),
- extractHeaders(init == null ? void 0 : init.headers)
+ normalizeHeaders(request == null ? void 0 : request.headers),
+ normalizeHeaders(init == null ? void 0 : init.headers)
  );
  const headersWithUserAgent = withUserAgentSuffix(
  originalHeaders,
@@ -1550,7 +1653,7 @@ function createSigV4FetchFunction(getCredentials, fetch = globalThis.fetch) {
  service: "bedrock"
  });
  const signingResult = await signer.sign();
- const signedHeaders = convertHeadersToRecord(signingResult.headers);
+ const signedHeaders = normalizeHeaders(signingResult.headers);
  const combinedHeaders = combineHeaders4(headersWithUserAgent, signedHeaders);
  return fetch(input, {
  ...init,
@@ -1572,7 +1675,7 @@ function prepareBodyString(body) {
  }
  function createApiKeyFetchFunction(apiKey, fetch = globalThis.fetch) {
  return async (input, init) => {
- const originalHeaders = extractHeaders(init == null ? void 0 : init.headers);
+ const originalHeaders = normalizeHeaders(init == null ? void 0 : init.headers);
  const headersWithUserAgent = withUserAgentSuffix(
  originalHeaders,
  `ai-sdk/amazon-bedrock/${VERSION}`,
@@ -1588,6 +1691,130 @@ function createApiKeyFetchFunction(apiKey, fetch = globalThis.fetch) {
  };
  }

+ // src/reranking/bedrock-reranking-model.ts
+ import {
+ combineHeaders as combineHeaders5,
+ createJsonErrorResponseHandler as createJsonErrorResponseHandler4,
+ createJsonResponseHandler as createJsonResponseHandler4,
+ parseProviderOptions as parseProviderOptions4,
+ postJsonToApi as postJsonToApi4,
+ resolve as resolve4
+ } from "@ai-sdk/provider-utils";
+
+ // src/reranking/bedrock-reranking-api.ts
+ import { lazySchema, zodSchema } from "@ai-sdk/provider-utils";
+ import { z as z7 } from "zod/v4";
+ var bedrockRerankingResponseSchema = lazySchema(
+ () => zodSchema(
+ z7.object({
+ results: z7.array(
+ z7.object({
+ index: z7.number(),
+ relevanceScore: z7.number()
+ })
+ ),
+ nextToken: z7.string().optional()
+ })
+ )
+ );
+
+ // src/reranking/bedrock-reranking-options.ts
+ import { lazySchema as lazySchema2, zodSchema as zodSchema2 } from "@ai-sdk/provider-utils";
+ import { z as z8 } from "zod/v4";
+ var bedrockRerankingOptionsSchema = lazySchema2(
+ () => zodSchema2(
+ z8.object({
+ /**
+ * If the total number of results was greater than could fit in a response, a token is returned in the nextToken field. You can enter that token in this field to return the next batch of results.
+ */
+ nextToken: z8.string().optional(),
+ /**
+ * Additional model request fields to pass to the model.
+ */
+ additionalModelRequestFields: z8.record(z8.string(), z8.any()).optional()
+ })
+ )
+ );
+
+ // src/reranking/bedrock-reranking-model.ts
+ var BedrockRerankingModel = class {
+ constructor(modelId, config) {
+ this.modelId = modelId;
+ this.config = config;
+ this.specificationVersion = "v3";
+ this.provider = "amazon-bedrock";
+ }
+ async doRerank({
+ documents,
+ headers,
+ query,
+ topN,
+ abortSignal,
+ providerOptions
+ }) {
+ const bedrockOptions = await parseProviderOptions4({
+ provider: "bedrock",
+ providerOptions,
+ schema: bedrockRerankingOptionsSchema
+ });
+ const {
+ value: response,
+ responseHeaders,
+ rawValue
+ } = await postJsonToApi4({
+ url: `${this.config.baseUrl()}/rerank`,
+ headers: await resolve4(
+ combineHeaders5(await resolve4(this.config.headers), headers)
+ ),
+ body: {
+ nextToken: bedrockOptions == null ? void 0 : bedrockOptions.nextToken,
+ queries: [
+ {
+ textQuery: { text: query },
+ type: "TEXT"
+ }
+ ],
+ rerankingConfiguration: {
+ bedrockRerankingConfiguration: {
+ modelConfiguration: {
+ modelArn: `arn:aws:bedrock:${this.config.region}::foundation-model/${this.modelId}`,
+ additionalModelRequestFields: bedrockOptions == null ? void 0 : bedrockOptions.additionalModelRequestFields
+ },
+ numberOfResults: topN
+ },
+ type: "BEDROCK_RERANKING_MODEL"
+ },
+ sources: documents.values.map((value) => ({
+ type: "INLINE",
+ inlineDocumentSource: documents.type === "text" ? {
+ type: "TEXT",
+ textDocument: { text: value }
+ } : {
+ type: "JSON",
+ jsonDocument: value
+ }
+ }))
+ },
+ failedResponseHandler: createJsonErrorResponseHandler4({
+ errorSchema: BedrockErrorSchema,
+ errorToMessage: (error) => `${error.type}: ${error.message}`
+ }),
+ successfulResponseHandler: createJsonResponseHandler4(
+ bedrockRerankingResponseSchema
+ ),
+ fetch: this.config.fetch,
+ abortSignal
+ });
+ return {
+ ranking: response.results,
+ response: {
+ headers: responseHeaders,
+ body: rawValue
+ }
+ };
+ }
+ };
+
  // src/bedrock-provider.ts
  function createAmazonBedrock(options = {}) {
  const rawApiKey = loadOptionalSetting({
@@ -1656,7 +1883,12 @@ Original error: ${errorMessage}`
  throw error;
  }
  }, options.fetch);
- const getBaseUrl = () => {
+ const getHeaders = () => {
+ var _a;
+ const baseHeaders = (_a = options.headers) != null ? _a : {};
+ return withUserAgentSuffix2(baseHeaders, `ai-sdk/amazon-bedrock/${VERSION}`);
+ };
+ const getBedrockRuntimeBaseUrl = () => {
  var _a, _b;
  return (_b = withoutTrailingSlash(
  (_a = options.baseURL) != null ? _a : `https://bedrock-runtime.${loadSetting({
@@ -1667,13 +1899,19 @@ Original error: ${errorMessage}`
  })}.amazonaws.com`
  )) != null ? _b : `https://bedrock-runtime.us-east-1.amazonaws.com`;
  };
- const getHeaders = () => {
- var _a;
- const baseHeaders = (_a = options.headers) != null ? _a : {};
- return withUserAgentSuffix2(baseHeaders, `ai-sdk/amazon-bedrock/${VERSION}`);
+ const getBedrockAgentRuntimeBaseUrl = () => {
+ var _a, _b;
+ return (_b = withoutTrailingSlash(
+ (_a = options.baseURL) != null ? _a : `https://bedrock-agent-runtime.${loadSetting({
+ settingValue: options.region,
+ settingName: "region",
+ environmentVariableName: "AWS_REGION",
+ description: "AWS region"
+ })}.amazonaws.com`
+ )) != null ? _b : `https://bedrock-agent-runtime.us-west-2.amazonaws.com`;
  };
  const createChatModel = (modelId) => new BedrockChatLanguageModel(modelId, {
- baseUrl: getBaseUrl,
+ baseUrl: getBedrockRuntimeBaseUrl,
  headers: getHeaders,
  fetch: fetchFunction,
  generateId
@@ -1687,21 +1925,36 @@ Original error: ${errorMessage}`
  return createChatModel(modelId);
  };
  const createEmbeddingModel = (modelId) => new BedrockEmbeddingModel(modelId, {
- baseUrl: getBaseUrl,
+ baseUrl: getBedrockRuntimeBaseUrl,
  headers: getHeaders,
  fetch: fetchFunction
  });
  const createImageModel = (modelId) => new BedrockImageModel(modelId, {
- baseUrl: getBaseUrl,
+ baseUrl: getBedrockRuntimeBaseUrl,
+ headers: getHeaders,
+ fetch: fetchFunction
+ });
+ const createRerankingModel = (modelId) => new BedrockRerankingModel(modelId, {
+ baseUrl: getBedrockAgentRuntimeBaseUrl,
+ region: loadSetting({
+ settingValue: options.region,
+ settingName: "region",
+ environmentVariableName: "AWS_REGION",
+ description: "AWS region"
+ }),
  headers: getHeaders,
  fetch: fetchFunction
  });
+ provider.specificationVersion = "v3";
  provider.languageModel = createChatModel;
  provider.embedding = createEmbeddingModel;
+ provider.embeddingModel = createEmbeddingModel;
  provider.textEmbedding = createEmbeddingModel;
  provider.textEmbeddingModel = createEmbeddingModel;
  provider.image = createImageModel;
  provider.imageModel = createImageModel;
+ provider.reranking = createRerankingModel;
+ provider.rerankingModel = createRerankingModel;
  provider.tools = anthropicTools2;
  return provider;
  }
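
For orientation, a minimal, hypothetical usage sketch of the call-level and per-file options this release parses (reasoningConfig, anthropicBeta, and document citations). It assumes the `ai` package's generateText API and AWS credentials/region resolved from the environment; the model ID and beta flag below are illustrative placeholders, not values taken from this package.

import { createAmazonBedrock } from "@ai-sdk/amazon-bedrock";
import { generateText } from "ai";
import { readFile } from "node:fs/promises";

// region and SigV4 credentials (or an API key) are picked up from the environment
const bedrock = createAmazonBedrock();

const pdf = await readFile("./report.pdf");

const result = await generateText({
  // placeholder model ID; substitute an Anthropic model available in your Bedrock region
  model: bedrock("anthropic.claude-example-model-v1:0"),
  messages: [
    {
      role: "user",
      content: [
        { type: "text", text: "Summarize this document and cite the passages you rely on." },
        {
          type: "file",
          data: pdf,
          mediaType: "application/pdf",
          filename: "report.pdf",
          // per-part options parsed by bedrockFilePartProviderOptions above
          providerOptions: { bedrock: { citations: { enabled: true } } },
        },
      ],
    },
  ],
  // call-level options parsed by bedrockProviderOptions above
  providerOptions: {
    bedrock: {
      reasoningConfig: { type: "enabled", budgetTokens: 2048 },
      anthropicBeta: ["example-beta-flag"], // illustrative placeholder
    },
  },
});

console.log(result.text);

As the diff shows, any anthropicBeta values are merged with betas required by Anthropic provider-defined tools and forwarded through additionalModelRequestFields as anthropic_beta, rather than being sent as an anthropic-beta request header as in 3.x.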