@ai-sdk/amazon-bedrock 2.0.6 → 2.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -8,6 +8,7 @@ import {
 
  // src/bedrock-chat-language-model.ts
  import {
+ InvalidArgumentError,
  UnsupportedFunctionalityError as UnsupportedFunctionalityError3
  } from "@ai-sdk/provider";
  import {
@@ -17,6 +18,7 @@ import {
  postJsonToApi,
  resolve
  } from "@ai-sdk/provider-utils";
+ import { z as z2 } from "zod";
 
  // src/bedrock-api-types.ts
  var BEDROCK_CACHE_POINT = {
@@ -323,11 +325,45 @@ function convertToBedrockChatMessages(prompt) {
  // trim the last text part if it's the last message in the block
  // because Bedrock does not allow trailing whitespace
  // in pre-filled assistant responses
- isLastBlock && isLastMessage && isLastContentPart ? part.text.trim() : part.text
+ trimIfLast(
+ isLastBlock,
+ isLastMessage,
+ isLastContentPart,
+ part.text
+ )
  )
  });
  break;
  }
+ case "reasoning": {
+ bedrockContent.push({
+ reasoningContent: {
+ reasoningText: {
+ // trim the last text part if it's the last message in the block
+ // because Bedrock does not allow trailing whitespace
+ // in pre-filled assistant responses
+ text: trimIfLast(
+ isLastBlock,
+ isLastMessage,
+ isLastContentPart,
+ part.text
+ ),
+ signature: part.signature
+ }
+ }
+ });
+ break;
+ }
+ case "redacted-reasoning": {
+ bedrockContent.push({
+ reasoningContent: {
+ redactedReasoning: {
+ data: part.data
+ }
+ }
+ });
+ break;
+ }
  case "tool-call": {
  bedrockContent.push({
  toolUse: {
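
The two new cases above map the AI SDK's assistant "reasoning" and "redacted-reasoning" prompt parts onto Bedrock reasoningContent blocks. A minimal sketch of that mapping, with illustrative values that are not taken from this diff:

    // hypothetical assistant reasoning part as it arrives in the prompt
    const part = {
      type: "reasoning",
      text: "First compare the two dates, then compute the difference...",
      signature: "sig-abc123" // opaque signature, if the model returned one
    };
    // content block produced by the "reasoning" case above
    const bedrockBlock = {
      reasoningContent: {
        reasoningText: { text: part.text, signature: part.signature }
      }
    };
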
@@ -355,6 +391,9 @@ function convertToBedrockChatMessages(prompt) {
  }
  return { system, messages };
  }
+ function trimIfLast(isLastBlock, isLastMessage, isLastContentPart, text) {
+ return isLastBlock && isLastMessage && isLastContentPart ? text.trim() : text;
+ }
  function groupIntoBlocks(prompt) {
  const blocks = [];
  let currentBlock = void 0;
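
The extracted trimIfLast helper keeps the earlier inline ternary's behavior: trailing whitespace is stripped only when the text is the last content part of the last message in the last block, since Bedrock rejects trailing whitespace in pre-filled assistant responses. Illustrative calls:

    trimIfLast(true, true, true, "answer ");   // "answer"  (final position, trimmed)
    trimIfLast(true, false, true, "answer ");  // "answer " (not the last message, unchanged)
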
@@ -421,7 +460,6 @@ function mapBedrockFinishReason(finishReason) {
  }
 
  // src/bedrock-chat-language-model.ts
- import { z as z2 } from "zod";
  var BedrockChatLanguageModel = class {
  constructor(modelId, settings, config) {
  this.modelId = modelId;
@@ -444,10 +482,9 @@ var BedrockChatLanguageModel = class {
  stopSequences,
  responseFormat,
  seed,
- providerMetadata,
- headers
+ providerMetadata
  }) {
- var _a;
+ var _a, _b, _c, _d, _e, _f, _g;
  const type = mode.type;
  const warnings = [];
  if (frequencyPenalty != null) {
@@ -482,12 +519,54 @@ var BedrockChatLanguageModel = class {
  });
  }
  const { system, messages } = convertToBedrockChatMessages(prompt);
+ const reasoningConfigOptions = BedrockReasoningConfigOptionsSchema.safeParse(
+ (_a = providerMetadata == null ? void 0 : providerMetadata.bedrock) == null ? void 0 : _a.reasoning_config
+ );
+ if (!reasoningConfigOptions.success) {
+ throw new InvalidArgumentError({
+ argument: "providerOptions.bedrock.reasoning_config",
+ message: "invalid reasoning configuration options",
+ cause: reasoningConfigOptions.error
+ });
+ }
+ const isThinking = ((_b = reasoningConfigOptions.data) == null ? void 0 : _b.type) === "enabled";
+ const thinkingBudget = (_e = (_c = reasoningConfigOptions.data) == null ? void 0 : _c.budgetTokens) != null ? _e : (_d = reasoningConfigOptions.data) == null ? void 0 : _d.budget_tokens;
  const inferenceConfig = {
  ...maxTokens != null && { maxTokens },
  ...temperature != null && { temperature },
  ...topP != null && { topP },
  ...stopSequences != null && { stopSequences }
  };
+ if (isThinking && thinkingBudget != null) {
+ if (inferenceConfig.maxTokens != null) {
+ inferenceConfig.maxTokens += thinkingBudget;
+ } else {
+ inferenceConfig.maxTokens = thinkingBudget + 4096;
+ }
+ this.settings.additionalModelRequestFields = {
+ ...this.settings.additionalModelRequestFields,
+ reasoning_config: {
+ type: (_f = reasoningConfigOptions.data) == null ? void 0 : _f.type,
+ budget_tokens: thinkingBudget
+ }
+ };
+ }
+ if (isThinking && inferenceConfig.temperature != null) {
+ delete inferenceConfig.temperature;
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "temperature",
+ details: "temperature is not supported when thinking is enabled"
+ });
+ }
+ if (isThinking && inferenceConfig.topP != null) {
+ delete inferenceConfig.topP;
+ warnings.push({
+ type: "unsupported-setting",
+ setting: "topP",
+ details: "topP is not supported when thinking is enabled"
+ });
+ }
  const baseArgs = {
  system,
  additionalModelRequestFields: this.settings.additionalModelRequestFields,
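
The reasoning configuration is read from the provider metadata that doGenerate receives, i.e. providerMetadata.bedrock.reasoning_config. A minimal call-site sketch, assuming an AI SDK version whose providerOptions are forwarded to the provider as providerMetadata; the model id, prompt, and budget are illustrative:

    import { bedrock } from "@ai-sdk/amazon-bedrock";
    import { generateText } from "ai";

    const result = await generateText({
      model: bedrock("anthropic.claude-3-7-sonnet-20250219-v1:0"), // illustrative model id
      prompt: "How many primes are there below 100?",
      providerOptions: {
        bedrock: {
          reasoning_config: { type: "enabled", budget_tokens: 2048 }
        }
      }
    });

With a budget set and maxTokens omitted, the provider requests budget_tokens + 4096 output tokens, and it drops temperature and topP with unsupported-setting warnings while thinking is enabled.
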
@@ -503,7 +582,7 @@ var BedrockChatLanguageModel = class {
  return {
  command: {
  ...baseArgs,
- ...((_a = toolConfig.tools) == null ? void 0 : _a.length) ? { toolConfig } : {}
+ ...((_g = toolConfig.tools) == null ? void 0 : _g.length) ? { toolConfig } : {}
  },
  warnings: [...warnings, ...toolWarnings]
  };
@@ -577,6 +656,25 @@ var BedrockChatLanguageModel = class {
  }
  }
  } : void 0;
+ const reasoning = response.output.message.content.filter((content) => content.reasoningContent).map((content) => {
+ var _a2;
+ if (content.reasoningContent && "reasoningText" in content.reasoningContent) {
+ return {
+ type: "text",
+ text: content.reasoningContent.reasoningText.text,
+ ...content.reasoningContent.reasoningText.signature && {
+ signature: content.reasoningContent.reasoningText.signature
+ }
+ };
+ } else if (content.reasoningContent && "redactedReasoning" in content.reasoningContent) {
+ return {
+ type: "redacted",
+ data: (_a2 = content.reasoningContent.redactedReasoning.data) != null ? _a2 : ""
+ };
+ } else {
+ return void 0;
+ }
+ }).filter((item) => item !== void 0);
  return {
  text: (_h = (_g = (_f = (_e = response.output) == null ? void 0 : _e.message) == null ? void 0 : _f.content) == null ? void 0 : _g.map((part) => {
  var _a2;
@@ -601,6 +699,7 @@ var BedrockChatLanguageModel = class {
  rawCall: { rawPrompt, rawSettings },
  rawResponse: { headers: responseHeaders },
  warnings,
+ reasoning: reasoning.length > 0 ? reasoning : void 0,
  ...providerMetadata && { providerMetadata }
  };
  }
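
When the response contains reasoningContent blocks, doGenerate now returns them as a reasoning array next to text. A rough sketch of the shape assembled above (values illustrative):

    const reasoning = [
      { type: "text", text: "Checking each candidate in turn...", signature: "sig-abc123" },
      { type: "redacted", data: "opaque-redacted-payload" }
    ];
    // only attached when at least one reasoning block was returned:
    // reasoning.length > 0 ? reasoning : void 0
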
@@ -634,7 +733,7 @@ var BedrockChatLanguageModel = class {
  stream: response.pipeThrough(
  new TransformStream({
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
  function enqueueError(bedrockError) {
  finishReason = "error";
  controller.enqueue({ type: "error", error: bedrockError });
@@ -694,8 +793,27 @@ var BedrockChatLanguageModel = class {
  textDelta: value.contentBlockDelta.delta.text
  });
  }
+ if (((_l = value.contentBlockDelta) == null ? void 0 : _l.delta) && "reasoningContent" in value.contentBlockDelta.delta && value.contentBlockDelta.delta.reasoningContent) {
+ const reasoningContent = value.contentBlockDelta.delta.reasoningContent;
+ if ("text" in reasoningContent && reasoningContent.text) {
+ controller.enqueue({
+ type: "reasoning",
+ textDelta: reasoningContent.text
+ });
+ } else if ("signature" in reasoningContent && reasoningContent.signature) {
+ controller.enqueue({
+ type: "reasoning-signature",
+ signature: reasoningContent.signature
+ });
+ } else if ("data" in reasoningContent && reasoningContent.data) {
+ controller.enqueue({
+ type: "redacted-reasoning",
+ data: reasoningContent.data
+ });
+ }
+ }
  const contentBlockStart = value.contentBlockStart;
- if (((_l = contentBlockStart == null ? void 0 : contentBlockStart.start) == null ? void 0 : _l.toolUse) != null) {
+ if (((_m = contentBlockStart == null ? void 0 : contentBlockStart.start) == null ? void 0 : _m.toolUse) != null) {
  const toolUse = contentBlockStart.start.toolUse;
  toolCallContentBlocks[contentBlockStart.contentBlockIndex] = {
  toolCallId: toolUse.toolUseId,
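
On the streaming path the provider now emits reasoning, reasoning-signature, and redacted-reasoning stream parts as the matching contentBlockDelta events arrive. A consumption sketch, assuming streamText's fullStream surfaces these part types unchanged; the model id and prompt are illustrative:

    import { bedrock } from "@ai-sdk/amazon-bedrock";
    import { streamText } from "ai";

    const { fullStream } = streamText({
      model: bedrock("anthropic.claude-3-7-sonnet-20250219-v1:0"), // illustrative model id
      prompt: "Explain why the sky is blue.",
      providerOptions: {
        bedrock: { reasoning_config: { type: "enabled", budget_tokens: 1024 } }
      }
    });

    for await (const part of fullStream) {
      if (part.type === "reasoning") process.stdout.write(part.textDelta);       // thinking tokens
      else if (part.type === "text-delta") process.stdout.write(part.textDelta); // answer tokens
    }
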
@@ -706,7 +824,7 @@ var BedrockChatLanguageModel = class {
  const contentBlockDelta = value.contentBlockDelta;
  if ((contentBlockDelta == null ? void 0 : contentBlockDelta.delta) && "toolUse" in contentBlockDelta.delta && contentBlockDelta.delta.toolUse) {
  const contentBlock = toolCallContentBlocks[contentBlockDelta.contentBlockIndex];
- const delta = (_m = contentBlockDelta.delta.toolUse.input) != null ? _m : "";
+ const delta = (_n = contentBlockDelta.delta.toolUse.input) != null ? _n : "";
  controller.enqueue({
  type: "tool-call-delta",
  toolCallType: "function",
@@ -752,6 +870,11 @@ var BedrockChatLanguageModel = class {
  return `${this.config.baseUrl()}/model/${encodedModelId}`;
  }
  };
+ var BedrockReasoningConfigOptionsSchema = z2.object({
+ type: z2.union([z2.literal("enabled"), z2.literal("disabled")]).nullish(),
+ budget_tokens: z2.number().nullish(),
+ budgetTokens: z2.number().nullish()
+ }).nullish();
  var BedrockStopReasonSchema = z2.union([
  z2.enum(BEDROCK_STOP_REASONS),
  z2.string()
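
BedrockReasoningConfigOptionsSchema accepts either spelling of the budget field (budgetTokens wins when both are present, per the thinkingBudget fallback earlier), and the whole object may be omitted. All of the following would validate (values illustrative):

    BedrockReasoningConfigOptionsSchema.parse(undefined);                               // option not set
    BedrockReasoningConfigOptionsSchema.parse({ type: "enabled", budget_tokens: 2048 });
    BedrockReasoningConfigOptionsSchema.parse({ type: "enabled", budgetTokens: 2048 }); // camelCase alias
    BedrockReasoningConfigOptionsSchema.parse({ type: "disabled" });
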
@@ -761,6 +884,13 @@ var BedrockToolUseSchema = z2.object({
  name: z2.string(),
  input: z2.unknown()
  });
+ var BedrockReasoningTextSchema = z2.object({
+ signature: z2.string().nullish(),
+ text: z2.string()
+ });
+ var BedrockRedactedReasoningSchema = z2.object({
+ data: z2.string()
+ });
  var BedrockResponseSchema = z2.object({
  metrics: z2.object({
  latencyMs: z2.number()
@@ -770,7 +900,15 @@ var BedrockResponseSchema = z2.object({
  content: z2.array(
  z2.object({
  text: z2.string().nullish(),
- toolUse: BedrockToolUseSchema.nullish()
+ toolUse: BedrockToolUseSchema.nullish(),
+ reasoningContent: z2.union([
+ z2.object({
+ reasoningText: BedrockReasoningTextSchema
+ }),
+ z2.object({
+ redactedReasoning: BedrockRedactedReasoningSchema
+ })
+ ]).nullish()
  })
  ),
  role: z2.string()
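
With the widened BedrockResponseSchema, a content array may mix reasoning entries with ordinary text. A response content array like this would now validate (values illustrative):

    const content = [
      { reasoningContent: { reasoningText: { text: "Thinking it through...", signature: "sig-abc123" } } },
      { reasoningContent: { redactedReasoning: { data: "opaque-redacted-payload" } } },
      { text: "Final answer: 42." }
    ];
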
@@ -791,7 +929,18 @@ var BedrockStreamSchema = z2.object({
  contentBlockIndex: z2.number(),
  delta: z2.union([
  z2.object({ text: z2.string() }),
- z2.object({ toolUse: z2.object({ input: z2.string() }) })
+ z2.object({ toolUse: z2.object({ input: z2.string() }) }),
+ z2.object({
+ reasoningContent: z2.object({ text: z2.string() })
+ }),
+ z2.object({
+ reasoningContent: z2.object({
+ signature: z2.string()
+ })
+ }),
+ z2.object({
+ reasoningContent: z2.object({ data: z2.string() })
+ })
  ]).nullish()
  }).nullish(),
  contentBlockStart: z2.object({
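
BedrockStreamSchema likewise now accepts contentBlockDelta events whose delta carries reasoning text, a signature, or redacted data. Illustrative events that match the widened delta union (values illustrative):

    const deltaEvents = [
      { contentBlockIndex: 0, delta: { reasoningContent: { text: "Working through the steps..." } } },
      { contentBlockIndex: 0, delta: { reasoningContent: { signature: "sig-abc123" } } },
      { contentBlockIndex: 1, delta: { reasoningContent: { data: "opaque-redacted-payload" } } },
      { contentBlockIndex: 2, delta: { text: "visible output" } }
    ];
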