@ai-sdk/anthropic 2.0.0-alpha.7 → 2.0.0-alpha.8

package/CHANGELOG.md CHANGED
@@ -1,5 +1,16 @@
  # @ai-sdk/anthropic
 
+ ## 2.0.0-alpha.8
+
+ ### Patch Changes
+
+ - ad66c0e: feat (provider/anthropic): json response schema support via tool calls
+ - 075711d: fix (provider/anthropic): return stop finish reason for json output with tool
+ - Updated dependencies [4fef487]
+ - Updated dependencies [9222aeb]
+   - @ai-sdk/provider-utils@3.0.0-alpha.8
+   - @ai-sdk/provider@2.0.0-alpha.8
+
  ## 2.0.0-alpha.7
 
  ### Patch Changes
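At the usage level, this alpha lets the Anthropic provider honor `responseFormat: { type: "json", schema }` by forcing a synthetic tool call instead of rejecting JSON output. A minimal consumer-side sketch, assuming the AI SDK core `generateObject` helper and a Zod schema (the model id below is a placeholder, not part of this diff):

```ts
import { anthropic } from "@ai-sdk/anthropic";
import { generateObject } from "ai";
import { z } from "zod";

// generateObject passes responseFormat { type: "json", schema } to the provider;
// with this release the provider maps that onto a forced "json" tool call.
const { object } = await generateObject({
  model: anthropic("claude-3-7-sonnet-latest"), // placeholder model id
  schema: z.object({
    title: z.string(),
    tags: z.array(z.string()),
  }),
  prompt: "Summarize this release as a title plus a few tags.",
});

console.log(object.title, object.tags);
```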
package/dist/index.js CHANGED
@@ -468,13 +468,16 @@ function groupIntoBlocks(prompt) {
  }
 
  // src/map-anthropic-stop-reason.ts
- function mapAnthropicStopReason(finishReason) {
+ function mapAnthropicStopReason({
+   finishReason,
+   isJsonResponseFromTool
+ }) {
    switch (finishReason) {
      case "end_turn":
      case "stop_sequence":
        return "stop";
      case "tool_use":
-       return "tool-calls";
+       return isJsonResponseFromTool ? "stop" : "tool-calls";
      case "max_tokens":
        return "length";
      default:
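Because the JSON response is emulated with a forced tool call, Anthropic still reports `tool_use` when it finishes, which previously surfaced as `tool-calls` even though the caller never asked for tools. The widened signature lets the provider rewrite that case. Illustrative calls (a sketch of the expected behavior, not the package's test suite):

```ts
// tool_use that came from the synthetic json tool is reported as a normal stop:
mapAnthropicStopReason({ finishReason: "tool_use", isJsonResponseFromTool: true });  // "stop"

// real tool calls keep their original finish reason:
mapAnthropicStopReason({ finishReason: "tool_use", isJsonResponseFromTool: false }); // "tool-calls"

// other stop reasons are unaffected by the flag:
mapAnthropicStopReason({ finishReason: "max_tokens", isJsonResponseFromTool: true }); // "length"
```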
@@ -535,13 +538,27 @@ var AnthropicMessagesLanguageModel = class {
        setting: "seed"
      });
    }
-   if (responseFormat != null && responseFormat.type !== "text") {
-     warnings.push({
-       type: "unsupported-setting",
-       setting: "responseFormat",
-       details: "JSON response format is not supported."
-     });
+   if ((responseFormat == null ? void 0 : responseFormat.type) === "json") {
+     if (responseFormat.schema == null) {
+       warnings.push({
+         type: "unsupported-setting",
+         setting: "responseFormat",
+         details: "JSON response format requires a schema. The response format is ignored."
+       });
+     } else if (tools != null) {
+       warnings.push({
+         type: "unsupported-setting",
+         setting: "tools",
+         details: "JSON response format does not support tools. The provided tools are ignored."
+       });
+     }
    }
+   const jsonResponseTool = (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null ? {
+     type: "function",
+     name: "json",
+     description: "Respond with a JSON object.",
+     parameters: responseFormat.schema
+   } : void 0;
    const anthropicOptions = await (0, import_provider_utils3.parseProviderOptions)({
      provider: "anthropic",
      providerOptions,
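When `responseFormat.type === "json"` and a schema is present, warnings are now emitted only for the unsupported combinations (missing schema, or user tools alongside JSON output); otherwise the provider builds a synthetic tool whose parameters are the requested schema. A sketch of the intermediate `jsonResponseTool` for a concrete schema (the schema itself is illustrative):

```ts
// Example responseFormat as passed by a caller (illustrative schema):
const responseFormat = {
  type: "json",
  schema: {
    type: "object",
    properties: { sentiment: { type: "string" } },
    required: ["sentiment"],
  },
} as const;

// The synthetic tool built by the branch above:
const jsonResponseTool = {
  type: "function",
  name: "json",
  description: "Respond with a JSON object.",
  parameters: responseFormat.schema,
};
```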
@@ -608,7 +625,12 @@ var AnthropicMessagesLanguageModel = class {
      toolChoice: anthropicToolChoice,
      toolWarnings,
      betas: toolsBetas
-   } = prepareTools({ tools, toolChoice });
+   } = prepareTools(
+     jsonResponseTool != null ? {
+       tools: [jsonResponseTool],
+       toolChoice: { type: "tool", toolName: jsonResponseTool.name }
+     } : { tools, toolChoice }
+   );
    return {
      args: {
        ...baseArgs,
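With a `jsonResponseTool` present, any user-supplied tools are replaced by the single synthetic tool and the tool choice is pinned to it, so the model has no option but to answer through the JSON tool. Assuming `prepareTools` translates this into Anthropic's wire format (tool `name`/`description`/`input_schema`, tool choice by name; an assumption, not shown in this diff), the request would carry roughly:

```ts
// Rough shape of the resulting request fragment (assumed wire format):
const requestFragment = {
  tools: [
    {
      name: "json",
      description: "Respond with a JSON object.",
      input_schema: { /* the JSON Schema from responseFormat.schema */ },
    },
  ],
  tool_choice: { type: "tool", name: "json" },
};
```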
@@ -616,7 +638,8 @@ var AnthropicMessagesLanguageModel = class {
        tool_choice: anthropicToolChoice
      },
      warnings: [...warnings, ...toolWarnings],
-     betas: /* @__PURE__ */ new Set([...messagesBetas, ...toolsBetas])
+     betas: /* @__PURE__ */ new Set([...messagesBetas, ...toolsBetas]),
+     jsonResponseTool
    };
  }
  async getHeaders({
@@ -639,7 +662,7 @@ var AnthropicMessagesLanguageModel = class {
  }
  async doGenerate(options) {
    var _a, _b, _c, _d;
-   const { args, warnings, betas } = await this.getArgs(options);
+   const { args, warnings, betas, jsonResponseTool } = await this.getArgs(options);
    const {
      responseHeaders,
      value: response,
@@ -659,7 +682,9 @@ var AnthropicMessagesLanguageModel = class {
    for (const part of response.content) {
      switch (part.type) {
        case "text": {
-         content.push({ type: "text", text: part.text });
+         if (jsonResponseTool == null) {
+           content.push({ type: "text", text: part.text });
+         }
          break;
        }
        case "thinking": {
@@ -687,20 +712,29 @@ var AnthropicMessagesLanguageModel = class {
          break;
        }
        case "tool_use": {
-         content.push({
-           type: "tool-call",
-           toolCallType: "function",
-           toolCallId: part.id,
-           toolName: part.name,
-           args: JSON.stringify(part.input)
-         });
+         content.push(
+           // when a json response tool is used, the tool call becomes the text:
+           jsonResponseTool != null ? {
+             type: "text",
+             text: JSON.stringify(part.input)
+           } : {
+             type: "tool-call",
+             toolCallType: "function",
+             toolCallId: part.id,
+             toolName: part.name,
+             args: JSON.stringify(part.input)
+           }
+         );
          break;
        }
      }
    }
    return {
      content,
-     finishReason: mapAnthropicStopReason(response.stop_reason),
+     finishReason: mapAnthropicStopReason({
+       finishReason: response.stop_reason,
+       isJsonResponseFromTool: jsonResponseTool != null
+     }),
      usage: {
        inputTokens: response.usage.input_tokens,
        outputTokens: response.usage.output_tokens,
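In `doGenerate`, the forced tool call is folded back into ordinary text: plain `text` parts are dropped while the JSON tool is active, the `tool_use` input is serialized into a `text` content part, and the finish reason is remapped so the caller sees `stop` rather than `tool-calls`. An illustrative before/after for a single response part (id and values are made up):

```ts
// Anthropic response part for the forced "json" tool (example values):
const part = {
  type: "tool_use",
  id: "toolu_example",
  name: "json",
  input: { sentiment: "positive" },
};

// With jsonResponseTool set, it is surfaced as text instead of a tool call:
const emitted = { type: "text", text: JSON.stringify(part.input) };
// -> { type: "text", text: '{"sentiment":"positive"}' }
```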
@@ -723,7 +757,7 @@ var AnthropicMessagesLanguageModel = class {
    };
  }
  async doStream(options) {
-   const { args, warnings, betas } = await this.getArgs(options);
+   const { args, warnings, betas, jsonResponseTool } = await this.getArgs(options);
    const body = { ...args, stream: true };
    const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
      url: this.buildRequestUrl(true),
@@ -802,13 +836,15 @@ var AnthropicMessagesLanguageModel = class {
          case "content_block_stop": {
            if (toolCallContentBlocks[value.index] != null) {
              const contentBlock = toolCallContentBlocks[value.index];
-             controller.enqueue({
-               type: "tool-call",
-               toolCallType: "function",
-               toolCallId: contentBlock.toolCallId,
-               toolName: contentBlock.toolName,
-               args: contentBlock.jsonText
-             });
+             if (jsonResponseTool == null) {
+               controller.enqueue({
+                 type: "tool-call",
+                 toolCallType: "function",
+                 toolCallId: contentBlock.toolCallId,
+                 toolName: contentBlock.toolName,
+                 args: contentBlock.jsonText
+               });
+             }
              delete toolCallContentBlocks[value.index];
            }
            blockType = void 0;
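The streaming path mirrors this: when the JSON tool is active, the final `tool-call` event at `content_block_stop` is suppressed, as is any stray prose from `text_delta` in the next hunk, so the consumer only observes text chunks that together form the JSON document. A sketch of the content stream parts a caller might see for the earlier sentiment example (values illustrative):

```ts
// Approximate sequence of content parts emitted when jsonResponseTool is active:
const observed = [
  { type: "text", text: '{"sentiment":' }, // from input_json_delta (see below)
  { type: "text", text: '"positive"}' },   // from input_json_delta (see below)
  // no "tool-call" part at content_block_stop, and no model prose text deltas
];
```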
@@ -818,6 +854,9 @@ var AnthropicMessagesLanguageModel = class {
          const deltaType = value.delta.type;
          switch (deltaType) {
            case "text_delta": {
+             if (jsonResponseTool != null) {
+               return;
+             }
              controller.enqueue({
                type: "text",
                text: value.delta.text
@@ -848,13 +887,18 @@ var AnthropicMessagesLanguageModel = class {
            }
            case "input_json_delta": {
              const contentBlock = toolCallContentBlocks[value.index];
-             controller.enqueue({
-               type: "tool-call-delta",
-               toolCallType: "function",
-               toolCallId: contentBlock.toolCallId,
-               toolName: contentBlock.toolName,
-               argsTextDelta: value.delta.partial_json
-             });
+             controller.enqueue(
+               jsonResponseTool != null ? {
+                 type: "text",
+                 text: value.delta.partial_json
+               } : {
+                 type: "tool-call-delta",
+                 toolCallType: "function",
+                 toolCallId: contentBlock.toolCallId,
+                 toolName: contentBlock.toolName,
+                 argsTextDelta: value.delta.partial_json
+               }
+             );
              contentBlock.jsonText += value.delta.partial_json;
              return;
            }
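Each `input_json_delta` chunk is re-labelled on the fly: instead of `tool-call-delta` events addressed to a tool call, the partial JSON is forwarded as plain `text` deltas, while `jsonText` still accumulates as before. Roughly, per chunk (illustrative values):

```ts
// Incoming Anthropic stream delta (example value):
const delta = { type: "input_json_delta", partial_json: '{"sentiment":' };

// With jsonResponseTool active, the chunk is forwarded as a text delta:
const emitted = { type: "text", text: delta.partial_json };
// Without it, the same chunk becomes a "tool-call-delta" part, as in the old code.
```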
@@ -884,7 +928,10 @@ var AnthropicMessagesLanguageModel = class {
          case "message_delta": {
            usage.outputTokens = value.usage.output_tokens;
            usage.totalTokens = ((_e = usage.inputTokens) != null ? _e : 0) + ((_f = value.usage.output_tokens) != null ? _f : 0);
-           finishReason = mapAnthropicStopReason(value.delta.stop_reason);
+           finishReason = mapAnthropicStopReason({
+             finishReason: value.delta.stop_reason,
+             isJsonResponseFromTool: jsonResponseTool != null
+           });
            return;
          }
          case "message_stop": {