@ai-sdk/anthropic 2.0.0-alpha.6 → 2.0.0-alpha.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -464,13 +464,16 @@ function groupIntoBlocks(prompt) {
 }
 
 // src/map-anthropic-stop-reason.ts
-function mapAnthropicStopReason(finishReason) {
+function mapAnthropicStopReason({
+  finishReason,
+  isJsonResponseFromTool
+}) {
   switch (finishReason) {
     case "end_turn":
     case "stop_sequence":
       return "stop";
     case "tool_use":
-      return "tool-calls";
+      return isJsonResponseFromTool ? "stop" : "tool-calls";
     case "max_tokens":
       return "length";
     default:
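
The stop-reason mapper now takes an options object so it can tell a genuine tool call apart from the forced JSON-output tool. A minimal TypeScript sketch of the same mapping, for reference (type names are illustrative, and the truncated default branch is assumed to return "unknown" as in earlier releases):

    // Illustrative re-statement of the mapping above; not the package's exported API.
    type FinishReason = "stop" | "tool-calls" | "length" | "unknown";

    function mapStopReason(options: {
      finishReason: string | null | undefined;
      isJsonResponseFromTool: boolean;
    }): FinishReason {
      switch (options.finishReason) {
        case "end_turn":
        case "stop_sequence":
          return "stop";
        case "tool_use":
          // The injected "json" response tool is not a user-visible tool call,
          // so its tool_use stop is reported as a plain "stop".
          return options.isJsonResponseFromTool ? "stop" : "tool-calls";
        case "max_tokens":
          return "length";
        default:
          return "unknown"; // assumed; the hunk above is truncated here
      }
    }

    console.log(mapStopReason({ finishReason: "tool_use", isJsonResponseFromTool: true }));  // "stop"
    console.log(mapStopReason({ finishReason: "tool_use", isJsonResponseFromTool: false })); // "tool-calls"
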
@@ -531,13 +534,27 @@ var AnthropicMessagesLanguageModel = class {
         setting: "seed"
       });
     }
-    if (responseFormat != null && responseFormat.type !== "text") {
-      warnings.push({
-        type: "unsupported-setting",
-        setting: "responseFormat",
-        details: "JSON response format is not supported."
-      });
+    if ((responseFormat == null ? void 0 : responseFormat.type) === "json") {
+      if (responseFormat.schema == null) {
+        warnings.push({
+          type: "unsupported-setting",
+          setting: "responseFormat",
+          details: "JSON response format requires a schema. The response format is ignored."
+        });
+      } else if (tools != null) {
+        warnings.push({
+          type: "unsupported-setting",
+          setting: "tools",
+          details: "JSON response format does not support tools. The provided tools are ignored."
+        });
+      }
     }
+    const jsonResponseTool = (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null ? {
+      type: "function",
+      name: "json",
+      description: "Respond with a JSON object.",
+      parameters: responseFormat.schema
+    } : void 0;
     const anthropicOptions = await (0, import_provider_utils3.parseProviderOptions)({
       provider: "anthropic",
       providerOptions,
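
Previously any non-text responseFormat was rejected with a warning. Now a JSON response format that carries a schema is converted into a synthetic forced tool named "json"; only a schema-less JSON format, or a conflict with caller-supplied tools, still produces a warning. A small TypeScript sketch of that decision, mirroring the hunk above (the helper name and types are illustrative, not part of the package):

    // Illustrative helper mirroring the bundled logic above; not exported by @ai-sdk/anthropic.
    interface JsonResponseTool {
      type: "function";
      name: string;
      description: string;
      parameters: Record<string, unknown>; // JSON Schema for the expected object
    }

    function buildJsonResponseTool(
      responseFormat?: { type: "text" | "json"; schema?: Record<string, unknown> },
    ): JsonResponseTool | undefined {
      return responseFormat?.type === "json" && responseFormat.schema != null
        ? {
            type: "function",
            name: "json",
            description: "Respond with a JSON object.",
            parameters: responseFormat.schema,
          }
        : undefined;
    }

    // With a schema the synthetic tool is created; without one the format is ignored (with a warning).
    const jsonTool = buildJsonResponseTool({
      type: "json",
      schema: { type: "object", properties: { city: { type: "string" } }, required: ["city"] },
    });
    console.log(jsonTool?.name); // "json"
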
@@ -604,7 +621,12 @@ var AnthropicMessagesLanguageModel = class {
       toolChoice: anthropicToolChoice,
       toolWarnings,
       betas: toolsBetas
-    } = prepareTools({ tools, toolChoice });
+    } = prepareTools(
+      jsonResponseTool != null ? {
+        tools: [jsonResponseTool],
+        toolChoice: { type: "tool", toolName: jsonResponseTool.name }
+      } : { tools, toolChoice }
+    );
     return {
       args: {
         ...baseArgs,
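
When the synthetic tool exists, it replaces any caller-supplied tools and the tool choice is pinned to it, which forces Anthropic to answer through the schema. Assuming prepareTools still maps SDK function tools onto the Anthropic Messages API tools/tool_choice fields the way earlier releases did, the relevant part of the request body should end up roughly like this (a sketch of the expected shape, not captured output):

    // Expected Anthropic request fields in JSON-response mode (assumed mapping by prepareTools).
    const expectedRequestFields = {
      tools: [
        {
          name: "json",
          description: "Respond with a JSON object.",
          // the response-format schema is assumed to be passed through as input_schema:
          input_schema: {
            type: "object",
            properties: { city: { type: "string" } },
            required: ["city"],
          },
        },
      ],
      tool_choice: { type: "tool", name: "json" },
    };
    console.log(JSON.stringify(expectedRequestFields, null, 2));
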
@@ -612,7 +634,8 @@ var AnthropicMessagesLanguageModel = class {
         tool_choice: anthropicToolChoice
       },
       warnings: [...warnings, ...toolWarnings],
-      betas: /* @__PURE__ */ new Set([...messagesBetas, ...toolsBetas])
+      betas: /* @__PURE__ */ new Set([...messagesBetas, ...toolsBetas]),
+      jsonResponseTool
     };
   }
   async getHeaders({
@@ -635,7 +658,7 @@ var AnthropicMessagesLanguageModel = class {
   }
   async doGenerate(options) {
     var _a, _b, _c, _d;
-    const { args, warnings, betas } = await this.getArgs(options);
+    const { args, warnings, betas, jsonResponseTool } = await this.getArgs(options);
     const {
       responseHeaders,
       value: response,
@@ -655,7 +678,9 @@ var AnthropicMessagesLanguageModel = class {
     for (const part of response.content) {
       switch (part.type) {
         case "text": {
-          content.push({ type: "text", text: part.text });
+          if (jsonResponseTool == null) {
+            content.push({ type: "text", text: part.text });
+          }
           break;
         }
         case "thinking": {
@@ -683,20 +708,29 @@ var AnthropicMessagesLanguageModel = class {
           break;
         }
         case "tool_use": {
-          content.push({
-            type: "tool-call",
-            toolCallType: "function",
-            toolCallId: part.id,
-            toolName: part.name,
-            args: JSON.stringify(part.input)
-          });
+          content.push(
+            // when a json response tool is used, the tool call becomes the text:
+            jsonResponseTool != null ? {
+              type: "text",
+              text: JSON.stringify(part.input)
+            } : {
+              type: "tool-call",
+              toolCallType: "function",
+              toolCallId: part.id,
+              toolName: part.name,
+              args: JSON.stringify(part.input)
+            }
+          );
           break;
         }
       }
     }
     return {
       content,
-      finishReason: mapAnthropicStopReason(response.stop_reason),
+      finishReason: mapAnthropicStopReason({
+        finishReason: response.stop_reason,
+        isJsonResponseFromTool: jsonResponseTool != null
+      }),
       usage: {
         inputTokens: response.usage.input_tokens,
         outputTokens: response.usage.output_tokens,
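
Net effect in doGenerate: in JSON-response mode the model's tool_use input is surfaced as a single text part and the finish reason is reported as "stop", so callers can parse the text directly. A hedged caller-side sketch (assumes a LanguageModelV2-style doGenerate call as used by the 2.0 alphas; the model id, prompt shape, and option names are assumptions, not taken from this diff):

    import { createAnthropic } from "@ai-sdk/anthropic";

    const anthropic = createAnthropic({ apiKey: process.env.ANTHROPIC_API_KEY });
    const model = anthropic("claude-3-5-sonnet-latest");

    const result = await model.doGenerate({
      prompt: [
        { role: "user", content: [{ type: "text", text: "Name one city as JSON." }] },
      ],
      responseFormat: {
        type: "json",
        schema: { type: "object", properties: { city: { type: "string" } }, required: ["city"] },
      },
    });

    // The tool_use block arrives as a text part, so its JSON can be parsed directly:
    const textPart = result.content.find((part) => part.type === "text");
    if (textPart?.type === "text") {
      console.log(JSON.parse(textPart.text)); // e.g. { city: "Berlin" }
    }
    console.log(result.finishReason); // "stop" rather than "tool-calls"
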
@@ -719,7 +753,7 @@ var AnthropicMessagesLanguageModel = class {
     };
   }
   async doStream(options) {
-    const { args, warnings, betas } = await this.getArgs(options);
+    const { args, warnings, betas, jsonResponseTool } = await this.getArgs(options);
     const body = { ...args, stream: true };
     const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
       url: this.buildRequestUrl(true),
@@ -798,13 +832,15 @@ var AnthropicMessagesLanguageModel = class {
               case "content_block_stop": {
                 if (toolCallContentBlocks[value.index] != null) {
                   const contentBlock = toolCallContentBlocks[value.index];
-                  controller.enqueue({
-                    type: "tool-call",
-                    toolCallType: "function",
-                    toolCallId: contentBlock.toolCallId,
-                    toolName: contentBlock.toolName,
-                    args: contentBlock.jsonText
-                  });
+                  if (jsonResponseTool == null) {
+                    controller.enqueue({
+                      type: "tool-call",
+                      toolCallType: "function",
+                      toolCallId: contentBlock.toolCallId,
+                      toolName: contentBlock.toolName,
+                      args: contentBlock.jsonText
+                    });
+                  }
                   delete toolCallContentBlocks[value.index];
                 }
                 blockType = void 0;
@@ -814,6 +850,9 @@ var AnthropicMessagesLanguageModel = class {
                 const deltaType = value.delta.type;
                 switch (deltaType) {
                   case "text_delta": {
+                    if (jsonResponseTool != null) {
+                      return;
+                    }
                     controller.enqueue({
                       type: "text",
                       text: value.delta.text
@@ -844,13 +883,18 @@ var AnthropicMessagesLanguageModel = class {
                   }
                   case "input_json_delta": {
                     const contentBlock = toolCallContentBlocks[value.index];
-                    controller.enqueue({
-                      type: "tool-call-delta",
-                      toolCallType: "function",
-                      toolCallId: contentBlock.toolCallId,
-                      toolName: contentBlock.toolName,
-                      argsTextDelta: value.delta.partial_json
-                    });
+                    controller.enqueue(
+                      jsonResponseTool != null ? {
+                        type: "text",
+                        text: value.delta.partial_json
+                      } : {
+                        type: "tool-call-delta",
+                        toolCallType: "function",
+                        toolCallId: contentBlock.toolCallId,
+                        toolName: contentBlock.toolName,
+                        argsTextDelta: value.delta.partial_json
+                      }
+                    );
                     contentBlock.jsonText += value.delta.partial_json;
                     return;
                   }
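
The streaming path mirrors this: with the JSON response tool active, plain text_delta chunks are dropped, input_json_delta chunks are re-emitted as text parts, and no tool-call or tool-call-delta parts are enqueued for the synthetic tool. A hedged sketch of consuming doStream under the same assumptions as the doGenerate example above:

    import { createAnthropic } from "@ai-sdk/anthropic";

    const anthropic = createAnthropic({ apiKey: process.env.ANTHROPIC_API_KEY });
    const model = anthropic("claude-3-5-sonnet-latest");

    const { stream } = await model.doStream({
      prompt: [
        { role: "user", content: [{ type: "text", text: "Name one city as JSON." }] },
      ],
      responseFormat: {
        type: "json",
        schema: { type: "object", properties: { city: { type: "string" } }, required: ["city"] },
      },
    });

    // In JSON-response mode the schema-constrained tool input streams in as "text" parts.
    const reader = stream.getReader();
    let jsonText = "";
    while (true) {
      const { done, value: part } = await reader.read();
      if (done) break;
      if (part.type === "text") {
        jsonText += part.text;
      }
    }
    console.log(JSON.parse(jsonText)); // the accumulated object, e.g. { city: "Berlin" }
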
@@ -880,7 +924,10 @@ var AnthropicMessagesLanguageModel = class {
               case "message_delta": {
                 usage.outputTokens = value.usage.output_tokens;
                 usage.totalTokens = ((_e = usage.inputTokens) != null ? _e : 0) + ((_f = value.usage.output_tokens) != null ? _f : 0);
-                finishReason = mapAnthropicStopReason(value.delta.stop_reason);
+                finishReason = mapAnthropicStopReason({
+                  finishReason: value.delta.stop_reason,
+                  isJsonResponseFromTool: jsonResponseTool != null
+                });
                 return;
               }
               case "message_stop": {