@ai-sdk/anthropic 3.0.3 → 3.0.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2721,7 +2721,7 @@ var AnthropicMessagesLanguageModel = class {
  });
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
  const { args, warnings, betas, usesJsonResponseTool, toolNameMapping } = await this.getArgs({
  ...options,
  stream: false,
@@ -2745,6 +2745,7 @@ var AnthropicMessagesLanguageModel = class {
  });
  const content = [];
  const mcpToolCalls = {};
+ const serverToolCalls = {};
  let isJsonResponseFromTool = false;
  for (const part of response.content) {
  switch (part.type) {
@@ -2839,6 +2840,7 @@ var AnthropicMessagesLanguageModel = class {
  providerExecuted: true
  });
  } else if (part.name === "tool_search_tool_regex" || part.name === "tool_search_tool_bm25") {
+ serverToolCalls[part.id] = part.name;
  content.push({
  type: "tool-call",
  toolCallId: part.id,
@@ -3002,11 +3004,12 @@ var AnthropicMessagesLanguageModel = class {
  }
  // tool search tool results:
  case "tool_search_tool_result": {
+ const providerToolName = (_c = serverToolCalls[part.tool_use_id]) != null ? _c : "tool_search_tool_regex";
  if (part.content.type === "tool_search_tool_search_result") {
  content.push({
  type: "tool-result",
  toolCallId: part.tool_use_id,
- toolName: toolNameMapping.toCustomToolName("tool_search"),
+ toolName: toolNameMapping.toCustomToolName(providerToolName),
  result: part.content.tool_references.map((ref) => ({
  type: ref.type,
  toolName: ref.tool_name
@@ -3016,7 +3019,7 @@ var AnthropicMessagesLanguageModel = class {
  content.push({
  type: "tool-result",
  toolCallId: part.tool_use_id,
- toolName: toolNameMapping.toCustomToolName("tool_search"),
+ toolName: toolNameMapping.toCustomToolName(providerToolName),
  isError: true,
  result: {
  type: "tool_search_tool_result_error",
@@ -3035,13 +3038,13 @@ var AnthropicMessagesLanguageModel = class {
  finishReason: response.stop_reason,
  isJsonResponseFromTool
  }),
- raw: (_c = response.stop_reason) != null ? _c : void 0
+ raw: (_d = response.stop_reason) != null ? _d : void 0
  },
  usage: convertAnthropicMessagesUsage(response.usage),
  request: { body: args },
  response: {
- id: (_d = response.id) != null ? _d : void 0,
- modelId: (_e = response.model) != null ? _e : void 0,
+ id: (_e = response.id) != null ? _e : void 0,
+ modelId: (_f = response.model) != null ? _f : void 0,
  headers: responseHeaders,
  body: rawResponse
  },
@@ -3049,20 +3052,20 @@ var AnthropicMessagesLanguageModel = class {
  providerMetadata: {
  anthropic: {
  usage: response.usage,
- cacheCreationInputTokens: (_f = response.usage.cache_creation_input_tokens) != null ? _f : null,
- stopSequence: (_g = response.stop_sequence) != null ? _g : null,
+ cacheCreationInputTokens: (_g = response.usage.cache_creation_input_tokens) != null ? _g : null,
+ stopSequence: (_h = response.stop_sequence) != null ? _h : null,
  container: response.container ? {
  expiresAt: response.container.expires_at,
  id: response.container.id,
- skills: (_i = (_h = response.container.skills) == null ? void 0 : _h.map((skill) => ({
+ skills: (_j = (_i = response.container.skills) == null ? void 0 : _i.map((skill) => ({
  type: skill.type,
  skillId: skill.skill_id,
  version: skill.version
- }))) != null ? _i : null
+ }))) != null ? _j : null
  } : null,
- contextManagement: (_j = mapAnthropicResponseContextManagement(
+ contextManagement: (_k = mapAnthropicResponseContextManagement(
  response.context_management
- )) != null ? _j : null
+ )) != null ? _k : null
  }
  }
  };
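Both `doGenerate` above and the streaming transform in the hunks below make the same change: when a `server_tool_use` block named `tool_search_tool_regex` or `tool_search_tool_bm25` is encountered, its provider tool name is recorded under `part.id` in the new `serverToolCalls` map, and the later `tool_search_tool_result` block resolves the name via `part.tool_use_id` (falling back to `tool_search_tool_regex`) instead of mapping every result to a hard-coded `"tool_search"` name. The remaining hunks only renumber the compiler-generated temporaries (`_c` … `_m`) that the extra lookup shifts by one. A minimal sketch of the pattern, using simplified block shapes that are illustrative rather than the package's actual schema:

```ts
// Illustrative only: simplified block types, not the package's real response schema.
type ServerToolUseBlock = {
  type: "server_tool_use";
  id: string;
  name: "tool_search_tool_regex" | "tool_search_tool_bm25";
};

type ToolSearchResultBlock = {
  type: "tool_search_tool_result";
  tool_use_id: string;
};

type Block = ServerToolUseBlock | ToolSearchResultBlock;

function attributeToolSearchResults(
  blocks: Block[],
): Array<{ toolCallId: string; toolName: string }> {
  // Same bookkeeping the diff adds: remember which tool-search variant issued each call.
  const serverToolCalls: Record<string, string> = {};
  const results: Array<{ toolCallId: string; toolName: string }> = [];

  for (const part of blocks) {
    switch (part.type) {
      case "server_tool_use":
        serverToolCalls[part.id] = part.name;
        break;
      case "tool_search_tool_result":
        // Fall back to the regex variant when the originating call was not seen.
        results.push({
          toolCallId: part.tool_use_id,
          toolName: serverToolCalls[part.tool_use_id] ?? "tool_search_tool_regex",
        });
        break;
    }
  }
  return results;
}
```

The `(_c = serverToolCalls[part.tool_use_id]) != null ? _c : "tool_search_tool_regex"` lines in the diff are the compiled form of the `??` fallback shown here.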
@@ -3105,6 +3108,7 @@ var AnthropicMessagesLanguageModel = class {
  };
  const contentBlocks = {};
  const mcpToolCalls = {};
+ const serverToolCalls = {};
  let contextManagement = null;
  let rawUsage = void 0;
  let cacheCreationInputTokens = null;
@@ -3119,7 +3123,7 @@ var AnthropicMessagesLanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
- var _a2, _b2, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+ var _a2, _b2, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
  if (options.includeRawChunks) {
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
  }
@@ -3231,6 +3235,7 @@ var AnthropicMessagesLanguageModel = class {
  providerExecuted: true
  });
  } else if (part.name === "tool_search_tool_regex" || part.name === "tool_search_tool_bm25") {
+ serverToolCalls[part.id] = part.name;
  const customToolName = toolNameMapping.toCustomToolName(
  part.name
  );
@@ -3375,11 +3380,12 @@ var AnthropicMessagesLanguageModel = class {
  }
  // tool search tool results:
  case "tool_search_tool_result": {
+ const providerToolName = (_c = serverToolCalls[part.tool_use_id]) != null ? _c : "tool_search_tool_regex";
  if (part.content.type === "tool_search_tool_search_result") {
  controller.enqueue({
  type: "tool-result",
  toolCallId: part.tool_use_id,
- toolName: toolNameMapping.toCustomToolName("tool_search"),
+ toolName: toolNameMapping.toCustomToolName(providerToolName),
  result: part.content.tool_references.map((ref) => ({
  type: ref.type,
  toolName: ref.tool_name
@@ -3389,7 +3395,7 @@ var AnthropicMessagesLanguageModel = class {
  controller.enqueue({
  type: "tool-result",
  toolCallId: part.tool_use_id,
- toolName: toolNameMapping.toCustomToolName("tool_search"),
+ toolName: toolNameMapping.toCustomToolName(providerToolName),
  isError: true,
  result: {
  type: "tool_search_tool_result_error",
@@ -3588,12 +3594,12 @@ var AnthropicMessagesLanguageModel = class {
  }
  case "message_start": {
  usage.input_tokens = value.message.usage.input_tokens;
- usage.cache_read_input_tokens = (_c = value.message.usage.cache_read_input_tokens) != null ? _c : 0;
- usage.cache_creation_input_tokens = (_d = value.message.usage.cache_creation_input_tokens) != null ? _d : 0;
+ usage.cache_read_input_tokens = (_d = value.message.usage.cache_read_input_tokens) != null ? _d : 0;
+ usage.cache_creation_input_tokens = (_e = value.message.usage.cache_creation_input_tokens) != null ? _e : 0;
  rawUsage = {
  ...value.message.usage
  };
- cacheCreationInputTokens = (_e = value.message.usage.cache_creation_input_tokens) != null ? _e : null;
+ cacheCreationInputTokens = (_f = value.message.usage.cache_creation_input_tokens) != null ? _f : null;
  if (value.message.container != null) {
  container = {
  expiresAt: value.message.container.expires_at,
@@ -3612,8 +3618,8 @@ var AnthropicMessagesLanguageModel = class {
  }
  controller.enqueue({
  type: "response-metadata",
- id: (_f = value.message.id) != null ? _f : void 0,
- modelId: (_g = value.message.model) != null ? _g : void 0
+ id: (_g = value.message.id) != null ? _g : void 0,
+ modelId: (_h = value.message.model) != null ? _h : void 0
  });
  if (value.message.content != null) {
  for (let contentIndex = 0; contentIndex < value.message.content.length; contentIndex++) {
@@ -3629,7 +3635,7 @@ var AnthropicMessagesLanguageModel = class {
  id: part.id,
  toolName: part.name
  });
- const inputStr = JSON.stringify((_h = part.input) != null ? _h : {});
+ const inputStr = JSON.stringify((_i = part.input) != null ? _i : {});
  controller.enqueue({
  type: "tool-input-delta",
  id: part.id,
@@ -3664,17 +3670,17 @@ var AnthropicMessagesLanguageModel = class {
  finishReason: value.delta.stop_reason,
  isJsonResponseFromTool
  }),
- raw: (_i = value.delta.stop_reason) != null ? _i : void 0
+ raw: (_j = value.delta.stop_reason) != null ? _j : void 0
  };
- stopSequence = (_j = value.delta.stop_sequence) != null ? _j : null;
+ stopSequence = (_k = value.delta.stop_sequence) != null ? _k : null;
  container = value.delta.container != null ? {
  expiresAt: value.delta.container.expires_at,
  id: value.delta.container.id,
- skills: (_l = (_k = value.delta.container.skills) == null ? void 0 : _k.map((skill) => ({
+ skills: (_m = (_l = value.delta.container.skills) == null ? void 0 : _l.map((skill) => ({
  type: skill.type,
  skillId: skill.skill_id,
  version: skill.version
- }))) != null ? _l : null
+ }))) != null ? _m : null
  } : null;
  if (value.delta.context_management) {
  contextManagement = mapAnthropicResponseContextManagement(
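The streaming path records and resolves the tool name the same way inside the `transform(chunk, controller)` handler, enqueueing the `tool-result` part on the stream controller rather than pushing it into a `content` array. A hypothetical run of the sketch above, with a made-up block id, shows the observable effect of the change:

```ts
// Hypothetical usage of attributeToolSearchResults; the block id is invented for illustration.
const blocks: Block[] = [
  { type: "server_tool_use", id: "toolu_01", name: "tool_search_tool_bm25" },
  { type: "tool_search_tool_result", tool_use_id: "toolu_01" },
];

console.log(attributeToolSearchResults(blocks));
// [{ toolCallId: "toolu_01", toolName: "tool_search_tool_bm25" }]
// In 3.0.3 the result was always reported under the generic "tool_search" name,
// so bm25 results were indistinguishable from regex results.
```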