@ai-sdk/anthropic 3.0.3 → 3.0.5

This diff compares publicly released versions of the package as they appear in their public registry and is provided for informational purposes only.
@@ -2705,7 +2705,7 @@ var AnthropicMessagesLanguageModel = class {
  });
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
  const { args, warnings, betas, usesJsonResponseTool, toolNameMapping } = await this.getArgs({
  ...options,
  stream: false,
@@ -2729,6 +2729,7 @@ var AnthropicMessagesLanguageModel = class {
  });
  const content = [];
  const mcpToolCalls = {};
+ const serverToolCalls = {};
  let isJsonResponseFromTool = false;
  for (const part of response.content) {
  switch (part.type) {
@@ -2823,6 +2824,7 @@ var AnthropicMessagesLanguageModel = class {
  providerExecuted: true
  });
  } else if (part.name === "tool_search_tool_regex" || part.name === "tool_search_tool_bm25") {
+ serverToolCalls[part.id] = part.name;
  content.push({
  type: "tool-call",
  toolCallId: part.id,
@@ -2986,11 +2988,12 @@ var AnthropicMessagesLanguageModel = class {
  }
  // tool search tool results:
  case "tool_search_tool_result": {
+ const providerToolName = (_c = serverToolCalls[part.tool_use_id]) != null ? _c : "tool_search_tool_regex";
  if (part.content.type === "tool_search_tool_search_result") {
  content.push({
  type: "tool-result",
  toolCallId: part.tool_use_id,
- toolName: toolNameMapping.toCustomToolName("tool_search"),
+ toolName: toolNameMapping.toCustomToolName(providerToolName),
  result: part.content.tool_references.map((ref) => ({
  type: ref.type,
  toolName: ref.tool_name
@@ -3000,7 +3003,7 @@ var AnthropicMessagesLanguageModel = class {
  content.push({
  type: "tool-result",
  toolCallId: part.tool_use_id,
- toolName: toolNameMapping.toCustomToolName("tool_search"),
+ toolName: toolNameMapping.toCustomToolName(providerToolName),
  isError: true,
  result: {
  type: "tool_search_tool_result_error",
@@ -3019,13 +3022,13 @@ var AnthropicMessagesLanguageModel = class {
  finishReason: response.stop_reason,
  isJsonResponseFromTool
  }),
- raw: (_c = response.stop_reason) != null ? _c : void 0
+ raw: (_d = response.stop_reason) != null ? _d : void 0
  },
  usage: convertAnthropicMessagesUsage(response.usage),
  request: { body: args },
  response: {
- id: (_d = response.id) != null ? _d : void 0,
- modelId: (_e = response.model) != null ? _e : void 0,
+ id: (_e = response.id) != null ? _e : void 0,
+ modelId: (_f = response.model) != null ? _f : void 0,
  headers: responseHeaders,
  body: rawResponse
  },
@@ -3033,20 +3036,20 @@ var AnthropicMessagesLanguageModel = class {
  providerMetadata: {
  anthropic: {
  usage: response.usage,
- cacheCreationInputTokens: (_f = response.usage.cache_creation_input_tokens) != null ? _f : null,
- stopSequence: (_g = response.stop_sequence) != null ? _g : null,
+ cacheCreationInputTokens: (_g = response.usage.cache_creation_input_tokens) != null ? _g : null,
+ stopSequence: (_h = response.stop_sequence) != null ? _h : null,
  container: response.container ? {
  expiresAt: response.container.expires_at,
  id: response.container.id,
- skills: (_i = (_h = response.container.skills) == null ? void 0 : _h.map((skill) => ({
+ skills: (_j = (_i = response.container.skills) == null ? void 0 : _i.map((skill) => ({
  type: skill.type,
  skillId: skill.skill_id,
  version: skill.version
- }))) != null ? _i : null
+ }))) != null ? _j : null
  } : null,
- contextManagement: (_j = mapAnthropicResponseContextManagement(
+ contextManagement: (_k = mapAnthropicResponseContextManagement(
  response.context_management
- )) != null ? _j : null
+ )) != null ? _k : null
  }
  }
  };
@@ -3089,6 +3092,7 @@ var AnthropicMessagesLanguageModel = class {
  };
  const contentBlocks = {};
  const mcpToolCalls = {};
+ const serverToolCalls = {};
  let contextManagement = null;
  let rawUsage = void 0;
  let cacheCreationInputTokens = null;
@@ -3103,7 +3107,7 @@ var AnthropicMessagesLanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
- var _a2, _b2, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+ var _a2, _b2, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
  if (options.includeRawChunks) {
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
  }
@@ -3215,6 +3219,7 @@ var AnthropicMessagesLanguageModel = class {
  providerExecuted: true
  });
  } else if (part.name === "tool_search_tool_regex" || part.name === "tool_search_tool_bm25") {
+ serverToolCalls[part.id] = part.name;
  const customToolName = toolNameMapping.toCustomToolName(
  part.name
  );
@@ -3359,11 +3364,12 @@ var AnthropicMessagesLanguageModel = class {
  }
  // tool search tool results:
  case "tool_search_tool_result": {
+ const providerToolName = (_c = serverToolCalls[part.tool_use_id]) != null ? _c : "tool_search_tool_regex";
  if (part.content.type === "tool_search_tool_search_result") {
  controller.enqueue({
  type: "tool-result",
  toolCallId: part.tool_use_id,
- toolName: toolNameMapping.toCustomToolName("tool_search"),
+ toolName: toolNameMapping.toCustomToolName(providerToolName),
  result: part.content.tool_references.map((ref) => ({
  type: ref.type,
  toolName: ref.tool_name
@@ -3373,7 +3379,7 @@ var AnthropicMessagesLanguageModel = class {
  controller.enqueue({
  type: "tool-result",
  toolCallId: part.tool_use_id,
- toolName: toolNameMapping.toCustomToolName("tool_search"),
+ toolName: toolNameMapping.toCustomToolName(providerToolName),
  isError: true,
  result: {
  type: "tool_search_tool_result_error",
@@ -3572,12 +3578,12 @@ var AnthropicMessagesLanguageModel = class {
  }
  case "message_start": {
  usage.input_tokens = value.message.usage.input_tokens;
- usage.cache_read_input_tokens = (_c = value.message.usage.cache_read_input_tokens) != null ? _c : 0;
- usage.cache_creation_input_tokens = (_d = value.message.usage.cache_creation_input_tokens) != null ? _d : 0;
+ usage.cache_read_input_tokens = (_d = value.message.usage.cache_read_input_tokens) != null ? _d : 0;
+ usage.cache_creation_input_tokens = (_e = value.message.usage.cache_creation_input_tokens) != null ? _e : 0;
  rawUsage = {
  ...value.message.usage
  };
- cacheCreationInputTokens = (_e = value.message.usage.cache_creation_input_tokens) != null ? _e : null;
+ cacheCreationInputTokens = (_f = value.message.usage.cache_creation_input_tokens) != null ? _f : null;
  if (value.message.container != null) {
  container = {
  expiresAt: value.message.container.expires_at,
@@ -3596,8 +3602,8 @@ var AnthropicMessagesLanguageModel = class {
  }
  controller.enqueue({
  type: "response-metadata",
- id: (_f = value.message.id) != null ? _f : void 0,
- modelId: (_g = value.message.model) != null ? _g : void 0
+ id: (_g = value.message.id) != null ? _g : void 0,
+ modelId: (_h = value.message.model) != null ? _h : void 0
  });
  if (value.message.content != null) {
  for (let contentIndex = 0; contentIndex < value.message.content.length; contentIndex++) {
@@ -3613,7 +3619,7 @@ var AnthropicMessagesLanguageModel = class {
  id: part.id,
  toolName: part.name
  });
- const inputStr = JSON.stringify((_h = part.input) != null ? _h : {});
+ const inputStr = JSON.stringify((_i = part.input) != null ? _i : {});
  controller.enqueue({
  type: "tool-input-delta",
  id: part.id,
@@ -3648,17 +3654,17 @@ var AnthropicMessagesLanguageModel = class {
  finishReason: value.delta.stop_reason,
  isJsonResponseFromTool
  }),
- raw: (_i = value.delta.stop_reason) != null ? _i : void 0
+ raw: (_j = value.delta.stop_reason) != null ? _j : void 0
  };
- stopSequence = (_j = value.delta.stop_sequence) != null ? _j : null;
+ stopSequence = (_k = value.delta.stop_sequence) != null ? _k : null;
  container = value.delta.container != null ? {
  expiresAt: value.delta.container.expires_at,
  id: value.delta.container.id,
- skills: (_l = (_k = value.delta.container.skills) == null ? void 0 : _k.map((skill) => ({
+ skills: (_m = (_l = value.delta.container.skills) == null ? void 0 : _l.map((skill) => ({
  type: skill.type,
  skillId: skill.skill_id,
  version: skill.version
- }))) != null ? _l : null
+ }))) != null ? _m : null
  } : null;
  if (value.delta.context_management) {
  contextManagement = mapAnthropicResponseContextManagement(
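
Summary of the change: 3.0.5 adds a serverToolCalls map in both doGenerate and doStream that records which tool-search variant ("tool_search_tool_regex" or "tool_search_tool_bm25") issued each server tool call, keyed by part.id. When a "tool_search_tool_result" block arrives, the variant is looked up by part.tool_use_id and used to resolve the reported tool name, falling back to "tool_search_tool_regex", instead of the previous hard-coded "tool_search". The remaining hunks only renumber the transpiler's placeholder temporaries (_c through _m) to make room for the new variables.

A minimal sketch of the pattern, with a hypothetical emit callback and simplified part shapes (not the package's actual API surface):

// Records which tool-search variant produced each server tool call.
const serverToolCalls = {};

function onServerToolUse(part, emit, toolNameMapping) {
  // part.name is "tool_search_tool_regex" or "tool_search_tool_bm25".
  serverToolCalls[part.id] = part.name;
  emit({
    type: "tool-call",
    toolCallId: part.id,
    toolName: toolNameMapping.toCustomToolName(part.name)
  });
}

function onToolSearchResult(part, emit, toolNameMapping) {
  // Look up the variant that issued this call; fall back to the regex
  // variant when the call was never recorded, mirroring the diff above.
  const providerToolName =
    serverToolCalls[part.tool_use_id] ?? "tool_search_tool_regex";
  emit({
    type: "tool-result",
    toolCallId: part.tool_use_id,
    toolName: toolNameMapping.toCustomToolName(providerToolName)
  });
}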