@ai-sdk/anthropic 3.0.0-beta.27 → 3.0.0-beta.29

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -10,7 +10,7 @@ import {
  } from "@ai-sdk/provider-utils";

  // src/version.ts
- var VERSION = true ? "3.0.0-beta.27" : "0.0.0-test";
+ var VERSION = true ? "3.0.0-beta.29" : "0.0.0-test";

  // src/anthropic-messages-language-model.ts
  import {
@@ -112,6 +112,24 @@ var anthropicMessagesResponseSchema = lazySchema2(
  name: z2.string(),
  input: z2.record(z2.string(), z2.unknown()).nullish()
  }),
+ z2.object({
+ type: z2.literal("mcp_tool_use"),
+ id: z2.string(),
+ name: z2.string(),
+ input: z2.unknown(),
+ server_name: z2.string()
+ }),
+ z2.object({
+ type: z2.literal("mcp_tool_result"),
+ tool_use_id: z2.string(),
+ is_error: z2.boolean(),
+ content: z2.array(
+ z2.union([
+ z2.string(),
+ z2.object({ type: z2.literal("text"), text: z2.string() })
+ ])
+ )
+ }),
  z2.object({
  type: z2.literal("web_fetch_tool_result"),
  tool_use_id: z2.string(),
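For reference, these new entries validate Anthropic MCP content blocks shaped roughly as follows (a sketch with illustrative values; ids, names, inputs, and text are placeholders, not taken from this diff):

  // mcp_tool_use block (tool call executed by a connected MCP server)
  { "type": "mcp_tool_use", "id": "toolu_abc123", "name": "search", "input": { "query": "weather in Berlin" }, "server_name": "example-server" }

  // mcp_tool_result block (result returned for that call)
  { "type": "mcp_tool_result", "tool_use_id": "toolu_abc123", "is_error": false, "content": [{ "type": "text", "text": "sunny" }] }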
@@ -284,6 +302,24 @@ var anthropicMessagesChunkSchema = lazySchema2(
  name: z2.string(),
  input: z2.record(z2.string(), z2.unknown()).nullish()
  }),
+ z2.object({
+ type: z2.literal("mcp_tool_use"),
+ id: z2.string(),
+ name: z2.string(),
+ input: z2.unknown(),
+ server_name: z2.string()
+ }),
+ z2.object({
+ type: z2.literal("mcp_tool_result"),
+ tool_use_id: z2.string(),
+ is_error: z2.boolean(),
+ content: z2.array(
+ z2.union([
+ z2.string(),
+ z2.object({ type: z2.literal("text"), text: z2.string() })
+ ])
+ )
+ }),
  z2.object({
  type: z2.literal("web_fetch_tool_result"),
  tool_use_id: z2.string(),
@@ -536,7 +572,19 @@ var anthropicProviderOptions = z3.object({
  cacheControl: z3.object({
  type: z3.literal("ephemeral"),
  ttl: z3.union([z3.literal("5m"), z3.literal("1h")]).optional()
- }).optional()
+ }).optional(),
+ mcpServers: z3.array(
+ z3.object({
+ type: z3.literal("url"),
+ name: z3.string(),
+ url: z3.string(),
+ authorizationToken: z3.string().nullish(),
+ toolConfiguration: z3.object({
+ enabled: z3.boolean().nullish(),
+ allowedTools: z3.array(z3.string()).nullish()
+ }).nullish()
+ })
+ ).optional()
  });

  // src/anthropic-prepare-tools.ts
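The new mcpServers option is read from providerOptions.anthropic. A minimal usage sketch, assuming the standard generateText call from the ai package (model id, server name, URL, and token are placeholders, not part of this diff):

  import { generateText } from "ai";
  import { anthropic } from "@ai-sdk/anthropic";

  const result = await generateText({
    model: anthropic("claude-sonnet-4-5"), // placeholder model id
    prompt: "What is the weather in Berlin?",
    providerOptions: {
      anthropic: {
        mcpServers: [
          {
            type: "url",
            name: "example-server",                    // placeholder
            url: "https://example.com/mcp",            // placeholder
            authorizationToken: process.env.MCP_TOKEN, // optional
            toolConfiguration: {                       // optional
              enabled: true,
              allowedTools: ["search"],
            },
          },
        ],
      },
    },
  });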
@@ -929,7 +977,8 @@ import {
  import {
  convertToBase64,
  parseProviderOptions,
- validateTypes as validateTypes2
+ validateTypes as validateTypes2,
+ isNonNullable
  } from "@ai-sdk/provider-utils";

  // src/tool/code-execution_20250522.ts
@@ -1081,7 +1130,7 @@ async function convertToAnthropicMessagesPrompt({
  sendReasoning,
  warnings
  }) {
- var _a, _b, _c, _d, _e, _f;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
  const betas = /* @__PURE__ */ new Set();
  const blocks = groupIntoBlocks(prompt);
  let system = void 0;
@@ -1232,20 +1281,20 @@ async function convertToAnthropicMessagesPrompt({
  return {
  type: "text",
  text: contentPart.text,
- cache_control: void 0
+ cache_control: cacheControl
  };
- case "media": {
- if (contentPart.mediaType.startsWith("image/")) {
- return {
- type: "image",
- source: {
- type: "base64",
- media_type: contentPart.mediaType,
- data: contentPart.data
- },
- cache_control: void 0
- };
- }
+ case "image-data": {
+ return {
+ type: "image",
+ source: {
+ type: "base64",
+ media_type: contentPart.mediaType,
+ data: contentPart.data
+ },
+ cache_control: cacheControl
+ };
+ }
+ case "file-data": {
  if (contentPart.mediaType === "application/pdf") {
  betas.add("pdfs-2024-09-25");
  return {
@@ -1255,15 +1304,24 @@ async function convertToAnthropicMessagesPrompt({
  media_type: contentPart.mediaType,
  data: contentPart.data
  },
- cache_control: void 0
+ cache_control: cacheControl
  };
  }
- throw new UnsupportedFunctionalityError2({
- functionality: `media type: ${contentPart.mediaType}`
+ warnings.push({
+ type: "other",
+ message: `unsupported tool content part type: ${contentPart.type} with media type: ${contentPart.mediaType}`
  });
+ return void 0;
+ }
+ default: {
+ warnings.push({
+ type: "other",
+ message: `unsupported tool content part type: ${contentPart.type}`
+ });
+ return void 0;
  }
  }
- });
+ }).filter(isNonNullable);
  break;
  case "text":
  case "error-text":
@@ -1299,6 +1357,7 @@ async function convertToAnthropicMessagesPrompt({
  }
  case "assistant": {
  const anthropicContent = [];
+ const mcpToolUseIds = /* @__PURE__ */ new Set();
  for (let j = 0; j < block.messages.length; j++) {
  const message = block.messages[j];
  const isLastMessage = j === block.messages.length - 1;
@@ -1364,7 +1423,29 @@ async function convertToAnthropicMessagesPrompt({
  }
  case "tool-call": {
  if (part.providerExecuted) {
- if (part.toolName === "code_execution" && part.input != null && typeof part.input === "object" && "type" in part.input && typeof part.input.type === "string" && (part.input.type === "bash_code_execution" || part.input.type === "text_editor_code_execution")) {
+ const isMcpToolUse = ((_h = (_g = part.providerOptions) == null ? void 0 : _g.anthropic) == null ? void 0 : _h.type) === "mcp-tool-use";
+ if (isMcpToolUse) {
+ mcpToolUseIds.add(part.toolCallId);
+ const serverName = (_j = (_i = part.providerOptions) == null ? void 0 : _i.anthropic) == null ? void 0 : _j.serverName;
+ if (serverName == null || typeof serverName !== "string") {
+ warnings.push({
+ type: "other",
+ message: "mcp tool use server name is required and must be a string"
+ });
+ break;
+ }
+ anthropicContent.push({
+ type: "mcp_tool_use",
+ id: part.toolCallId,
+ name: part.toolName,
+ input: part.input,
+ server_name: serverName,
+ cache_control: cacheControl
+ });
+ } else if (
+ // code execution 20250825:
+ part.toolName === "code_execution" && part.input != null && typeof part.input === "object" && "type" in part.input && typeof part.input.type === "string" && (part.input.type === "bash_code_execution" || part.input.type === "text_editor_code_execution")
+ ) {
  anthropicContent.push({
  type: "server_tool_use",
  id: part.toolCallId,
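The new branch above treats a provider-executed tool call as MCP tool use when its provider options carry anthropic.type "mcp-tool-use". A sketch of such an assistant content part (ids and names are illustrative):

  {
    type: "tool-call",
    toolCallId: "toolu_abc123",
    toolName: "search",
    input: { query: "weather in Berlin" },
    providerExecuted: true,
    providerOptions: {
      anthropic: { type: "mcp-tool-use", serverName: "example-server" },
    },
  }

It is converted back into an mcp_tool_use block, with id, name, input, and server_name taken from these fields.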
@@ -1400,7 +1481,23 @@ async function convertToAnthropicMessagesPrompt({
  break;
  }
  case "tool-result": {
- if (part.toolName === "code_execution") {
+ if (mcpToolUseIds.has(part.toolCallId)) {
+ const output = part.output;
+ if (output.type !== "json" && output.type !== "error-json") {
+ warnings.push({
+ type: "other",
+ message: `provider executed tool result output type ${output.type} for tool ${part.toolName} is not supported`
+ });
+ break;
+ }
+ anthropicContent.push({
+ type: "mcp_tool_result",
+ tool_use_id: part.toolCallId,
+ is_error: output.type === "error-json",
+ content: output.value,
+ cache_control: cacheControl
+ });
+ } else if (part.toolName === "code_execution") {
  const output = part.output;
  if (output.type !== "json") {
  warnings.push({
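Matching tool-result parts are only accepted when their output type is json or error-json; is_error is derived from the output type and content is set to output.value. A sketch of an accepted part (illustrative values):

  {
    type: "tool-result",
    toolCallId: "toolu_abc123",
    toolName: "search",
    output: { type: "json", value: [{ type: "text", text: "sunny" }] },
  }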
@@ -1715,7 +1812,7 @@ var AnthropicMessagesLanguageModel = class {
  providerOptions,
  schema: anthropicProviderOptions
  });
- const { prompt: messagesPrompt, betas: messagesBetas } = await convertToAnthropicMessagesPrompt({
+ const { prompt: messagesPrompt, betas } = await convertToAnthropicMessagesPrompt({
  prompt,
  sendReasoning: (_a = anthropicOptions == null ? void 0 : anthropicOptions.sendReasoning) != null ? _a : true,
  warnings
@@ -1737,6 +1834,19 @@ var AnthropicMessagesLanguageModel = class {
  ...isThinking && {
  thinking: { type: "enabled", budget_tokens: thinkingBudget }
  },
+ // mcp servers:
+ ...(anthropicOptions == null ? void 0 : anthropicOptions.mcpServers) && anthropicOptions.mcpServers.length > 0 && {
+ mcp_servers: anthropicOptions.mcpServers.map((server) => ({
+ type: server.type,
+ name: server.name,
+ url: server.url,
+ authorization_token: server.authorizationToken,
+ tool_configuration: server.toolConfiguration ? {
+ allowed_tools: server.toolConfiguration.allowedTools,
+ enabled: server.toolConfiguration.enabled
+ } : void 0
+ }))
+ },
  // prompt:
  system: messagesPrompt.system,
  messages: messagesPrompt.messages
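With the provider options from the earlier sketch, the request body would include an mcp_servers entry mapped to snake_case, roughly (illustrative values):

  "mcp_servers": [
    {
      "type": "url",
      "name": "example-server",
      "url": "https://example.com/mcp",
      "authorization_token": "<token>",
      "tool_configuration": { "allowed_tools": ["search"], "enabled": true }
    }
  ]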
@@ -1778,11 +1888,14 @@ var AnthropicMessagesLanguageModel = class {
  warnings.push({
  type: "unsupported-setting",
  setting: "maxOutputTokens",
- details: `${maxTokens} (maxOutputTokens + thinkingBudget) is greater than ${this.modelId} ${maxOutputTokensForModel} max output tokens. The max output tokens have been limited to ${maxOutputTokensForModel}.`
+ details: `${baseArgs.max_tokens} (maxOutputTokens + thinkingBudget) is greater than ${this.modelId} ${maxOutputTokensForModel} max output tokens. The max output tokens have been limited to ${maxOutputTokensForModel}.`
  });
  }
  baseArgs.max_tokens = maxOutputTokensForModel;
  }
+ if ((anthropicOptions == null ? void 0 : anthropicOptions.mcpServers) && anthropicOptions.mcpServers.length > 0) {
+ betas.add("mcp-client-2025-04-04");
+ }
  const {
  tools: anthropicTools2,
  toolChoice: anthropicToolChoice,
@@ -1806,7 +1919,7 @@ var AnthropicMessagesLanguageModel = class {
  tool_choice: anthropicToolChoice
  },
  warnings: [...warnings, ...toolWarnings],
- betas: /* @__PURE__ */ new Set([...messagesBetas, ...toolsBetas]),
+ betas: /* @__PURE__ */ new Set([...betas, ...toolsBetas]),
  usesJsonResponseTool: jsonResponseTool != null
  };
  }
@@ -1871,6 +1984,7 @@ var AnthropicMessagesLanguageModel = class {
  fetch: this.config.fetch
  });
  const content = [];
+ const mcpToolCalls = {};
  for (const part of response.content) {
  switch (part.type) {
  case "text": {
@@ -1950,6 +2064,37 @@ var AnthropicMessagesLanguageModel = class {
  }
  break;
  }
+ case "mcp_tool_use": {
+ mcpToolCalls[part.id] = {
+ type: "tool-call",
+ toolCallId: part.id,
+ toolName: part.name,
+ input: JSON.stringify(part.input),
+ providerExecuted: true,
+ dynamic: true,
+ providerMetadata: {
+ anthropic: {
+ type: "mcp-tool-use",
+ serverName: part.server_name
+ }
+ }
+ };
+ content.push(mcpToolCalls[part.id]);
+ break;
+ }
+ case "mcp_tool_result": {
+ content.push({
+ type: "tool-result",
+ toolCallId: part.tool_use_id,
+ toolName: mcpToolCalls[part.tool_use_id].toolName,
+ isError: part.is_error,
+ result: part.content,
+ providerExecuted: true,
+ dynamic: true,
+ providerMetadata: mcpToolCalls[part.tool_use_id].providerMetadata
+ });
+ break;
+ }
  case "web_fetch_tool_result": {
  if (part.content.type === "web_fetch_result") {
  content.push({
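On the response side, an mcp_tool_use block is surfaced as a dynamic, provider-executed tool call whose input is the JSON-stringified tool input, and the matching mcp_tool_result reuses the stored tool name and provider metadata. A sketch of the emitted content parts (illustrative values):

  { type: "tool-call", toolCallId: "toolu_abc123", toolName: "search",
    input: "{\"query\":\"weather in Berlin\"}", providerExecuted: true, dynamic: true,
    providerMetadata: { anthropic: { type: "mcp-tool-use", serverName: "example-server" } } }

  { type: "tool-result", toolCallId: "toolu_abc123", toolName: "search", isError: false,
    result: [{ type: "text", text: "sunny" }], providerExecuted: true, dynamic: true,
    providerMetadata: { anthropic: { type: "mcp-tool-use", serverName: "example-server" } } }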
@@ -2130,6 +2275,7 @@ var AnthropicMessagesLanguageModel = class {
  totalTokens: void 0
  };
  const contentBlocks = {};
+ const mcpToolCalls = {};
  let rawUsage = void 0;
  let cacheCreationInputTokens = null;
  let stopSequence = null;
@@ -2156,7 +2302,8 @@ var AnthropicMessagesLanguageModel = class {
  return;
  }
  case "content_block_start": {
- const contentBlockType = value.content_block.type;
+ const part = value.content_block;
+ const contentBlockType = part.type;
  blockType = contentBlockType;
  switch (contentBlockType) {
  case "text": {
@@ -2182,7 +2329,7 @@ var AnthropicMessagesLanguageModel = class {
  id: String(value.index),
  providerMetadata: {
  anthropic: {
- redactedData: value.content_block.data
+ redactedData: part.data
  }
  }
  });
@@ -2191,16 +2338,16 @@ var AnthropicMessagesLanguageModel = class {
  case "tool_use": {
  contentBlocks[value.index] = usesJsonResponseTool ? { type: "text" } : {
  type: "tool-call",
- toolCallId: value.content_block.id,
- toolName: value.content_block.name,
+ toolCallId: part.id,
+ toolName: part.name,
  input: "",
  firstDelta: true
  };
  controller.enqueue(
  usesJsonResponseTool ? { type: "text-start", id: String(value.index) } : {
  type: "tool-input-start",
- id: value.content_block.id,
- toolName: value.content_block.name
+ id: part.id,
+ toolName: part.name
  }
  );
  return;
@@ -2215,19 +2362,19 @@ var AnthropicMessagesLanguageModel = class {
  "text_editor_code_execution",
  // code execution 20250825 bash:
  "bash_code_execution"
- ].includes(value.content_block.name)) {
+ ].includes(part.name)) {
  contentBlocks[value.index] = {
  type: "tool-call",
- toolCallId: value.content_block.id,
- toolName: value.content_block.name,
+ toolCallId: part.id,
+ toolName: part.name,
  input: "",
  providerExecuted: true,
  firstDelta: true
  };
- const mappedToolName = value.content_block.name === "text_editor_code_execution" || value.content_block.name === "bash_code_execution" ? "code_execution" : value.content_block.name;
+ const mappedToolName = part.name === "text_editor_code_execution" || part.name === "bash_code_execution" ? "code_execution" : part.name;
  controller.enqueue({
  type: "tool-input-start",
- id: value.content_block.id,
+ id: part.id,
  toolName: mappedToolName,
  providerExecuted: true
  });
@@ -2235,7 +2382,6 @@ var AnthropicMessagesLanguageModel = class {
  return;
  }
  case "web_fetch_tool_result": {
- const part = value.content_block;
  if (part.content.type === "web_fetch_result") {
  controller.enqueue({
  type: "tool-result",
@@ -2273,7 +2419,6 @@ var AnthropicMessagesLanguageModel = class {
  return;
  }
  case "web_search_tool_result": {
- const part = value.content_block;
  if (Array.isArray(part.content)) {
  controller.enqueue({
  type: "tool-result",
@@ -2322,7 +2467,6 @@ var AnthropicMessagesLanguageModel = class {
  }
  // code execution 20250522:
  case "code_execution_tool_result": {
- const part = value.content_block;
  if (part.content.type === "code_execution_result") {
  controller.enqueue({
  type: "tool-result",
@@ -2354,7 +2498,6 @@ var AnthropicMessagesLanguageModel = class {
  // code execution 20250825:
  case "bash_code_execution_tool_result":
  case "text_editor_code_execution_tool_result": {
- const part = value.content_block;
  controller.enqueue({
  type: "tool-result",
  toolCallId: part.tool_use_id,
@@ -2364,6 +2507,37 @@ var AnthropicMessagesLanguageModel = class {
  });
  return;
  }
+ case "mcp_tool_use": {
+ mcpToolCalls[part.id] = {
+ type: "tool-call",
+ toolCallId: part.id,
+ toolName: part.name,
+ input: JSON.stringify(part.input),
+ providerExecuted: true,
+ dynamic: true,
+ providerMetadata: {
+ anthropic: {
+ type: "mcp-tool-use",
+ serverName: part.server_name
+ }
+ }
+ };
+ controller.enqueue(mcpToolCalls[part.id]);
+ return;
+ }
+ case "mcp_tool_result": {
+ controller.enqueue({
+ type: "tool-result",
+ toolCallId: part.tool_use_id,
+ toolName: mcpToolCalls[part.tool_use_id].toolName,
+ isError: part.is_error,
+ result: part.content,
+ providerExecuted: true,
+ dynamic: true,
+ providerMetadata: mcpToolCalls[part.tool_use_id].providerMetadata
+ });
+ return;
+ }
  default: {
  const _exhaustiveCheck = contentBlockType;
  throw new Error(