@ai-sdk/anthropic 2.0.30 → 2.0.32

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -10,7 +10,7 @@ import {
 } from "@ai-sdk/provider-utils";
 
 // src/version.ts
-var VERSION = true ? "2.0.30" : "0.0.0-test";
+var VERSION = true ? "2.0.32" : "0.0.0-test";
 
 // src/anthropic-messages-language-model.ts
 import {
@@ -536,6 +536,21 @@ var anthropicProviderOptions = z3.object({
   cacheControl: z3.object({
     type: z3.literal("ephemeral"),
     ttl: z3.union([z3.literal("5m"), z3.literal("1h")]).optional()
+  }).optional(),
+  /**
+   * Agent Skills configuration. Skills enable Claude to perform specialized tasks
+   * like document processing (PPTX, DOCX, PDF, XLSX) and data analysis.
+   * Requires code execution tool to be enabled.
+   */
+  container: z3.object({
+    id: z3.string().optional(),
+    skills: z3.array(
+      z3.object({
+        type: z3.union([z3.literal("anthropic"), z3.literal("custom")]),
+        skillId: z3.string(),
+        version: z3.string().optional()
+      })
+    ).optional()
   }).optional()
 });
 
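For orientation, a sketch of how the new container option could be passed from the AI SDK side. The model id, the "pptx" skill id, and the codeExecution_20250825 tool factory are illustrative assumptions, not confirmed by this diff:

// Illustrative only: skill id and code-execution tool factory are assumed.
import { generateText } from "ai";
import { anthropic } from "@ai-sdk/anthropic";

const result = await generateText({
  model: anthropic("claude-sonnet-4-5"),
  // Skills require the code execution tool; without it the provider now emits
  // the "code execution tool is required when using skills" warning (see below).
  tools: {
    code_execution: anthropic.tools.codeExecution_20250825(),
  },
  providerOptions: {
    anthropic: {
      container: {
        // camelCase skillId here is mapped to skill_id in the request body
        skills: [{ type: "anthropic", skillId: "pptx" }],
      },
    },
  },
  prompt: "Build a three-slide deck from these meeting notes.",
});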
@@ -1653,8 +1668,7 @@ var AnthropicMessagesLanguageModel = class {
   }
   async getArgs({
     prompt,
-    maxOutputTokens = 4096,
-    // 4096: max model output tokens TODO update default in v5
+    maxOutputTokens,
     temperature,
     topP,
     topK,
@@ -1667,7 +1681,7 @@ var AnthropicMessagesLanguageModel = class {
     toolChoice,
     providerOptions
   }) {
-    var _a, _b, _c;
+    var _a, _b, _c, _d;
     const warnings = [];
     if (frequencyPenalty != null) {
       warnings.push({
@@ -1713,18 +1727,20 @@ var AnthropicMessagesLanguageModel = class {
       providerOptions,
       schema: anthropicProviderOptions
     });
-    const { prompt: messagesPrompt, betas: messagesBetas } = await convertToAnthropicMessagesPrompt({
+    const { prompt: messagesPrompt, betas } = await convertToAnthropicMessagesPrompt({
       prompt,
       sendReasoning: (_a = anthropicOptions == null ? void 0 : anthropicOptions.sendReasoning) != null ? _a : true,
       warnings
     });
     const isThinking = ((_b = anthropicOptions == null ? void 0 : anthropicOptions.thinking) == null ? void 0 : _b.type) === "enabled";
     const thinkingBudget = (_c = anthropicOptions == null ? void 0 : anthropicOptions.thinking) == null ? void 0 : _c.budgetTokens;
+    const maxOutputTokensForModel = getMaxOutputTokensForModel(this.modelId);
+    const maxTokens = maxOutputTokens != null ? maxOutputTokens : maxOutputTokensForModel;
     const baseArgs = {
       // model id:
       model: this.modelId,
       // standardized settings:
-      max_tokens: maxOutputTokens,
+      max_tokens: maxTokens,
       temperature,
       top_k: topK,
       top_p: topP,
@@ -1733,6 +1749,17 @@ var AnthropicMessagesLanguageModel = class {
       ...isThinking && {
         thinking: { type: "enabled", budget_tokens: thinkingBudget }
       },
+      // container with agent skills:
+      ...(anthropicOptions == null ? void 0 : anthropicOptions.container) && {
+        container: {
+          id: anthropicOptions.container.id,
+          skills: (_d = anthropicOptions.container.skills) == null ? void 0 : _d.map((skill) => ({
+            type: skill.type,
+            skill_id: skill.skillId,
+            version: skill.version
+          }))
+        }
+      },
       // prompt:
       system: messagesPrompt.system,
       messages: messagesPrompt.messages
@@ -1767,7 +1794,30 @@ var AnthropicMessagesLanguageModel = class {
           details: "topP is not supported when thinking is enabled"
         });
       }
-      baseArgs.max_tokens = maxOutputTokens + thinkingBudget;
+      baseArgs.max_tokens = maxTokens + thinkingBudget;
+    }
+    if (baseArgs.max_tokens > maxOutputTokensForModel) {
+      if (maxOutputTokens != null) {
+        warnings.push({
+          type: "unsupported-setting",
+          setting: "maxOutputTokens",
+          details: `${baseArgs.max_tokens} (maxOutputTokens + thinkingBudget) is greater than ${this.modelId} ${maxOutputTokensForModel} max output tokens. The max output tokens have been limited to ${maxOutputTokensForModel}.`
+        });
+      }
+      baseArgs.max_tokens = maxOutputTokensForModel;
+    }
+    if ((anthropicOptions == null ? void 0 : anthropicOptions.container) && anthropicOptions.container.skills && anthropicOptions.container.skills.length > 0) {
+      betas.add("code-execution-2025-08-25");
+      betas.add("skills-2025-10-02");
+      betas.add("files-api-2025-04-14");
+      if (!(tools == null ? void 0 : tools.some(
+        (tool) => tool.type === "provider-defined" && tool.id === "anthropic.code_execution_20250825"
+      ))) {
+        warnings.push({
+          type: "other",
+          message: "code execution tool is required when using skills"
+        });
+      }
     }
     const {
       tools: anthropicTools2,
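Worked through with assumed numbers: for a claude-opus-4-* model (capped at 32000 by the getMaxOutputTokensForModel helper added at the end of this diff), maxOutputTokens: 30000 plus a thinking budgetTokens: 10000 first yields max_tokens = 40000; since that exceeds the cap and maxOutputTokens was set explicitly, the unsupported-setting warning above is emitted and max_tokens is clamped back to 32000 before the request is sent.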
@@ -1792,7 +1842,7 @@ var AnthropicMessagesLanguageModel = class {
         tool_choice: anthropicToolChoice
       },
       warnings: [...warnings, ...toolWarnings],
-      betas: /* @__PURE__ */ new Set([...messagesBetas, ...toolsBetas]),
+      betas: /* @__PURE__ */ new Set([...betas, ...toolsBetas]),
       usesJsonResponseTool: jsonResponseTool != null
     };
   }
@@ -2547,6 +2597,17 @@ var AnthropicMessagesLanguageModel = class {
     };
   }
 };
+function getMaxOutputTokensForModel(modelId) {
+  if (modelId.includes("claude-sonnet-4-") || modelId.includes("claude-3-7-sonnet") || modelId.includes("claude-haiku-4-5")) {
+    return 64e3;
+  } else if (modelId.includes("claude-opus-4-")) {
+    return 32e3;
+  } else if (modelId.includes("claude-3-5-haiku")) {
+    return 8192;
+  } else {
+    return 4096;
+  }
+}
 
 // src/tool/bash_20241022.ts
 import {
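One practical effect of dropping the hard-coded 4096 default, as a sketch with an illustrative model id:

// Illustrative only: with maxOutputTokens unset, max_tokens now falls back to
// getMaxOutputTokensForModel("claude-sonnet-4-5"), i.e. 64000, instead of the
// previous fixed default of 4096.
import { generateText } from "ai";
import { anthropic } from "@ai-sdk/anthropic";

const { text } = await generateText({
  model: anthropic("claude-sonnet-4-5"),
  prompt: "Summarize what changed between 2.0.30 and 2.0.32.",
});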