@ai-sdk/anthropic 2.0.29 → 2.0.31
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/index.d.mts +1 -1
- package/dist/index.d.ts +1 -1
- package/dist/index.js +27 -5
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +27 -5
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +1 -1
- package/dist/internal/index.d.ts +1 -1
- package/dist/internal/index.js +26 -4
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +26 -4
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/index.mjs
CHANGED
|
@@ -10,7 +10,7 @@ import {
|
|
|
10
10
|
} from "@ai-sdk/provider-utils";
|
|
11
11
|
|
|
12
12
|
// src/version.ts
|
|
13
|
-
var VERSION = true ? "2.0.29" : "0.0.0-test";
|
|
13
|
+
var VERSION = true ? "2.0.31" : "0.0.0-test";
|
|
14
14
|
|
|
15
15
|
// src/anthropic-messages-language-model.ts
|
|
16
16
|
import {
|
|
@@ -1653,8 +1653,7 @@ var AnthropicMessagesLanguageModel = class {
|
|
|
1653
1653
|
}
|
|
1654
1654
|
async getArgs({
|
|
1655
1655
|
prompt,
|
|
1656
|
-
maxOutputTokens
|
|
1657
|
-
// 4096: max model output tokens TODO update default in v5
|
|
1656
|
+
maxOutputTokens,
|
|
1658
1657
|
temperature,
|
|
1659
1658
|
topP,
|
|
1660
1659
|
topK,
|
|
@@ -1720,11 +1719,13 @@ var AnthropicMessagesLanguageModel = class {
|
|
|
1720
1719
|
});
|
|
1721
1720
|
const isThinking = ((_b = anthropicOptions == null ? void 0 : anthropicOptions.thinking) == null ? void 0 : _b.type) === "enabled";
|
|
1722
1721
|
const thinkingBudget = (_c = anthropicOptions == null ? void 0 : anthropicOptions.thinking) == null ? void 0 : _c.budgetTokens;
|
|
1722
|
+
const maxOutputTokensForModel = getMaxOutputTokensForModel(this.modelId);
|
|
1723
|
+
const maxTokens = maxOutputTokens != null ? maxOutputTokens : maxOutputTokensForModel;
|
|
1723
1724
|
const baseArgs = {
|
|
1724
1725
|
// model id:
|
|
1725
1726
|
model: this.modelId,
|
|
1726
1727
|
// standardized settings:
|
|
1727
|
-
max_tokens:
|
|
1728
|
+
max_tokens: maxTokens,
|
|
1728
1729
|
temperature,
|
|
1729
1730
|
top_k: topK,
|
|
1730
1731
|
top_p: topP,
|
|
@@ -1767,7 +1768,17 @@ var AnthropicMessagesLanguageModel = class {
|
|
|
1767
1768
|
details: "topP is not supported when thinking is enabled"
|
|
1768
1769
|
});
|
|
1769
1770
|
}
|
|
1770
|
-
baseArgs.max_tokens =
|
|
1771
|
+
baseArgs.max_tokens = maxTokens + thinkingBudget;
|
|
1772
|
+
}
|
|
1773
|
+
if (baseArgs.max_tokens > maxOutputTokensForModel) {
|
|
1774
|
+
if (maxOutputTokens != null) {
|
|
1775
|
+
warnings.push({
|
|
1776
|
+
type: "unsupported-setting",
|
|
1777
|
+
setting: "maxOutputTokens",
|
|
1778
|
+
details: `${maxTokens} (maxOutputTokens + thinkingBudget) is greater than ${this.modelId} ${maxOutputTokensForModel} max output tokens. The max output tokens have been limited to ${maxOutputTokensForModel}.`
|
|
1779
|
+
});
|
|
1780
|
+
}
|
|
1781
|
+
baseArgs.max_tokens = maxOutputTokensForModel;
|
|
1771
1782
|
}
|
|
1772
1783
|
const {
|
|
1773
1784
|
tools: anthropicTools2,
|
|
@@ -2547,6 +2558,17 @@ var AnthropicMessagesLanguageModel = class {
|
|
|
2547
2558
|
};
|
|
2548
2559
|
}
|
|
2549
2560
|
};
|
|
2561
|
+
function getMaxOutputTokensForModel(modelId) {
|
|
2562
|
+
if (modelId.includes("claude-sonnet-4-") || modelId.includes("claude-3-7-sonnet") || modelId.includes("claude-haiku-4-5")) {
|
|
2563
|
+
return 64e3;
|
|
2564
|
+
} else if (modelId.includes("claude-opus-4-")) {
|
|
2565
|
+
return 32e3;
|
|
2566
|
+
} else if (modelId.includes("claude-3-5-haiku")) {
|
|
2567
|
+
return 8192;
|
|
2568
|
+
} else {
|
|
2569
|
+
return 4096;
|
|
2570
|
+
}
|
|
2571
|
+
}
|
|
2550
2572
|
|
|
2551
2573
|
// src/tool/bash_20241022.ts
|
|
2552
2574
|
import {
|