@ai-sdk/anthropic 3.0.0-beta.94 → 3.0.0-beta.96
This diff reflects the contents of the publicly released package versions as they appear in their public registry and is provided for informational purposes only.
- package/CHANGELOG.md +18 -0
- package/dist/index.js +43 -29
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +43 -29
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.js +42 -28
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +42 -28
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/index.mjs
CHANGED
@@ -11,7 +11,7 @@ import {
 } from "@ai-sdk/provider-utils";

 // src/version.ts
-var VERSION = true ? "3.0.0-beta.94" : "0.0.0-test";
+var VERSION = true ? "3.0.0-beta.96" : "0.0.0-test";

 // src/anthropic-messages-language-model.ts
 import {
@@ -1726,6 +1726,9 @@ async function convertToAnthropicMessagesPrompt({
       case "tool": {
         for (let i2 = 0; i2 < content.length; i2++) {
           const part = content[i2];
+          if (part.type === "tool-approval-response") {
+            continue;
+          }
           const isLastPart = i2 === content.length - 1;
           const cacheControl = (_d = validator.getCacheControl(part.providerOptions, {
             type: "tool result part",
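The new guard means tool-approval-response parts inside a tool message are no longer turned into Anthropic tool_result blocks; they are simply skipped during prompt conversion. A minimal sketch of that behavior, using simplified stand-in types rather than the SDK's real prompt part types:

```ts
// Simplified stand-ins; the real prompt part types in @ai-sdk/provider carry more fields.
type SketchToolPart =
  | { type: "tool-result"; toolCallId: string; output: unknown }
  | { type: "tool-approval-response"; approvalId: string; approved: boolean };

function toAnthropicToolResults(content: SketchToolPart[]) {
  const blocks: Array<{ type: "tool_result"; tool_use_id: string; content: string }> = [];
  for (const part of content) {
    if (part.type === "tool-approval-response") {
      continue; // approval responses are ignored when building the Anthropic payload
    }
    blocks.push({
      type: "tool_result",
      tool_use_id: part.toolCallId,
      content: JSON.stringify(part.output),
    });
  }
  return blocks;
}
```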
@@ -2299,11 +2302,10 @@ function mapAnthropicStopReason({
     case "tool_use":
       return isJsonResponseFromTool ? "stop" : "tool-calls";
     case "max_tokens":
-      return "length";
     case "model_context_window_exceeded":
       return "length";
     default:
-      return "
+      return "other";
   }
 }

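After this change, `max_tokens` falls through to the `model_context_window_exceeded` case (both map to "length"), and any unrecognized stop reason maps to "other". A sketch covering only the branches visible in this hunk; the real mapAnthropicStopReason handles additional stop reasons that are not part of the diff:

```ts
// Covers only the branches shown in the hunk above; other stop reasons exist.
function sketchMapStopReason(
  stopReason: string | null | undefined,
  isJsonResponseFromTool: boolean
): "stop" | "tool-calls" | "length" | "other" {
  switch (stopReason) {
    case "tool_use":
      return isJsonResponseFromTool ? "stop" : "tool-calls";
    case "max_tokens": // falls through: both cases now map to "length"
    case "model_context_window_exceeded":
      return "length";
    default:
      return "other";
  }
}
```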
@@ -2719,7 +2721,7 @@ var AnthropicMessagesLanguageModel = class {
     });
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
     const { args, warnings, betas, usesJsonResponseTool, toolNameMapping } = await this.getArgs({
       ...options,
       stream: false,
@@ -3028,15 +3030,18 @@ var AnthropicMessagesLanguageModel = class {
     }
     return {
       content,
-      finishReason:
-
-
-
+      finishReason: {
+        unified: mapAnthropicStopReason({
+          finishReason: response.stop_reason,
+          isJsonResponseFromTool
+        }),
+        raw: (_c = response.stop_reason) != null ? _c : void 0
+      },
       usage: convertAnthropicMessagesUsage(response.usage),
       request: { body: args },
       response: {
-        id: (
-        modelId: (
+        id: (_d = response.id) != null ? _d : void 0,
+        modelId: (_e = response.model) != null ? _e : void 0,
        headers: responseHeaders,
        body: rawResponse
      },
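`doGenerate` now returns `finishReason` as an object rather than a bare value: `unified` carries the mapped SDK finish reason and `raw` preserves Anthropic's original `stop_reason`. A rough type sketch inferred from the hunk (the names below are descriptive placeholders, not the SDK's exported types):

```ts
// Inferred from the diff; not the SDK's exported type names.
type SketchFinishReason = {
  unified: string;         // mapped value, e.g. "stop", "length", "tool-calls", "other"
  raw: string | undefined; // Anthropic's stop_reason, e.g. "end_turn" or "max_tokens"
};

// Example: formatting both values for a log line.
function describeFinish({ unified, raw }: SketchFinishReason): string {
  return `finish=${unified} (anthropic stop_reason=${raw ?? "unknown"})`;
}
```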
@@ -3044,20 +3049,20 @@ var AnthropicMessagesLanguageModel = class {
       providerMetadata: {
         anthropic: {
           usage: response.usage,
-          cacheCreationInputTokens: (
-          stopSequence: (
+          cacheCreationInputTokens: (_f = response.usage.cache_creation_input_tokens) != null ? _f : null,
+          stopSequence: (_g = response.stop_sequence) != null ? _g : null,
           container: response.container ? {
             expiresAt: response.container.expires_at,
             id: response.container.id,
-            skills: (
+            skills: (_i = (_h = response.container.skills) == null ? void 0 : _h.map((skill) => ({
               type: skill.type,
               skillId: skill.skill_id,
               version: skill.version
-            }))) != null ?
+            }))) != null ? _i : null
           } : null,
-          contextManagement: (
+          contextManagement: (_j = mapAnthropicResponseContextManagement(
             response.context_management
-          )) != null ?
+          )) != null ? _j : null
         }
       }
     };
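The `providerMetadata.anthropic` block keeps the same field names but now reads them through the extra `_f` to `_j` temporaries. A hypothetical consumer-side sketch of that shape; the field names mirror the hunk, while the surrounding types are assumptions for illustration:

```ts
// Field names mirror the hunk above; the overall shape is an illustrative assumption.
interface SketchAnthropicProviderMetadata {
  usage: Record<string, unknown>;
  cacheCreationInputTokens: number | null;
  stopSequence: string | null;
  container: {
    expiresAt: string; // assumed to be a timestamp string
    id: string;
    skills: Array<{ type: string; skillId: string; version: string }> | null;
  } | null;
  contextManagement: unknown;
}

function logAnthropicMetadata(meta: SketchAnthropicProviderMetadata): void {
  console.log("cache creation input tokens:", meta.cacheCreationInputTokens);
  console.log("stop sequence:", meta.stopSequence);
  console.log("container skills:", meta.container?.skills ?? []);
}
```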
@@ -3088,7 +3093,10 @@ var AnthropicMessagesLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    let finishReason =
+    let finishReason = {
+      unified: "other",
+      raw: void 0
+    };
     const usage = {
       input_tokens: 0,
       output_tokens: 0,
@@ -3111,7 +3119,7 @@ var AnthropicMessagesLanguageModel = class {
         controller.enqueue({ type: "stream-start", warnings });
       },
       transform(chunk, controller) {
-        var _a2, _b2, _c, _d, _e, _f, _g, _h, _i, _j, _k;
+        var _a2, _b2, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
         if (options.includeRawChunks) {
           controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
         }
@@ -3594,10 +3602,13 @@ var AnthropicMessagesLanguageModel = class {
           };
         }
         if (value.message.stop_reason != null) {
-          finishReason =
-
-
-
+          finishReason = {
+            unified: mapAnthropicStopReason({
+              finishReason: value.message.stop_reason,
+              isJsonResponseFromTool
+            }),
+            raw: value.message.stop_reason
+          };
         }
         controller.enqueue({
           type: "response-metadata",
@@ -3648,19 +3659,22 @@ var AnthropicMessagesLanguageModel = class {
         }
         case "message_delta": {
           usage.output_tokens = value.usage.output_tokens;
-          finishReason =
-
-
-
-
+          finishReason = {
+            unified: mapAnthropicStopReason({
+              finishReason: value.delta.stop_reason,
+              isJsonResponseFromTool
+            }),
+            raw: (_i = value.delta.stop_reason) != null ? _i : void 0
+          };
+          stopSequence = (_j = value.delta.stop_sequence) != null ? _j : null;
           container = value.delta.container != null ? {
             expiresAt: value.delta.container.expires_at,
             id: value.delta.container.id,
-            skills: (
+            skills: (_l = (_k = value.delta.container.skills) == null ? void 0 : _k.map((skill) => ({
               type: skill.type,
               skillId: skill.skill_id,
               version: skill.version
-            }))) != null ?
+            }))) != null ? _l : null
           } : null;
           if (value.delta.context_management) {
             contextManagement = mapAnthropicResponseContextManagement(
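On the streaming side, `message_delta` events now update the `{ unified, raw }` finish reason (initialized earlier to `{ unified: "other", raw: undefined }`) and capture the delta's `stop_sequence`. A simplified sketch of that bookkeeping, reusing the `sketchMapStopReason` helper sketched above and stand-in event types:

```ts
// Stand-in types; the SDK's internal stream chunk types are richer than this.
declare function sketchMapStopReason(stopReason: string | null, isJsonResponseFromTool: boolean): string;

type SketchMessageDelta = {
  delta: { stop_reason: string | null; stop_sequence: string | null };
  usage: { output_tokens: number };
};

type SketchStreamState = {
  finishReason: { unified: string; raw: string | undefined };
  stopSequence: string | null;
  outputTokens: number;
};

function applyMessageDelta(state: SketchStreamState, value: SketchMessageDelta): SketchStreamState {
  return {
    outputTokens: value.usage.output_tokens,
    finishReason: {
      unified: sketchMapStopReason(value.delta.stop_reason, /* isJsonResponseFromTool */ false),
      raw: value.delta.stop_reason ?? undefined,
    },
    stopSequence: value.delta.stop_sequence ?? null,
  };
}
```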