@ai-sdk/anthropic 3.0.0-beta.77 → 3.0.0-beta.79

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,20 @@
  # @ai-sdk/anthropic
 
+ ## 3.0.0-beta.79
+
+ ### Patch Changes
+
+ - 3bd2689: feat: extended token usage
+ - Updated dependencies [3bd2689]
+   - @ai-sdk/provider@3.0.0-beta.26
+   - @ai-sdk/provider-utils@4.0.0-beta.45
+
+ ## 3.0.0-beta.78
+
+ ### Patch Changes
+
+ - 9e1e758: fix(anthropic): use default thinking budget when unspecified
+
  ## 3.0.0-beta.77
 
  ### Patch Changes
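
For consumers, the beta.79 "extended token usage" entry means the usage object produced by this provider is now a structured breakdown rather than flat counters. A minimal sketch of reading it (hedged: this assumes the surrounding `ai` package surfaces the provider usage object unchanged, and the model id is only illustrative):

    import { generateText } from 'ai';
    import { anthropic } from '@ai-sdk/anthropic';

    const result = await generateText({
      model: anthropic('claude-sonnet-4-5'), // illustrative model id
      prompt: 'Hello!',
    });

    // Input tokens are split by cache status; output tokens carry a total
    // plus (currently undefined) text/reasoning sub-counts.
    console.log(result.usage.inputTokens.total);
    console.log(result.usage.inputTokens.cacheRead);
    console.log(result.usage.outputTokens.total);

The exact field names come from the convertAnthropicMessagesUsage helper introduced in dist/index.js below.
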
package/dist/index.js CHANGED
@@ -31,7 +31,7 @@ var import_provider4 = require("@ai-sdk/provider");
  var import_provider_utils22 = require("@ai-sdk/provider-utils");
 
  // src/version.ts
- var VERSION = true ? "3.0.0-beta.77" : "0.0.0-test";
+ var VERSION = true ? "3.0.0-beta.79" : "0.0.0-test";
 
  // src/anthropic-messages-language-model.ts
  var import_provider3 = require("@ai-sdk/provider");
@@ -56,6 +56,29 @@ var anthropicFailedResponseHandler = (0, import_provider_utils.createJsonErrorRe
  errorToMessage: (data) => data.error.message
  });
 
+ // src/convert-anthropic-messages-usage.ts
+ function convertAnthropicMessagesUsage(usage) {
+   var _a, _b;
+   const inputTokens = usage.input_tokens;
+   const outputTokens = usage.output_tokens;
+   const cacheCreationTokens = (_a = usage.cache_creation_input_tokens) != null ? _a : 0;
+   const cacheReadTokens = (_b = usage.cache_read_input_tokens) != null ? _b : 0;
+   return {
+     inputTokens: {
+       total: inputTokens + cacheCreationTokens + cacheReadTokens,
+       noCache: inputTokens,
+       cacheRead: cacheReadTokens,
+       cacheWrite: cacheCreationTokens
+     },
+     outputTokens: {
+       total: outputTokens,
+       text: void 0,
+       reasoning: void 0
+     },
+     raw: usage
+   };
+ }
+
  // src/anthropic-messages-api.ts
  var import_provider_utils2 = require("@ai-sdk/provider-utils");
  var import_v42 = require("zod/v4");
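
A worked example of the new converter, derived directly from its logic above. The converter treats input_tokens as the non-cached bucket (consistent with Anthropic's reporting, where input_tokens excludes cache reads and writes), so the total sums all three:

    // Raw usage as returned by the Anthropic Messages API:
    const raw = {
      input_tokens: 100,
      output_tokens: 50,
      cache_creation_input_tokens: 20,
      cache_read_input_tokens: 300,
    };

    convertAnthropicMessagesUsage(raw);
    // => {
    //   inputTokens: { total: 420, noCache: 100, cacheRead: 300, cacheWrite: 20 },
    //   outputTokens: { total: 50, text: undefined, reasoning: undefined },
    //   raw: { ...raw }
    // }
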
@@ -2234,7 +2257,7 @@ var AnthropicMessagesLanguageModel = class {
    toolNameMapping
  });
  const isThinking = ((_c = anthropicOptions == null ? void 0 : anthropicOptions.thinking) == null ? void 0 : _c.type) === "enabled";
- const thinkingBudget = (_d = anthropicOptions == null ? void 0 : anthropicOptions.thinking) == null ? void 0 : _d.budgetTokens;
+ let thinkingBudget = (_d = anthropicOptions == null ? void 0 : anthropicOptions.thinking) == null ? void 0 : _d.budgetTokens;
  const maxTokens = maxOutputTokens != null ? maxOutputTokens : maxOutputTokensForModel;
  const baseArgs = {
    // model id:
@@ -2326,9 +2349,16 @@ var AnthropicMessagesLanguageModel = class {
  };
  if (isThinking) {
    if (thinkingBudget == null) {
-     throw new import_provider3.UnsupportedFunctionalityError({
-       functionality: "thinking requires a budget"
+     warnings.push({
+       type: "compatibility",
+       feature: "extended thinking",
+       details: "thinking budget is required when thinking is enabled. using default budget of 1024 tokens."
      });
+     baseArgs.thinking = {
+       type: "enabled",
+       budget_tokens: 1024
+     };
+     thinkingBudget = 1024;
    }
    if (baseArgs.temperature != null) {
      baseArgs.temperature = void 0;
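
This is the beta.78 behavior change: enabling thinking without a budget no longer throws UnsupportedFunctionalityError; the request goes out with a default budget_tokens of 1024 and a compatibility warning is surfaced instead. A minimal sketch of triggering it (hedged: assumes the documented providerOptions shape for this provider; the model id is illustrative):

    import { generateText } from 'ai';
    import { anthropic } from '@ai-sdk/anthropic';

    const result = await generateText({
      model: anthropic('claude-sonnet-4-5'), // illustrative model id
      prompt: 'Think this through step by step.',
      providerOptions: {
        anthropic: {
          thinking: { type: 'enabled' }, // budgetTokens deliberately omitted
        },
      },
    });
    // Before beta.78: UnsupportedFunctionalityError("thinking requires a budget").
    // From beta.78: the request carries thinking.budget_tokens = 1024 and
    // result.warnings includes the "compatibility" warning pushed above.
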
@@ -2354,7 +2384,7 @@ var AnthropicMessagesLanguageModel = class {
        details: "topP is not supported when thinking is enabled"
      });
    }
-   baseArgs.max_tokens = maxTokens + thinkingBudget;
+   baseArgs.max_tokens = maxTokens + (thinkingBudget != null ? thinkingBudget : 0);
  }
  if (isKnownModel && baseArgs.max_tokens > maxOutputTokensForModel) {
    if (maxOutputTokens != null) {
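
The nullish guard is belt-and-braces: the default assignment earlier means thinkingBudget is always set by this line whenever thinking is enabled, but the arithmetic now stays numeric either way. A worked trace of the sum:

    // maxTokens = 4096, thinkingBudget defaulted to 1024:
    // baseArgs.max_tokens = 4096 + 1024 = 5120
    // old code, had an undefined budget reached this line:
    // 4096 + undefined => NaN (unreachable only because it threw first)
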
@@ -2483,7 +2513,7 @@ var AnthropicMessagesLanguageModel = class {
    });
  }
  async doGenerate(options) {
-   var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+   var _a, _b, _c, _d, _e, _f, _g, _h;
    const { args, warnings, betas, usesJsonResponseTool, toolNameMapping } = await this.getArgs({
      ...options,
      stream: false,
@@ -2782,16 +2812,11 @@ var AnthropicMessagesLanguageModel = class {
      finishReason: response.stop_reason,
      isJsonResponseFromTool
    }),
-   usage: {
-     inputTokens: response.usage.input_tokens,
-     outputTokens: response.usage.output_tokens,
-     totalTokens: response.usage.input_tokens + response.usage.output_tokens,
-     cachedInputTokens: (_b = response.usage.cache_read_input_tokens) != null ? _b : void 0
-   },
+   usage: convertAnthropicMessagesUsage(response.usage),
    request: { body: args },
    response: {
-     id: (_c = response.id) != null ? _c : void 0,
-     modelId: (_d = response.model) != null ? _d : void 0,
+     id: (_b = response.id) != null ? _b : void 0,
+     modelId: (_c = response.model) != null ? _c : void 0,
      headers: responseHeaders,
      body: rawResponse
    },
@@ -2799,20 +2824,20 @@ var AnthropicMessagesLanguageModel = class {
    providerMetadata: {
      anthropic: {
        usage: response.usage,
-       cacheCreationInputTokens: (_e = response.usage.cache_creation_input_tokens) != null ? _e : null,
-       stopSequence: (_f = response.stop_sequence) != null ? _f : null,
+       cacheCreationInputTokens: (_d = response.usage.cache_creation_input_tokens) != null ? _d : null,
+       stopSequence: (_e = response.stop_sequence) != null ? _e : null,
        container: response.container ? {
          expiresAt: response.container.expires_at,
          id: response.container.id,
-         skills: (_h = (_g = response.container.skills) == null ? void 0 : _g.map((skill) => ({
+         skills: (_g = (_f = response.container.skills) == null ? void 0 : _f.map((skill) => ({
            type: skill.type,
            skillId: skill.skill_id,
            version: skill.version
-         }))) != null ? _h : null
+         }))) != null ? _g : null
        } : null,
-       contextManagement: (_i = mapAnthropicResponseContextManagement(
+       contextManagement: (_h = mapAnthropicResponseContextManagement(
          response.context_management
-       )) != null ? _i : null
+       )) != null ? _h : null
      }
    }
  };
@@ -2845,9 +2870,10 @@ var AnthropicMessagesLanguageModel = class {
  });
  let finishReason = "unknown";
  const usage = {
-   inputTokens: void 0,
-   outputTokens: void 0,
-   totalTokens: void 0
+   input_tokens: 0,
+   output_tokens: 0,
+   cache_creation_input_tokens: 0,
+   cache_read_input_tokens: 0
  };
  const contentBlocks = {};
  const mcpToolCalls = {};
@@ -2865,7 +2891,7 @@ var AnthropicMessagesLanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
-   var _a2, _b2, _c, _d, _e, _f, _g, _h, _i, _j;
+   var _a2, _b2, _c, _d, _e, _f, _g, _h, _i;
    if (options.includeRawChunks) {
      controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
    }
@@ -3304,35 +3330,35 @@ var AnthropicMessagesLanguageModel = class {
    }
  }
  case "message_start": {
-   usage.inputTokens = value.message.usage.input_tokens;
-   usage.cachedInputTokens = (_b2 = value.message.usage.cache_read_input_tokens) != null ? _b2 : void 0;
+   usage.input_tokens = value.message.usage.input_tokens;
+   usage.cache_read_input_tokens = (_b2 = value.message.usage.cache_read_input_tokens) != null ? _b2 : 0;
+   usage.cache_creation_input_tokens = (_c = value.message.usage.cache_creation_input_tokens) != null ? _c : 0;
    rawUsage = {
      ...value.message.usage
    };
-   cacheCreationInputTokens = (_c = value.message.usage.cache_creation_input_tokens) != null ? _c : null;
+   cacheCreationInputTokens = (_d = value.message.usage.cache_creation_input_tokens) != null ? _d : null;
    controller.enqueue({
      type: "response-metadata",
-     id: (_d = value.message.id) != null ? _d : void 0,
-     modelId: (_e = value.message.model) != null ? _e : void 0
+     id: (_e = value.message.id) != null ? _e : void 0,
+     modelId: (_f = value.message.model) != null ? _f : void 0
    });
    return;
  }
  case "message_delta": {
-   usage.outputTokens = value.usage.output_tokens;
-   usage.totalTokens = ((_f = usage.inputTokens) != null ? _f : 0) + ((_g = value.usage.output_tokens) != null ? _g : 0);
+   usage.output_tokens = value.usage.output_tokens;
    finishReason = mapAnthropicStopReason({
      finishReason: value.delta.stop_reason,
      isJsonResponseFromTool
    });
-   stopSequence = (_h = value.delta.stop_sequence) != null ? _h : null;
+   stopSequence = (_g = value.delta.stop_sequence) != null ? _g : null;
    container = value.delta.container != null ? {
      expiresAt: value.delta.container.expires_at,
      id: value.delta.container.id,
-     skills: (_j = (_i = value.delta.container.skills) == null ? void 0 : _i.map((skill) => ({
+     skills: (_i = (_h = value.delta.container.skills) == null ? void 0 : _h.map((skill) => ({
        type: skill.type,
        skillId: skill.skill_id,
        version: skill.version
-     }))) != null ? _j : null
+     }))) != null ? _i : null
    } : null;
    if (value.delta.context_management) {
      contextManagement = mapAnthropicResponseContextManagement(
@@ -3349,7 +3375,7 @@ var AnthropicMessagesLanguageModel = class {
  controller.enqueue({
    type: "finish",
    finishReason,
-   usage,
+   usage: convertAnthropicMessagesUsage(usage),
    providerMetadata: {
      anthropic: {
        usage: rawUsage != null ? rawUsage : null,
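
Net effect in the streaming path: usage is accumulated as raw Anthropic snake_case counters (zero-initialized above) and converted exactly once, at the finish event, through the same convertAnthropicMessagesUsage helper that doGenerate uses. A worked trace, assuming a stream whose message_start reports input_tokens = 10 and cache_read_input_tokens = 90 and whose message_delta reports output_tokens = 40:

    // message_start: usage = { input_tokens: 10, output_tokens: 0,
    //                          cache_creation_input_tokens: 0, cache_read_input_tokens: 90 }
    // message_delta: usage.output_tokens = 40
    // finish event:  convertAnthropicMessagesUsage(usage)
    // => { inputTokens: { total: 100, noCache: 10, cacheRead: 90, cacheWrite: 0 },
    //      outputTokens: { total: 40, text: undefined, reasoning: undefined },
    //      raw: { input_tokens: 10, output_tokens: 40, ... } }
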