@ai-sdk/anthropic 3.0.0-beta.77 → 3.0.0-beta.79

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,7 +1,6 @@
 // src/anthropic-messages-language-model.ts
 import {
-  APICallError,
-  UnsupportedFunctionalityError as UnsupportedFunctionalityError3
+  APICallError
 } from "@ai-sdk/provider";
 import {
   combineHeaders,
@@ -37,6 +36,29 @@ var anthropicFailedResponseHandler = createJsonErrorResponseHandler({
   errorToMessage: (data) => data.error.message
 });
 
+// src/convert-anthropic-messages-usage.ts
+function convertAnthropicMessagesUsage(usage) {
+  var _a, _b;
+  const inputTokens = usage.input_tokens;
+  const outputTokens = usage.output_tokens;
+  const cacheCreationTokens = (_a = usage.cache_creation_input_tokens) != null ? _a : 0;
+  const cacheReadTokens = (_b = usage.cache_read_input_tokens) != null ? _b : 0;
+  return {
+    inputTokens: {
+      total: inputTokens + cacheCreationTokens + cacheReadTokens,
+      noCache: inputTokens,
+      cacheRead: cacheReadTokens,
+      cacheWrite: cacheCreationTokens
+    },
+    outputTokens: {
+      total: outputTokens,
+      text: void 0,
+      reasoning: void 0
+    },
+    raw: usage
+  };
+}
+
 // src/anthropic-messages-api.ts
 import { lazySchema as lazySchema2, zodSchema as zodSchema2 } from "@ai-sdk/provider-utils";
 import { z as z2 } from "zod/v4";
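
For orientation, here is a minimal sketch (not part of the published diff) of what the new converter produces. The payload counts below are hypothetical; the field names follow the function body above:

```js
// Hypothetical Anthropic usage payload; the counts are illustrative only.
const usage = {
  input_tokens: 100,
  output_tokens: 50,
  cache_creation_input_tokens: 20,
  cache_read_input_tokens: 30
};

convertAnthropicMessagesUsage(usage);
// => {
//      inputTokens: { total: 150, noCache: 100, cacheRead: 30, cacheWrite: 20 },
//      outputTokens: { total: 50, text: undefined, reasoning: undefined },
//      raw: { input_tokens: 100, output_tokens: 50, ... }
//    }
```

Note that `inputTokens.total` aggregates uncached, cache-write, and cache-read tokens (100 + 20 + 30 = 150).
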
@@ -2244,7 +2266,7 @@ var AnthropicMessagesLanguageModel = class {
       toolNameMapping
     });
     const isThinking = ((_c = anthropicOptions == null ? void 0 : anthropicOptions.thinking) == null ? void 0 : _c.type) === "enabled";
-    const thinkingBudget = (_d = anthropicOptions == null ? void 0 : anthropicOptions.thinking) == null ? void 0 : _d.budgetTokens;
+    let thinkingBudget = (_d = anthropicOptions == null ? void 0 : anthropicOptions.thinking) == null ? void 0 : _d.budgetTokens;
     const maxTokens = maxOutputTokens != null ? maxOutputTokens : maxOutputTokensForModel;
     const baseArgs = {
       // model id:
@@ -2336,9 +2358,16 @@ var AnthropicMessagesLanguageModel = class {
     };
     if (isThinking) {
       if (thinkingBudget == null) {
-        throw new UnsupportedFunctionalityError3({
-          functionality: "thinking requires a budget"
+        warnings.push({
+          type: "compatibility",
+          feature: "extended thinking",
+          details: "thinking budget is required when thinking is enabled. using default budget of 1024 tokens."
         });
+        baseArgs.thinking = {
+          type: "enabled",
+          budget_tokens: 1024
+        };
+        thinkingBudget = 1024;
       }
       if (baseArgs.temperature != null) {
         baseArgs.temperature = void 0;
@@ -2364,7 +2393,7 @@ var AnthropicMessagesLanguageModel = class {
           details: "topP is not supported when thinking is enabled"
         });
       }
-      baseArgs.max_tokens = maxTokens + thinkingBudget;
+      baseArgs.max_tokens = maxTokens + (thinkingBudget != null ? thinkingBudget : 0);
     }
     if (isKnownModel && baseArgs.max_tokens > maxOutputTokensForModel) {
       if (maxOutputTokens != null) {
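
Together, the two hunks above change what happens when extended thinking is enabled without a budget: instead of throwing UnsupportedFunctionalityError, getArgs now emits a compatibility warning, falls back to a 1024-token budget, and folds that budget into max_tokens. A sketch of the resulting behavior, assuming maxTokens resolves to 4096:

```js
// Thinking enabled, but budgetTokens deliberately omitted
// (before this change, getArgs threw; now it warns and defaults).
const anthropicOptions = { thinking: { type: "enabled" } };

// Effective request arguments after getArgs (maxTokens assumed to be 4096):
//   baseArgs.thinking   => { type: "enabled", budget_tokens: 1024 }
//   baseArgs.max_tokens => 4096 + 1024 = 5120
//   warnings            => [..., { type: "compatibility", feature: "extended thinking", ... }]
```
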
@@ -2493,7 +2522,7 @@ var AnthropicMessagesLanguageModel = class {
     });
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i;
+    var _a, _b, _c, _d, _e, _f, _g, _h;
     const { args, warnings, betas, usesJsonResponseTool, toolNameMapping } = await this.getArgs({
       ...options,
       stream: false,
@@ -2792,16 +2821,11 @@ var AnthropicMessagesLanguageModel = class {
         finishReason: response.stop_reason,
         isJsonResponseFromTool
       }),
-      usage: {
-        inputTokens: response.usage.input_tokens,
-        outputTokens: response.usage.output_tokens,
-        totalTokens: response.usage.input_tokens + response.usage.output_tokens,
-        cachedInputTokens: (_b = response.usage.cache_read_input_tokens) != null ? _b : void 0
-      },
+      usage: convertAnthropicMessagesUsage(response.usage),
       request: { body: args },
       response: {
-        id: (_c = response.id) != null ? _c : void 0,
-        modelId: (_d = response.model) != null ? _d : void 0,
+        id: (_b = response.id) != null ? _b : void 0,
+        modelId: (_c = response.model) != null ? _c : void 0,
         headers: responseHeaders,
         body: rawResponse
       },
@@ -2809,20 +2833,20 @@ var AnthropicMessagesLanguageModel = class {
       providerMetadata: {
         anthropic: {
           usage: response.usage,
-          cacheCreationInputTokens: (_e = response.usage.cache_creation_input_tokens) != null ? _e : null,
-          stopSequence: (_f = response.stop_sequence) != null ? _f : null,
+          cacheCreationInputTokens: (_d = response.usage.cache_creation_input_tokens) != null ? _d : null,
+          stopSequence: (_e = response.stop_sequence) != null ? _e : null,
           container: response.container ? {
             expiresAt: response.container.expires_at,
             id: response.container.id,
-            skills: (_h = (_g = response.container.skills) == null ? void 0 : _g.map((skill) => ({
+            skills: (_g = (_f = response.container.skills) == null ? void 0 : _f.map((skill) => ({
               type: skill.type,
               skillId: skill.skill_id,
               version: skill.version
-            }))) != null ? _h : null
+            }))) != null ? _g : null
           } : null,
-          contextManagement: (_i = mapAnthropicResponseContextManagement(
+          contextManagement: (_h = mapAnthropicResponseContextManagement(
             response.context_management
-          )) != null ? _i : null
+          )) != null ? _h : null
         }
       }
     };
@@ -2855,9 +2879,10 @@ var AnthropicMessagesLanguageModel = class {
     });
     let finishReason = "unknown";
    const usage = {
-      inputTokens: void 0,
-      outputTokens: void 0,
-      totalTokens: void 0
+      input_tokens: 0,
+      output_tokens: 0,
+      cache_creation_input_tokens: 0,
+      cache_read_input_tokens: 0
    };
    const contentBlocks = {};
    const mcpToolCalls = {};
@@ -2875,7 +2900,7 @@ var AnthropicMessagesLanguageModel = class {
        controller.enqueue({ type: "stream-start", warnings });
      },
      transform(chunk, controller) {
-        var _a2, _b2, _c, _d, _e, _f, _g, _h, _i, _j;
+        var _a2, _b2, _c, _d, _e, _f, _g, _h, _i;
        if (options.includeRawChunks) {
          controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
        }
@@ -3314,35 +3339,35 @@ var AnthropicMessagesLanguageModel = class {
            }
          }
          case "message_start": {
-            usage.inputTokens = value.message.usage.input_tokens;
-            usage.cachedInputTokens = (_b2 = value.message.usage.cache_read_input_tokens) != null ? _b2 : void 0;
+            usage.input_tokens = value.message.usage.input_tokens;
+            usage.cache_read_input_tokens = (_b2 = value.message.usage.cache_read_input_tokens) != null ? _b2 : 0;
+            usage.cache_creation_input_tokens = (_c = value.message.usage.cache_creation_input_tokens) != null ? _c : 0;
            rawUsage = {
              ...value.message.usage
            };
-            cacheCreationInputTokens = (_c = value.message.usage.cache_creation_input_tokens) != null ? _c : null;
+            cacheCreationInputTokens = (_d = value.message.usage.cache_creation_input_tokens) != null ? _d : null;
            controller.enqueue({
              type: "response-metadata",
-              id: (_d = value.message.id) != null ? _d : void 0,
-              modelId: (_e = value.message.model) != null ? _e : void 0
+              id: (_e = value.message.id) != null ? _e : void 0,
+              modelId: (_f = value.message.model) != null ? _f : void 0
            });
            return;
          }
          case "message_delta": {
-            usage.outputTokens = value.usage.output_tokens;
-            usage.totalTokens = ((_f = usage.inputTokens) != null ? _f : 0) + ((_g = value.usage.output_tokens) != null ? _g : 0);
+            usage.output_tokens = value.usage.output_tokens;
            finishReason = mapAnthropicStopReason({
              finishReason: value.delta.stop_reason,
              isJsonResponseFromTool
            });
-            stopSequence = (_h = value.delta.stop_sequence) != null ? _h : null;
+            stopSequence = (_g = value.delta.stop_sequence) != null ? _g : null;
            container = value.delta.container != null ? {
              expiresAt: value.delta.container.expires_at,
              id: value.delta.container.id,
-              skills: (_j = (_i = value.delta.container.skills) == null ? void 0 : _i.map((skill) => ({
+              skills: (_i = (_h = value.delta.container.skills) == null ? void 0 : _h.map((skill) => ({
                type: skill.type,
                skillId: skill.skill_id,
                version: skill.version
-              }))) != null ? _j : null
+              }))) != null ? _i : null
            } : null;
            if (value.delta.context_management) {
              contextManagement = mapAnthropicResponseContextManagement(
@@ -3359,7 +3384,7 @@ var AnthropicMessagesLanguageModel = class {
          controller.enqueue({
            type: "finish",
            finishReason,
-            usage,
+            usage: convertAnthropicMessagesUsage(usage),
            providerMetadata: {
              anthropic: {
                usage: rawUsage != null ? rawUsage : null,
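
The streaming hunks mirror the doGenerate change: the accumulator now stores Anthropic's raw snake_case counters, and the conversion happens once, in the finish event, so both code paths report usage in the same shape. A sketch of the flow implied by the hunks above:

```js
// Accumulator as initialized in the -2855 hunk; field names follow
// Anthropic's wire format rather than the SDK's camelCase usage type.
const usage = {
  input_tokens: 0,
  output_tokens: 0,
  cache_creation_input_tokens: 0,
  cache_read_input_tokens: 0
};
// message_start: input_tokens and both cache counters are captured.
// message_delta: output_tokens is captured.
// finish: the accumulated raw counters are converted exactly once:
const finalUsage = convertAnthropicMessagesUsage(usage);
```
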