@ai-sdk/anthropic 4.0.0-beta.4 → 4.0.0-beta.41

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. package/CHANGELOG.md +305 -4
  2. package/README.md +2 -0
  3. package/dist/index.d.ts +83 -58
  4. package/dist/index.js +2043 -1356
  5. package/dist/index.js.map +1 -1
  6. package/dist/internal/index.d.ts +85 -58
  7. package/dist/internal/index.js +1804 -1342
  8. package/dist/internal/index.js.map +1 -1
  9. package/docs/05-anthropic.mdx +116 -13
  10. package/package.json +14 -15
  11. package/src/{anthropic-messages-api.ts → anthropic-api.ts} +14 -6
  12. package/src/anthropic-error.ts +1 -1
  13. package/src/anthropic-files.ts +95 -0
  14. package/src/{anthropic-messages-language-model.ts → anthropic-language-model.ts} +263 -78
  15. package/src/anthropic-message-metadata.ts +1 -4
  16. package/src/{anthropic-messages-options.ts → anthropic-options.ts} +68 -11
  17. package/src/anthropic-prepare-tools.ts +14 -7
  18. package/src/anthropic-provider.ts +42 -13
  19. package/src/{convert-anthropic-messages-usage.ts → convert-anthropic-usage.ts} +4 -4
  20. package/src/{convert-to-anthropic-messages-prompt.ts → convert-to-anthropic-prompt.ts} +190 -149
  21. package/src/forward-anthropic-container-id-from-last-step.ts +2 -2
  22. package/src/get-cache-control.ts +5 -2
  23. package/src/index.ts +1 -1
  24. package/src/internal/index.ts +11 -2
  25. package/src/map-anthropic-stop-reason.ts +1 -1
  26. package/src/sanitize-json-schema.ts +203 -0
  27. package/src/skills/anthropic-skills-api.ts +44 -0
  28. package/src/skills/anthropic-skills.ts +132 -0
  29. package/src/tool/bash_20241022.ts +2 -2
  30. package/src/tool/bash_20250124.ts +2 -2
  31. package/src/tool/code-execution_20250522.ts +2 -2
  32. package/src/tool/code-execution_20250825.ts +2 -2
  33. package/src/tool/code-execution_20260120.ts +2 -2
  34. package/src/tool/computer_20241022.ts +2 -2
  35. package/src/tool/computer_20250124.ts +2 -2
  36. package/src/tool/computer_20251124.ts +2 -2
  37. package/src/tool/memory_20250818.ts +2 -2
  38. package/src/tool/text-editor_20241022.ts +2 -2
  39. package/src/tool/text-editor_20250124.ts +2 -2
  40. package/src/tool/text-editor_20250429.ts +2 -2
  41. package/src/tool/text-editor_20250728.ts +6 -3
  42. package/src/tool/tool-search-bm25_20251119.ts +2 -2
  43. package/src/tool/tool-search-regex_20251119.ts +2 -2
  44. package/src/tool/web-fetch-20250910.ts +2 -2
  45. package/src/tool/web-fetch-20260209.ts +2 -2
  46. package/src/tool/web-search_20250305.ts +2 -2
  47. package/src/tool/web-search_20260209.ts +2 -2
  48. package/dist/index.d.mts +0 -1090
  49. package/dist/index.mjs +0 -5244
  50. package/dist/index.mjs.map +0 -1
  51. package/dist/internal/index.d.mts +0 -969
  52. package/dist/internal/index.mjs +0 -5136
  53. package/dist/internal/index.mjs.map +0 -1
@@ -122,14 +122,22 @@ The following optional provider options are available for Anthropic models:
122
122
  If you are experiencing issues with the model handling requests involving
123
123
  reasoning content, you can set this to `false` to omit them from the request.
124
124
 
125
- - `effort` _"high" | "medium" | "low"_
125
+ - `effort` _"low" | "medium" | "high" | "xhigh" | "max"_
126
126
 
127
127
  Optional. See [Effort section](#effort) for more details.
128
128
 
129
+ - `taskBudget` _object_
130
+
131
+ Optional. See [Task Budgets section](#task-budgets) for more details.
132
+
129
133
  - `speed` _"fast" | "standard"_
130
134
 
131
135
  Optional. See [Fast Mode section](#fast-mode) for more details.
132
136
 
137
+ - `inferenceGeo` _"us" | "global"_
138
+
139
+ Optional. See [Data Residency section](#data-residency) for more details.
140
+
133
141
  - `thinking` _object_
134
142
 
135
143
  Optional. See [Reasoning section](#reasoning) for more details.
@@ -146,6 +154,12 @@ The following optional provider options are available for Anthropic models:
146
154
  - `"jsonTool"`: Use a special `"json"` tool to specify the structured output format.
147
155
  - `"auto"`: Use `"outputFormat"` when supported, otherwise fall back to `"jsonTool"` (default).
148
156
 
157
+ - `metadata` _object_
158
+
159
+ Optional. Metadata to include with the request. See the [Anthropic API documentation](https://platform.claude.com/docs/en/api/messages/create) for details.
160
+
161
+ - `userId` _string_ - An external identifier for the end-user. Should be a UUID, hash, or other opaque identifier. Must not contain PII.
162
+
149
163
  ### Structured Outputs and Tool Input Streaming
150
164
 
151
165
  Tool call streaming is enabled by default. You can opt out by setting the
@@ -177,7 +191,7 @@ const result = streamText({
177
191
 
178
192
  ### Effort
179
193
 
180
- Anthropic introduced an `effort` option with `claude-opus-4-5` that affects thinking, text responses, and function calls. Effort defaults to `high` and you can set it to `medium` or `low` to save tokens and to lower time-to-last-token latency (TTLT).
194
+ Anthropic introduced an `effort` option with `claude-opus-4-5` that affects thinking, text responses, and function calls. Effort defaults to `high` and you can set it to `medium` or `low` to save tokens and to lower time-to-last-token latency (TTLT). `claude-opus-4-7` additionally supports the higher `xhigh` and `max` levels for maximum reasoning effort.
181
195
 
182
196
  ```ts highlight="8-10"
183
197
  import { anthropic, AnthropicLanguageModelOptions } from '@ai-sdk/anthropic';
@@ -218,6 +232,67 @@ const { text } = await generateText({
218
232
 
219
233
  The `speed` option accepts `'fast'` or `'standard'` (default behavior).
220
234
 
235
+ ### Task Budgets
236
+
237
+ `claude-opus-4-7` supports a `taskBudget` option that informs the model of the total token budget available for an agentic turn. The model uses this information to prioritize work, plan ahead, and wind down gracefully as the budget is consumed.
238
+
239
+ Task budgets are advisory — they do not enforce a hard token limit. The model will attempt to stay within budget, but actual usage may vary.
240
+
241
+ ```ts highlight="8-13"
242
+ import { anthropic, AnthropicLanguageModelOptions } from '@ai-sdk/anthropic';
243
+ import { generateText } from 'ai';
244
+
245
+ const { text } = await generateText({
246
+ model: anthropic('claude-opus-4-7'),
247
+ prompt: 'Research the pros and cons of Rust vs Go for building CLI tools.',
248
+ providerOptions: {
249
+ anthropic: {
250
+ taskBudget: {
251
+ type: 'tokens',
252
+ total: 400000,
253
+ },
254
+ } satisfies AnthropicLanguageModelOptions,
255
+ },
256
+ });
257
+ ```
258
+
259
+ For long-running agents that compact and restart context, you can carry the remaining budget forward using the `remaining` field:
260
+
261
+ ```ts
262
+ taskBudget: {
263
+ type: 'tokens',
264
+ total: 400000,
265
+ remaining: 215000, // budget left after prior compacted-away contexts
266
+ }
267
+ ```
268
+
269
+ The `taskBudget` object accepts:
270
+
271
+ - `type` _"tokens"_ - Budget type. Currently only `"tokens"` is supported.
272
+ - `total` _number_ - Total task budget for the agentic turn. Minimum 20,000.
273
+ - `remaining` _number_ - Budget left after prior compacted-away contexts. Must be between 0 and `total`. Defaults to `total` if omitted.
274
+
275
+ ### Data Residency
276
+
277
+ Anthropic supports an [`inferenceGeo` option](https://platform.claude.com/docs/en/build-with-claude/data-residency) that controls where model inference runs for a request.
278
+
279
+ ```ts highlight="8-10"
280
+ import { anthropic, AnthropicLanguageModelOptions } from '@ai-sdk/anthropic';
281
+ import { generateText } from 'ai';
282
+
283
+ const { text } = await generateText({
284
+ model: anthropic('claude-opus-4-6'),
285
+ prompt: 'Summarize the key points of this document.',
286
+ providerOptions: {
287
+ anthropic: {
288
+ inferenceGeo: 'us',
289
+ } satisfies AnthropicLanguageModelOptions,
290
+ },
291
+ });
292
+ ```
293
+
294
+ The `inferenceGeo` option accepts `'us'` (US-only infrastructure) or `'global'` (default, any available geography).
295
+
221
296
  ### Reasoning
222
297
 
223
298
  Anthropic models support extended thinking, where Claude shows its reasoning process before providing a final answer.
@@ -261,6 +336,31 @@ const { text } = await generateText({
261
336
  });
262
337
  ```
263
338
 
339
+ ##### Thinking Display (Opus 4.7+)
340
+
341
+ Starting with `claude-opus-4-7`, thinking content is omitted from the response by default — thinking blocks are present in the stream but their text is empty. To receive reasoning output, set `display: 'summarized'`:
342
+
343
+ ```ts highlight="5"
344
+ const { text, reasoningText } = await generateText({
345
+ model: anthropic('claude-opus-4-7'),
346
+ providerOptions: {
347
+ anthropic: {
348
+ thinking: { type: 'adaptive', display: 'summarized' },
349
+ } satisfies AnthropicLanguageModelOptions,
350
+ },
351
+ prompt: 'How many people will live in the world in 2040?',
352
+ });
353
+
354
+ console.log(reasoningText); // reasoning text (empty without display: 'summarized')
355
+ console.log(text);
356
+ ```
357
+
358
+ <Note>
359
+ If you stream reasoning to users with `claude-opus-4-7`, the default `"omitted"` display will
360
+ cause a long pause before output begins. Set `display: "summarized"` to restore visible
361
+ progress during thinking.
362
+ </Note>
363
+
264
364
  #### Budget-Based Thinking
265
365
 
266
366
  For earlier models (`claude-opus-4-20250514`, `claude-sonnet-4-20250514`, `claude-sonnet-4-5-20250929`),
@@ -484,13 +584,12 @@ For more details, see [Anthropic's Context Management documentation](https://doc
484
584
  In the messages and message parts, you can use the `providerOptions` property to set cache control breakpoints.
485
585
  You need to set the `anthropic` property in the `providerOptions` object to `{ cacheControl: { type: 'ephemeral' } }` to set a cache control breakpoint.
486
586
 
487
- The cache creation input tokens are then returned in the `providerMetadata` object
488
- for `generateText`, again under the `anthropic` property.
489
- When you use `streamText`, the response contains a promise
490
- that resolves to the metadata. Alternatively you can receive it in the
491
- `onFinish` callback.
587
+ Cache read and cache write (creation) token counts are returned on the standard
588
+ `usage` object for both `generateText` and `streamText`. You can access them at
589
+ `result.usage.inputTokenDetails.cacheReadTokens` and
590
+ `result.usage.inputTokenDetails.cacheWriteTokens`.
492
591
 
493
- ```ts highlight="8,18-20,29-30"
592
+ ```ts highlight="8,18-20,29-32"
494
593
  import { anthropic } from '@ai-sdk/anthropic';
495
594
  import { generateText } from 'ai';
496
595
 
@@ -517,8 +616,11 @@ const result = await generateText({
517
616
  });
518
617
 
519
618
  console.log(result.text);
520
- console.log(result.providerMetadata?.anthropic);
521
- // e.g. { cacheCreationInputTokens: 2118 }
619
+ console.log('Cache read tokens:', result.usage.inputTokenDetails.cacheReadTokens);
620
+ console.log(
621
+ 'Cache write tokens:',
622
+ result.usage.inputTokenDetails.cacheWriteTokens,
623
+ );
522
624
  ```
523
625
 
524
626
  You can also use cache control on system messages by providing multiple system messages at the head of your messages array:
@@ -741,7 +843,7 @@ const computerTool = anthropic.tools.computer_20251124({
741
843
  toModelOutput({ output }) {
742
844
  return typeof output === 'string'
743
845
  ? [{ type: 'text', text: output }]
744
- : [{ type: 'image', data: output.data, mediaType: 'image/png' }];
846
+ : [{ type: 'file-data', data: output.data, mediaType: 'image/png' }];
745
847
  },
746
848
  });
747
849
  ```
@@ -1155,12 +1257,12 @@ import {
1155
1257
  anthropic,
1156
1258
  forwardAnthropicContainerIdFromLastStep,
1157
1259
  } from '@ai-sdk/anthropic';
1158
- import { generateText, tool, stepCountIs } from 'ai';
1260
+ import { generateText, tool, isStepCount } from 'ai';
1159
1261
  import { z } from 'zod';
1160
1262
 
1161
1263
  const result = await generateText({
1162
1264
  model: anthropic('claude-sonnet-4-5'),
1163
- stopWhen: stepCountIs(10),
1265
+ stopWhen: isStepCount(10),
1164
1266
  prompt:
1165
1267
  'Get the weather for Tokyo, Sydney, and London, then calculate the average temperature.',
1166
1268
  tools: {
@@ -1345,6 +1447,7 @@ and the `mediaType` should be set to `'application/pdf'`.
1345
1447
 
1346
1448
  | Model | Image Input | Object Generation | Tool Usage | Computer Use | Web Search | Tool Search | Compaction |
1347
1449
  | ------------------- | ------------------- | ------------------- | ------------------- | ------------------- | ------------------- | ------------------- | ------------------- |
1450
+ | `claude-opus-4-7` | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> |
1348
1451
  | `claude-opus-4-6` | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> |
1349
1452
  | `claude-sonnet-4-6` | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> | |
1350
1453
  | `claude-opus-4-5` | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> | |
package/package.json CHANGED
@@ -1,10 +1,10 @@
1
1
  {
2
2
  "name": "@ai-sdk/anthropic",
3
- "version": "4.0.0-beta.4",
3
+ "version": "4.0.0-beta.41",
4
+ "type": "module",
4
5
  "license": "Apache-2.0",
5
6
  "sideEffects": false,
6
7
  "main": "./dist/index.js",
7
- "module": "./dist/index.mjs",
8
8
  "types": "./dist/index.d.ts",
9
9
  "files": [
10
10
  "dist/**/*",
@@ -25,27 +25,26 @@
25
25
  "./package.json": "./package.json",
26
26
  ".": {
27
27
  "types": "./dist/index.d.ts",
28
- "import": "./dist/index.mjs",
29
- "require": "./dist/index.js"
28
+ "import": "./dist/index.js",
29
+ "default": "./dist/index.js"
30
30
  },
31
31
  "./internal": {
32
32
  "types": "./dist/internal/index.d.ts",
33
- "import": "./dist/internal/index.mjs",
34
- "module": "./dist/internal/index.mjs",
35
- "require": "./dist/internal/index.js"
33
+ "import": "./dist/internal/index.js",
34
+ "default": "./dist/internal/index.js"
36
35
  }
37
36
  },
38
37
  "dependencies": {
39
- "@ai-sdk/provider": "4.0.0-beta.0",
40
- "@ai-sdk/provider-utils": "5.0.0-beta.1"
38
+ "@ai-sdk/provider": "4.0.0-beta.14",
39
+ "@ai-sdk/provider-utils": "5.0.0-beta.29"
41
40
  },
42
41
  "devDependencies": {
43
42
  "@types/node": "20.17.24",
44
43
  "tsup": "^8",
45
44
  "typescript": "5.8.3",
46
45
  "zod": "3.25.76",
47
- "@vercel/ai-tsconfig": "0.0.0",
48
- "@ai-sdk/test-server": "2.0.0-beta.0"
46
+ "@ai-sdk/test-server": "2.0.0-beta.3",
47
+ "@vercel/ai-tsconfig": "0.0.0"
49
48
  },
50
49
  "peerDependencies": {
51
50
  "zod": "^3.25.76 || ^4.1.8"
@@ -54,12 +53,14 @@
54
53
  "node": ">=18"
55
54
  },
56
55
  "publishConfig": {
57
- "access": "public"
56
+ "access": "public",
57
+ "provenance": true
58
58
  },
59
59
  "homepage": "https://ai-sdk.dev/docs",
60
60
  "repository": {
61
61
  "type": "git",
62
- "url": "git+https://github.com/vercel/ai.git"
62
+ "url": "https://github.com/vercel/ai",
63
+ "directory": "packages/anthropic"
63
64
  },
64
65
  "bugs": {
65
66
  "url": "https://github.com/vercel/ai/issues"
@@ -71,9 +72,7 @@
71
72
  "build": "pnpm clean && tsup --tsconfig tsconfig.build.json",
72
73
  "build:watch": "pnpm clean && tsup --watch --tsconfig tsconfig.build.json",
73
74
  "clean": "del-cli dist docs *.tsbuildinfo",
74
- "lint": "eslint \"./**/*.ts*\"",
75
75
  "type-check": "tsc --build",
76
- "prettier-check": "prettier --check \"./**/*.ts*\"",
77
76
  "test": "pnpm test:node && pnpm test:edge",
78
77
  "test:update": "pnpm test:node -u",
79
78
  "test:watch": "vitest --config vitest.node.config.js",
@@ -1,8 +1,12 @@
1
- import { JSONSchema7 } from '@ai-sdk/provider';
2
- import { InferSchema, lazySchema, zodSchema } from '@ai-sdk/provider-utils';
1
+ import type { JSONSchema7 } from '@ai-sdk/provider';
2
+ import {
3
+ lazySchema,
4
+ zodSchema,
5
+ type InferSchema,
6
+ } from '@ai-sdk/provider-utils';
3
7
  import { z } from 'zod/v4';
4
8
 
5
- export type AnthropicMessagesPrompt = {
9
+ export type AnthropicPrompt = {
6
10
  system: Array<AnthropicTextContent> | undefined;
7
11
  messages: AnthropicMessage[];
8
12
  };
@@ -87,6 +91,10 @@ type AnthropicContentSource =
87
91
  type: 'text';
88
92
  media_type: 'text/plain';
89
93
  data: string;
94
+ }
95
+ | {
96
+ type: 'file';
97
+ file_id: string;
90
98
  };
91
99
 
92
100
  export interface AnthropicImageContent {
@@ -549,7 +557,7 @@ export type AnthropicResponseContextManagement = {
549
557
 
550
558
  // limited version of the schema, focused on what is needed for the implementation
551
559
  // this approach limits breakages when the API changes and increases efficiency
552
- export const anthropicMessagesResponseSchema = lazySchema(() =>
560
+ export const anthropicResponseSchema = lazySchema(() =>
553
561
  zodSchema(
554
562
  z.object({
555
563
  type: z.literal('message'),
@@ -889,7 +897,7 @@ export const anthropicMessagesResponseSchema = lazySchema(() =>
889
897
 
890
898
  // limited version of the schema, focused on what is needed for the implementation
891
899
  // this approach limits breakages when the API changes and increases efficiency
892
- export const anthropicMessagesChunkSchema = lazySchema(() =>
900
+ export const anthropicChunkSchema = lazySchema(() =>
893
901
  zodSchema(
894
902
  z.discriminatedUnion('type', [
895
903
  z.object({
@@ -1338,7 +1346,7 @@ export type AnthropicReasoningMetadata = InferSchema<
1338
1346
  >;
1339
1347
 
1340
1348
  export type Citation = NonNullable<
1341
- (InferSchema<typeof anthropicMessagesResponseSchema>['content'][number] & {
1349
+ (InferSchema<typeof anthropicResponseSchema>['content'][number] & {
1342
1350
  type: 'text';
1343
1351
  })['citations']
1344
1352
  >[number];
@@ -1,8 +1,8 @@
1
1
  import {
2
2
  createJsonErrorResponseHandler,
3
- InferSchema,
4
3
  lazySchema,
5
4
  zodSchema,
5
+ type InferSchema,
6
6
  } from '@ai-sdk/provider-utils';
7
7
  import { z } from 'zod/v4';
8
8
 
@@ -0,0 +1,95 @@
1
+ import type {
2
+ FilesV4,
3
+ FilesV4UploadFileCallOptions,
4
+ FilesV4UploadFileResult,
5
+ } from '@ai-sdk/provider';
6
+ import {
7
+ combineHeaders,
8
+ convertInlineFileDataToUint8Array,
9
+ createJsonResponseHandler,
10
+ lazySchema,
11
+ postFormDataToApi,
12
+ zodSchema,
13
+ type FetchFunction,
14
+ } from '@ai-sdk/provider-utils';
15
+ import { z } from 'zod/v4';
16
+ import { anthropicFailedResponseHandler } from './anthropic-error';
17
+
18
+ const anthropicUploadFileResponseSchema = lazySchema(() =>
19
+ zodSchema(
20
+ z.object({
21
+ id: z.string(),
22
+ type: z.literal('file'),
23
+ filename: z.string(),
24
+ mime_type: z.string(),
25
+ size_bytes: z.number(),
26
+ created_at: z.string(),
27
+ downloadable: z.boolean().nullish(),
28
+ }),
29
+ ),
30
+ );
31
+
32
+ interface AnthropicFilesConfig {
33
+ provider: string;
34
+ baseURL: string;
35
+ headers: () => Record<string, string | undefined>;
36
+ fetch?: FetchFunction;
37
+ }
38
+
39
+ export class AnthropicFiles implements FilesV4 {
40
+ readonly specificationVersion = 'v4';
41
+
42
+ get provider(): string {
43
+ return this.config.provider;
44
+ }
45
+
46
+ constructor(private readonly config: AnthropicFilesConfig) {}
47
+
48
+ async uploadFile({
49
+ data,
50
+ mediaType,
51
+ filename,
52
+ }: FilesV4UploadFileCallOptions): Promise<FilesV4UploadFileResult> {
53
+ const fileBytes = convertInlineFileDataToUint8Array(data);
54
+
55
+ const blob = new Blob([fileBytes], { type: mediaType });
56
+
57
+ const formData = new FormData();
58
+ if (filename != null) {
59
+ formData.append('file', blob, filename);
60
+ } else {
61
+ formData.append('file', blob);
62
+ }
63
+
64
+ const { value: response } = await postFormDataToApi({
65
+ url: `${this.config.baseURL}/files`,
66
+ headers: combineHeaders(this.config.headers(), {
67
+ 'anthropic-beta': 'files-api-2025-04-14',
68
+ }),
69
+ formData,
70
+ failedResponseHandler: anthropicFailedResponseHandler,
71
+ successfulResponseHandler: createJsonResponseHandler(
72
+ anthropicUploadFileResponseSchema,
73
+ ),
74
+ fetch: this.config.fetch,
75
+ });
76
+
77
+ return {
78
+ warnings: [],
79
+ providerReference: { anthropic: response.id },
80
+ mediaType: response.mime_type ?? mediaType,
81
+ filename: response.filename ?? filename,
82
+ providerMetadata: {
83
+ anthropic: {
84
+ filename: response.filename,
85
+ mimeType: response.mime_type,
86
+ sizeBytes: response.size_bytes,
87
+ createdAt: response.created_at,
88
+ ...(response.downloadable != null
89
+ ? { downloadable: response.downloadable }
90
+ : {}),
91
+ },
92
+ },
93
+ };
94
+ }
95
+ }