@fairyhunter13/ai-anthropic 3.0.58-fork.2 → 3.0.58-fork.21

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -146,6 +146,12 @@ The following optional provider options are available for Anthropic models:
146
146
  - `"jsonTool"`: Use a special `"json"` tool to specify the structured output format.
147
147
  - `"auto"`: Use `"outputFormat"` when supported, otherwise fall back to `"jsonTool"` (default).
148
148
 
149
+ - `metadata` _object_
150
+
151
+ Optional. Metadata to include with the request. See the [Anthropic API documentation](https://platform.claude.com/docs/en/api/messages/create) for details.
152
+
153
+ - `userId` _string_ - An external identifier for the end-user. Should be a UUID, hash, or other opaque identifier. Must not contain PII.
154
+
149
155
  ### Structured Outputs and Tool Input Streaming
150
156
 
151
157
  Tool call streaming is enabled by default. You can opt out by setting the
@@ -220,17 +226,58 @@ The `speed` option accepts `'fast'` or `'standard'` (default behavior).
220
226
 
221
227
  ### Reasoning
222
228
 
223
- Anthropic has reasoning support for `claude-opus-4-20250514`, `claude-sonnet-4-20250514`, and `claude-sonnet-4-5-20250929` models.
229
+ Anthropic models support extended thinking, where Claude shows its reasoning process before providing a final answer.
224
230
 
225
- You can enable it using the `thinking` provider option
226
- and specifying a thinking budget in tokens.
231
+ #### Adaptive Thinking
232
+
233
+ For newer models (`claude-sonnet-4-6`, `claude-opus-4-6`, and later), use adaptive thinking.
234
+ Claude automatically determines how much reasoning to use based on the complexity of the prompt.
227
235
 
228
236
  ```ts highlight="4,8-10"
229
237
  import { anthropic, AnthropicLanguageModelOptions } from '@ai-sdk/anthropic';
230
238
  import { generateText } from 'ai';
231
239
 
232
240
  const { text, reasoningText, reasoning } = await generateText({
233
- model: anthropic('claude-opus-4-20250514'),
241
+ model: anthropic('claude-opus-4-6'),
242
+ prompt: 'How many people will live in the world in 2040?',
243
+ providerOptions: {
244
+ anthropic: {
245
+ thinking: { type: 'adaptive' },
246
+ } satisfies AnthropicLanguageModelOptions,
247
+ },
248
+ });
249
+
250
+ console.log(reasoningText); // reasoning text
251
+ console.log(reasoning); // reasoning details including redacted reasoning
252
+ console.log(text); // text response
253
+ ```
254
+
255
+ You can combine adaptive thinking with the `effort` option to control how much reasoning Claude uses:
256
+
257
+ ```ts highlight="6-8"
258
+ const { text } = await generateText({
259
+ model: anthropic('claude-opus-4-6'),
260
+ prompt: 'Invent a new holiday and describe its traditions.',
261
+ providerOptions: {
262
+ anthropic: {
263
+ thinking: { type: 'adaptive' },
264
+ effort: 'max', // 'low' | 'medium' | 'high' | 'max'
265
+ } satisfies AnthropicLanguageModelOptions,
266
+ },
267
+ });
268
+ ```
269
+
270
+ #### Budget-Based Thinking
271
+
272
+ For earlier models (`claude-opus-4-20250514`, `claude-sonnet-4-20250514`, and `claude-sonnet-4-5-20250929`),
273
+ use `type: 'enabled'` with an explicit token budget:
274
+
275
+ ```ts highlight="4,8-10"
276
+ import { anthropic, AnthropicLanguageModelOptions } from '@ai-sdk/anthropic';
277
+ import { generateText } from 'ai';
278
+
279
+ const { text, reasoningText, reasoning } = await generateText({
280
+ model: anthropic('claude-sonnet-4-5-20250929'),
234
281
  prompt: 'How many people will live in the world in 2040?',
235
282
  providerOptions: {
236
283
  anthropic: {
@@ -1114,12 +1161,12 @@ import {
1114
1161
  anthropic,
1115
1162
  forwardAnthropicContainerIdFromLastStep,
1116
1163
  } from '@ai-sdk/anthropic';
1117
- import { generateText, tool, stepCountIs } from 'ai';
1164
+ import { generateText, tool, isStepCount } from 'ai';
1118
1165
  import { z } from 'zod';
1119
1166
 
1120
1167
  const result = await generateText({
1121
1168
  model: anthropic('claude-sonnet-4-5'),
1122
- stopWhen: stepCountIs(10),
1169
+ stopWhen: isStepCount(10),
1123
1170
  prompt:
1124
1171
  'Get the weather for Tokyo, Sydney, and London, then calculate the average temperature.',
1125
1172
  tools: {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@fairyhunter13/ai-anthropic",
3
- "version": "3.0.58-fork.2",
3
+ "version": "3.0.58-fork.21",
4
4
  "license": "Apache-2.0",
5
5
  "sideEffects": false,
6
6
  "main": "./dist/index.js",
@@ -27,9 +27,7 @@
27
27
  "clean": "del-cli dist docs *.tsbuildinfo",
28
28
  "prepack": "mkdir -p docs && cp ../../content/providers/01-ai-sdk-providers/05-anthropic.mdx ./docs/",
29
29
  "postpack": "del-cli docs",
30
- "lint": "eslint \"./**/*.ts*\"",
31
30
  "type-check": "tsc --build",
32
- "prettier-check": "prettier --check \"./**/*.ts*\"",
33
31
  "test": "pnpm test:node && pnpm test:edge",
34
32
  "test:update": "pnpm test:node -u",
35
33
  "test:watch": "vitest --config vitest.node.config.js",
@@ -51,11 +49,13 @@
51
49
  }
52
50
  },
53
51
  "dependencies": {
54
- "@ai-sdk/provider": "3.0.8",
55
- "@ai-sdk/provider-utils": "4.0.19"
52
+ "@ai-sdk/provider": "workspace:*",
53
+ "@ai-sdk/provider-utils": "workspace:*"
56
54
  },
57
55
  "devDependencies": {
56
+ "@ai-sdk/test-server": "workspace:*",
58
57
  "@types/node": "20.17.24",
58
+ "@vercel/ai-tsconfig": "workspace:*",
59
59
  "tsup": "^8",
60
60
  "typescript": "5.8.3",
61
61
  "zod": "3.25.76"
@@ -72,10 +72,10 @@
72
72
  "homepage": "https://ai-sdk.dev/docs",
73
73
  "repository": {
74
74
  "type": "git",
75
- "url": "git+https://github.com/fairyhunter13/ai.git"
75
+ "url": "git+https://github.com/vercel/ai.git"
76
76
  },
77
77
  "bugs": {
78
- "url": "https://github.com/fairyhunter13/ai/issues"
78
+ "url": "https://github.com/vercel/ai/issues"
79
79
  },
80
80
  "keywords": [
81
81
  "ai"
@@ -202,13 +202,18 @@ export interface AnthropicToolResultContent {
202
202
  export interface AnthropicWebSearchToolResultContent {
203
203
  type: 'web_search_tool_result';
204
204
  tool_use_id: string;
205
- content: Array<{
206
- url: string;
207
- title: string | null;
208
- page_age: string | null;
209
- encrypted_content: string;
210
- type: string;
211
- }>;
205
+ content:
206
+ | Array<{
207
+ url: string;
208
+ title: string | null;
209
+ page_age: string | null;
210
+ encrypted_content: string;
211
+ type: string;
212
+ }>
213
+ | {
214
+ type: 'web_search_tool_result_error';
215
+ error_code: string;
216
+ };
212
217
  cache_control: AnthropicCacheControl | undefined;
213
218
  }
214
219
 
@@ -437,6 +442,22 @@ export type AnthropicTool =
437
442
  max_content_tokens?: number;
438
443
  cache_control: AnthropicCacheControl | undefined;
439
444
  }
445
+ | {
446
+ type: 'web_fetch_20260309';
447
+ name: string;
448
+ max_uses?: number;
449
+ allowed_domains?: string[];
450
+ blocked_domains?: string[];
451
+ citations?: { enabled: boolean };
452
+ max_content_tokens?: number;
453
+ allowed_callers?: Array<
454
+ 'direct' | 'code_execution_20250825' | 'code_execution_20260120'
455
+ >;
456
+ use_cache?: boolean;
457
+ defer_loading?: boolean;
458
+ strict?: boolean;
459
+ cache_control: AnthropicCacheControl | undefined;
460
+ }
440
461
  | {
441
462
  type: 'web_search_20250305' | 'web_search_20260209';
442
463
  name: string;
@@ -450,6 +471,9 @@ export type AnthropicTool =
450
471
  country?: string;
451
472
  timezone?: string;
452
473
  };
474
+ allowed_callers?: Array<
475
+ 'direct' | 'code_execution_20250825' | 'code_execution_20260120'
476
+ >;
453
477
  cache_control: AnthropicCacheControl | undefined;
454
478
  }
455
479
  | {