ai-sdk-docs 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +15 -0
- package/index.ts +67 -0
- package/migration.md +819 -0
- package/package.json +24 -0
- package/system-prompt.md +1 -0
- package/text.d.ts +5 -0
package/README.md
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
# agent-academy
|
|
2
|
+
|
|
3
|
+
To install dependencies:
|
|
4
|
+
|
|
5
|
+
```bash
|
|
6
|
+
bun install
|
|
7
|
+
```
|
|
8
|
+
|
|
9
|
+
To run:
|
|
10
|
+
|
|
11
|
+
```bash
|
|
12
|
+
bun run index.ts
|
|
13
|
+
```
|
|
14
|
+
|
|
15
|
+
This project was created using `bun init` in bun v1.3.5. [Bun](https://bun.com) is a fast all-in-one JavaScript runtime.
|
package/index.ts
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
1
|
+
#!/usr/bin/env bun
|
|
2
|
+
import { query, type SDKResultSuccess, type SDKResultError } from "@anthropic-ai/claude-agent-sdk"
|
|
3
|
+
|
|
4
|
+
// Embed markdown files at build time using Bun's text imports
|
|
5
|
+
import systemPrompt from "./system-prompt.md" with { type: "text" }
|
|
6
|
+
import migrationDocs from "./migration.md" with { type: "text" }
|
|
7
|
+
|
|
8
|
+
const userPrompt = Bun.argv[2]
|
|
9
|
+
|
|
10
|
+
if (!userPrompt) {
|
|
11
|
+
console.error("Usage: ai-sdk-docs <question>")
|
|
12
|
+
console.error('Example: ai-sdk-docs "How do I use streamText?"')
|
|
13
|
+
process.exit(1)
|
|
14
|
+
}
|
|
15
|
+
|
|
16
|
+
// CLI flag mapping:
|
|
17
|
+
// --setting-sources "" → settingSources: [] (default, no filesystem settings loaded)
|
|
18
|
+
// --settings { disableAllHooks: true } → hooks: {} (empty, no hooks loaded)
|
|
19
|
+
// --tools "" → tools: [] (disables all built-in tools)
|
|
20
|
+
// --system-prompt → systemPrompt: string
|
|
21
|
+
// --append-system-prompt → concatenated to systemPrompt
|
|
22
|
+
// --no-chrome → not needed when tools disabled
|
|
23
|
+
// --disable-slash-commands → slash commands not available when settingSources: []
|
|
24
|
+
// --model haiku → model: 'claude-3-5-haiku-20241022'
|
|
25
|
+
// --print → query() with single prompt
|
|
26
|
+
|
|
27
|
+
try {
|
|
28
|
+
const q = query({
|
|
29
|
+
prompt: userPrompt,
|
|
30
|
+
options: {
|
|
31
|
+
model: "claude-3-5-haiku-20241022",
|
|
32
|
+
systemPrompt: `${systemPrompt}\n\n${migrationDocs}`,
|
|
33
|
+
tools: [],
|
|
34
|
+
settingSources: [],
|
|
35
|
+
hooks: {},
|
|
36
|
+
},
|
|
37
|
+
})
|
|
38
|
+
|
|
39
|
+
for await (const msg of q) {
|
|
40
|
+
if (msg.type === "result") {
|
|
41
|
+
if (msg.subtype === "success") {
|
|
42
|
+
console.log((msg as SDKResultSuccess).result)
|
|
43
|
+
} else {
|
|
44
|
+
const errorMsg = msg as SDKResultError
|
|
45
|
+
console.error("Error:", errorMsg.errors?.join("\n") ?? "Unknown error")
|
|
46
|
+
process.exit(1)
|
|
47
|
+
}
|
|
48
|
+
}
|
|
49
|
+
}
|
|
50
|
+
} catch (error) {
|
|
51
|
+
const err = error as Error
|
|
52
|
+
|
|
53
|
+
if (err.message?.includes("ENOENT") || err.message?.includes("not found")) {
|
|
54
|
+
console.error("Error: Claude Code CLI not found.")
|
|
55
|
+
console.error("Please install Claude Code: https://claude.ai/code")
|
|
56
|
+
process.exit(1)
|
|
57
|
+
}
|
|
58
|
+
|
|
59
|
+
if (err.message?.includes("authentication") || err.message?.includes("API key") || err.message?.includes("unauthorized")) {
|
|
60
|
+
console.error("Error: Not authenticated with Claude.")
|
|
61
|
+
console.error("Please run 'claude login' to authenticate.")
|
|
62
|
+
process.exit(1)
|
|
63
|
+
}
|
|
64
|
+
|
|
65
|
+
console.error("Error:", err.message ?? "Unknown error occurred")
|
|
66
|
+
process.exit(1)
|
|
67
|
+
}
|
package/migration.md
ADDED
|
@@ -0,0 +1,819 @@
|
|
|
1
|
+
|
|
2
|
+
# Migrate AI SDK 5.x to 6.0
|
|
3
|
+
|
|
4
|
+
## Recommended Migration Process
|
|
5
|
+
|
|
6
|
+
1. Back up your project. If you use a version control system, make sure all pending changes are committed.
|
|
7
|
+
1. Upgrade to AI SDK 6.0.
|
|
8
|
+
1. Follow the breaking changes guide below.
|
|
9
|
+
1. Verify your project is working as expected.
|
|
10
|
+
1. Commit your changes.
|
|
11
|
+
|
|
12
|
+
## AI SDK 6.0 Package Versions
|
|
13
|
+
|
|
14
|
+
You need to update the following packages to the latest versions in your `package.json` file(s):
|
|
15
|
+
|
|
16
|
+
- `ai` package: `^6.0.0`
|
|
17
|
+
- `@ai-sdk/provider` package: `^3.0.0`
|
|
18
|
+
- `@ai-sdk/provider-utils` package: `^4.0.0`
|
|
19
|
+
- `@ai-sdk/*` packages: `^3.0.0`
|
|
20
|
+
|
|
21
|
+
An example upgrade command would be:
|
|
22
|
+
|
|
23
|
+
```
|
|
24
|
+
pnpm install ai@latest @ai-sdk/react@latest @ai-sdk/openai@latest
|
|
25
|
+
```
|
|
26
|
+
|
|
27
|
+
## Codemods
|
|
28
|
+
|
|
29
|
+
The AI SDK provides Codemod transformations to help upgrade your codebase when a
|
|
30
|
+
feature is deprecated, removed, or otherwise changed.
|
|
31
|
+
|
|
32
|
+
Codemods are transformations that run on your codebase automatically. They
|
|
33
|
+
allow you to easily apply many changes without having to manually go through
|
|
34
|
+
every file.
|
|
35
|
+
|
|
36
|
+
You can run all v6 codemods (v5 → v6 migration) by running the following command
|
|
37
|
+
from the root of your project:
|
|
38
|
+
|
|
39
|
+
```sh
|
|
40
|
+
npx @ai-sdk/codemod v6
|
|
41
|
+
```
|
|
42
|
+
|
|
43
|
+
<Note>
|
|
44
|
+
There is also an `npx @ai-sdk/codemod upgrade` command, but it runs all
|
|
45
|
+
codemods from all versions (v4, v5, and v6). Use `v6` when upgrading from v5.
|
|
46
|
+
</Note>
|
|
47
|
+
|
|
48
|
+
Individual codemods can be run by specifying the name of the codemod:
|
|
49
|
+
|
|
50
|
+
```sh
|
|
51
|
+
npx @ai-sdk/codemod <codemod-name> <path>
|
|
52
|
+
```
|
|
53
|
+
|
|
54
|
+
For example, to run a specific v6 codemod:
|
|
55
|
+
|
|
56
|
+
```sh
|
|
57
|
+
npx @ai-sdk/codemod v6/rename-text-embedding-to-embedding src/
|
|
58
|
+
```
|
|
59
|
+
|
|
60
|
+
<Note>
|
|
61
|
+
Codemods are intended as a tool to help you with the upgrade process. They may
|
|
62
|
+
not cover all of the changes you need to make. You may need to make additional
|
|
63
|
+
changes manually.
|
|
64
|
+
</Note>
|
|
65
|
+
|
|
66
|
+
## Codemod Table
|
|
67
|
+
|
|
68
|
+
| Codemod Name | Description |
|
|
69
|
+
| -------------------------------------------------------- | -------------------------------------------------------------------------------------------------- |
|
|
70
|
+
| `rename-text-embedding-to-embedding` | Renames `textEmbeddingModel` to `embeddingModel` and `textEmbedding` to `embedding` on providers |
|
|
71
|
+
| `rename-mock-v2-to-v3` | Renames V2 mock classes from `ai/test` to V3 (e.g., `MockLanguageModelV2` → `MockLanguageModelV3`) |
|
|
72
|
+
| `rename-tool-call-options-to-tool-execution-options` | Renames the `ToolCallOptions` type to `ToolExecutionOptions` |
|
|
73
|
+
| `rename-core-message-to-model-message` | Renames the `CoreMessage` type to `ModelMessage` |
|
|
74
|
+
| `rename-converttocoremessages-to-converttomodelmessages` | Renames `convertToCoreMessages` function to `convertToModelMessages` |
|
|
75
|
+
| `rename-vertex-provider-metadata-key` | Renames `google` to `vertex` in `providerMetadata` and `providerOptions` for Google Vertex files |
|
|
76
|
+
| `wrap-tomodeloutput-parameter` | Wraps `toModelOutput` parameter in object destructuring (`output` → `{ output }`) |
|
|
77
|
+
| `add-await-converttomodelmessages` | Adds `await` to `convertToModelMessages` calls (now async in AI SDK 6) |
|
|
78
|
+
|
|
79
|
+
## AI SDK Core
|
|
80
|
+
|
|
81
|
+
### `Experimental_Agent` to `ToolLoopAgent` Class
|
|
82
|
+
|
|
83
|
+
The `Experimental_Agent` class has been replaced with the `ToolLoopAgent` class. Two key changes:
|
|
84
|
+
|
|
85
|
+
1. The `system` parameter has been renamed to `instructions`
|
|
86
|
+
2. The default `stopWhen` has changed from `stepCountIs(1)` to `stepCountIs(20)`
|
|
87
|
+
|
|
88
|
+
```tsx filename="AI SDK 5"
|
|
89
|
+
import { Experimental_Agent as Agent, stepCountIs } from 'ai';
|
|
90
|
+
__PROVIDER_IMPORT__;
|
|
91
|
+
|
|
92
|
+
const agent = new Agent({
|
|
93
|
+
model: __MODEL__,
|
|
94
|
+
system: 'You are a helpful assistant.',
|
|
95
|
+
tools: {
|
|
96
|
+
// your tools here
|
|
97
|
+
},
|
|
98
|
+
stopWhen: stepCountIs(20), // Required for multi-step agent loops
|
|
99
|
+
});
|
|
100
|
+
|
|
101
|
+
const result = await agent.generate({
|
|
102
|
+
prompt: 'What is the weather in San Francisco?',
|
|
103
|
+
});
|
|
104
|
+
```
|
|
105
|
+
|
|
106
|
+
```tsx filename="AI SDK 6"
|
|
107
|
+
import { ToolLoopAgent } from 'ai';
|
|
108
|
+
__PROVIDER_IMPORT__;
|
|
109
|
+
|
|
110
|
+
const agent = new ToolLoopAgent({
|
|
111
|
+
model: __MODEL__,
|
|
112
|
+
instructions: 'You are a helpful assistant.',
|
|
113
|
+
tools: {
|
|
114
|
+
// your tools here
|
|
115
|
+
},
|
|
116
|
+
// stopWhen defaults to stepCountIs(20)
|
|
117
|
+
});
|
|
118
|
+
|
|
119
|
+
const result = await agent.generate({
|
|
120
|
+
prompt: 'What is the weather in San Francisco?',
|
|
121
|
+
});
|
|
122
|
+
```
|
|
123
|
+
|
|
124
|
+
Learn more about [building agents](/docs/agents/building-agents).
|
|
125
|
+
|
|
126
|
+
### `CoreMessage` Removal
|
|
127
|
+
|
|
128
|
+
The deprecated `CoreMessage` type and related functions have been removed ([PR #10710](https://github.com/vercel/ai/pull/10710)). Replace `convertToCoreMessages` with `convertToModelMessages`.
|
|
129
|
+
|
|
130
|
+
```tsx filename="AI SDK 5"
|
|
131
|
+
import { convertToCoreMessages, type CoreMessage } from 'ai';
|
|
132
|
+
|
|
133
|
+
const coreMessages = convertToCoreMessages(messages); // CoreMessage[]
|
|
134
|
+
```
|
|
135
|
+
|
|
136
|
+
```tsx filename="AI SDK 6"
|
|
137
|
+
import { convertToModelMessages, type ModelMessage } from 'ai';
|
|
138
|
+
|
|
139
|
+
const modelMessages = await convertToModelMessages(messages); // ModelMessage[]
|
|
140
|
+
```
|
|
141
|
+
|
|
142
|
+
<Note>
|
|
143
|
+
Use the `rename-core-message-to-model-message` and
|
|
144
|
+
`rename-converttocoremessages-to-converttomodelmessages` codemods to
|
|
145
|
+
automatically update your codebase.
|
|
146
|
+
</Note>
|
|
147
|
+
|
|
148
|
+
### `generateObject` and `streamObject` Deprecation
|
|
149
|
+
|
|
150
|
+
`generateObject` and `streamObject` have been deprecated ([PR #10754](https://github.com/vercel/ai/pull/10754)).
|
|
151
|
+
They will be removed in a future version.
|
|
152
|
+
Use `generateText` and `streamText` with an `output` setting instead.
|
|
153
|
+
|
|
154
|
+
```tsx filename="AI SDK 5"
|
|
155
|
+
import { generateObject } from 'ai';
|
|
156
|
+
__PROVIDER_IMPORT__;
|
|
157
|
+
import { z } from 'zod';
|
|
158
|
+
|
|
159
|
+
const { object } = await generateObject({
|
|
160
|
+
model: __MODEL__,
|
|
161
|
+
schema: z.object({
|
|
162
|
+
recipe: z.object({
|
|
163
|
+
name: z.string(),
|
|
164
|
+
ingredients: z.array(z.object({ name: z.string(), amount: z.string() })),
|
|
165
|
+
steps: z.array(z.string()),
|
|
166
|
+
}),
|
|
167
|
+
}),
|
|
168
|
+
prompt: 'Generate a lasagna recipe.',
|
|
169
|
+
});
|
|
170
|
+
```
|
|
171
|
+
|
|
172
|
+
```tsx filename="AI SDK 6"
|
|
173
|
+
import { generateText, Output } from 'ai';
|
|
174
|
+
__PROVIDER_IMPORT__;
|
|
175
|
+
import { z } from 'zod';
|
|
176
|
+
|
|
177
|
+
const { output } = await generateText({
|
|
178
|
+
model: __MODEL__,
|
|
179
|
+
output: Output.object({
|
|
180
|
+
schema: z.object({
|
|
181
|
+
recipe: z.object({
|
|
182
|
+
name: z.string(),
|
|
183
|
+
ingredients: z.array(
|
|
184
|
+
z.object({ name: z.string(), amount: z.string() }),
|
|
185
|
+
),
|
|
186
|
+
steps: z.array(z.string()),
|
|
187
|
+
}),
|
|
188
|
+
}),
|
|
189
|
+
}),
|
|
190
|
+
prompt: 'Generate a lasagna recipe.',
|
|
191
|
+
});
|
|
192
|
+
```
|
|
193
|
+
|
|
194
|
+
For streaming structured data, replace `streamObject` with `streamText`:
|
|
195
|
+
|
|
196
|
+
```tsx filename="AI SDK 5"
|
|
197
|
+
import { streamObject } from 'ai';
|
|
198
|
+
__PROVIDER_IMPORT__;
|
|
199
|
+
import { z } from 'zod';
|
|
200
|
+
|
|
201
|
+
const { partialObjectStream } = streamObject({
|
|
202
|
+
model: __MODEL__,
|
|
203
|
+
schema: z.object({
|
|
204
|
+
recipe: z.object({
|
|
205
|
+
name: z.string(),
|
|
206
|
+
ingredients: z.array(z.object({ name: z.string(), amount: z.string() })),
|
|
207
|
+
steps: z.array(z.string()),
|
|
208
|
+
}),
|
|
209
|
+
}),
|
|
210
|
+
prompt: 'Generate a lasagna recipe.',
|
|
211
|
+
});
|
|
212
|
+
|
|
213
|
+
for await (const partialObject of partialObjectStream) {
|
|
214
|
+
console.log(partialObject);
|
|
215
|
+
}
|
|
216
|
+
```
|
|
217
|
+
|
|
218
|
+
```tsx filename="AI SDK 6"
|
|
219
|
+
import { streamText, Output } from 'ai';
|
|
220
|
+
__PROVIDER_IMPORT__;
|
|
221
|
+
import { z } from 'zod';
|
|
222
|
+
|
|
223
|
+
const { partialOutputStream } = streamText({
|
|
224
|
+
model: __MODEL__,
|
|
225
|
+
output: Output.object({
|
|
226
|
+
schema: z.object({
|
|
227
|
+
recipe: z.object({
|
|
228
|
+
name: z.string(),
|
|
229
|
+
ingredients: z.array(
|
|
230
|
+
z.object({ name: z.string(), amount: z.string() }),
|
|
231
|
+
),
|
|
232
|
+
steps: z.array(z.string()),
|
|
233
|
+
}),
|
|
234
|
+
}),
|
|
235
|
+
}),
|
|
236
|
+
prompt: 'Generate a lasagna recipe.',
|
|
237
|
+
});
|
|
238
|
+
|
|
239
|
+
for await (const partialObject of partialOutputStream) {
|
|
240
|
+
console.log(partialObject);
|
|
241
|
+
}
|
|
242
|
+
```
|
|
243
|
+
|
|
244
|
+
Learn more about [generating structured data](/docs/ai-sdk-core/generating-structured-data).
|
|
245
|
+
|
|
246
|
+
### async `convertToModelMessages`
|
|
247
|
+
|
|
248
|
+
`convertToModelMessages()` is async in AI SDK 6 to support async `Tool.toModelOutput()`.
|
|
249
|
+
|
|
250
|
+
```tsx filename="AI SDK 5"
|
|
251
|
+
import { convertToModelMessages } from 'ai';
|
|
252
|
+
|
|
253
|
+
const modelMessages = convertToModelMessages(uiMessages);
|
|
254
|
+
```
|
|
255
|
+
|
|
256
|
+
```tsx filename="AI SDK 6"
|
|
257
|
+
import { convertToModelMessages } from 'ai';
|
|
258
|
+
|
|
259
|
+
const modelMessages = await convertToModelMessages(uiMessages);
|
|
260
|
+
```
|
|
261
|
+
|
|
262
|
+
<Note>
|
|
263
|
+
Use the `add-await-converttomodelmessages` codemod to automatically update
|
|
264
|
+
your codebase.
|
|
265
|
+
</Note>
|
|
266
|
+
|
|
267
|
+
### `Tool.toModelOutput` changes
|
|
268
|
+
|
|
269
|
+
`toModelOutput()` receives a parameter object with an `output` property in AI SDK 6.
|
|
270
|
+
|
|
271
|
+
In AI SDK 5, the output value was passed directly as the function's parameter.
|
|
272
|
+
|
|
273
|
+
```tsx filename="AI SDK 5"
|
|
274
|
+
import { tool } from 'ai';
|
|
275
|
+
|
|
276
|
+
const someTool = tool({
|
|
277
|
+
// ...
|
|
278
|
+
toModelOutput: output => {
|
|
279
|
+
// ...
|
|
280
|
+
},
|
|
281
|
+
});
|
|
282
|
+
```
|
|
283
|
+
|
|
284
|
+
```tsx filename="AI SDK 6"
|
|
285
|
+
import { tool } from 'ai';
|
|
286
|
+
|
|
287
|
+
const someTool = tool({
|
|
288
|
+
// ...
|
|
289
|
+
toModelOutput: ({ output }) => {
|
|
290
|
+
// ...
|
|
291
|
+
},
|
|
292
|
+
});
|
|
293
|
+
```
|
|
294
|
+
|
|
295
|
+
<Note>
|
|
296
|
+
Use the `wrap-tomodeloutput-parameter` codemod to automatically update your
|
|
297
|
+
codebase.
|
|
298
|
+
</Note>
|
|
299
|
+
|
|
300
|
+
### `cachedInputTokens` and `reasoningTokens` in `LanguageModelUsage` Deprecation
|
|
301
|
+
|
|
302
|
+
`cachedInputTokens` and `reasoningTokens` in `LanguageModelUsage` have been deprecated.
|
|
303
|
+
|
|
304
|
+
You can replace `cachedInputTokens` with `inputTokenDetails.cacheReadTokens`
|
|
305
|
+
and `reasoningTokens` with `outputTokenDetails.reasoningTokens`.
|
|
306
|
+
|
|
307
|
+
### `ToolCallOptions` to `ToolExecutionOptions` Rename
|
|
308
|
+
|
|
309
|
+
The `ToolCallOptions` type has been renamed to `ToolExecutionOptions`
|
|
310
|
+
and the old `ToolCallOptions` name is now deprecated.
|
|
311
|
+
|
|
312
|
+
<Note>
|
|
313
|
+
Use the `rename-tool-call-options-to-tool-execution-options` codemod to
|
|
314
|
+
automatically update your codebase.
|
|
315
|
+
</Note>
|
|
316
|
+
|
|
317
|
+
### Per-Tool Strict Mode
|
|
318
|
+
|
|
319
|
+
Strict mode for tools is now controlled by setting `strict` on each tool ([PR #10817](https://github.com/vercel/ai/pull/10817)). This enables fine-grained control over strict tool calls, which is important since strict mode depends on the specific tool input schema.
|
|
320
|
+
|
|
321
|
+
```tsx filename="AI SDK 5"
|
|
322
|
+
__PROVIDER_IMPORT__;
|
|
323
|
+
import { streamText, tool } from 'ai';
|
|
324
|
+
import { z } from 'zod';
|
|
325
|
+
|
|
326
|
+
// Tool strict mode was controlled by strictJsonSchema
|
|
327
|
+
const result = streamText({
|
|
328
|
+
model: __MODEL__,
|
|
329
|
+
tools: {
|
|
330
|
+
calculator: tool({
|
|
331
|
+
description: 'A simple calculator',
|
|
332
|
+
inputSchema: z.object({
|
|
333
|
+
expression: z.string(),
|
|
334
|
+
}),
|
|
335
|
+
execute: async ({ expression }) => {
|
|
336
|
+
const result = eval(expression);
|
|
337
|
+
return { result };
|
|
338
|
+
},
|
|
339
|
+
}),
|
|
340
|
+
},
|
|
341
|
+
providerOptions: {
|
|
342
|
+
openai: {
|
|
343
|
+
strictJsonSchema: true, // Applied to all tools
|
|
344
|
+
},
|
|
345
|
+
},
|
|
346
|
+
});
|
|
347
|
+
```
|
|
348
|
+
|
|
349
|
+
```tsx filename="AI SDK 6"
|
|
350
|
+
__PROVIDER_IMPORT__;
|
|
351
|
+
import { streamText, tool } from 'ai';
|
|
352
|
+
import { z } from 'zod';
|
|
353
|
+
|
|
354
|
+
const result = streamText({
|
|
355
|
+
model: __MODEL__,
|
|
356
|
+
tools: {
|
|
357
|
+
calculator: tool({
|
|
358
|
+
description: 'A simple calculator',
|
|
359
|
+
inputSchema: z.object({
|
|
360
|
+
expression: z.string(),
|
|
361
|
+
}),
|
|
362
|
+
execute: async ({ expression }) => {
|
|
363
|
+
const result = eval(expression);
|
|
364
|
+
return { result };
|
|
365
|
+
},
|
|
366
|
+
strict: true, // Control strict mode per tool
|
|
367
|
+
}),
|
|
368
|
+
},
|
|
369
|
+
});
|
|
370
|
+
```
|
|
371
|
+
|
|
372
|
+
### Flexible Tool Content
|
|
373
|
+
|
|
374
|
+
AI SDK 6 introduces more flexible tool output and result content support ([PR #9605](https://github.com/vercel/ai/pull/9605)), enabling richer tool interactions and better support for complex tool execution patterns.
|
|
375
|
+
|
|
376
|
+
### `ToolCallRepairFunction` Signature
|
|
377
|
+
|
|
378
|
+
The `system` parameter in the `ToolCallRepairFunction` type now accepts `SystemModelMessage` in addition to `string` ([PR #10635](https://github.com/vercel/ai/pull/10635)). This allows for more flexible system message configuration, including provider-specific options like caching.
|
|
379
|
+
|
|
380
|
+
```tsx filename="AI SDK 5"
|
|
381
|
+
import type { ToolCallRepairFunction } from 'ai';
|
|
382
|
+
|
|
383
|
+
const repairToolCall: ToolCallRepairFunction<MyTools> = async ({
|
|
384
|
+
system, // type: string | undefined
|
|
385
|
+
messages,
|
|
386
|
+
toolCall,
|
|
387
|
+
tools,
|
|
388
|
+
inputSchema,
|
|
389
|
+
error,
|
|
390
|
+
}) => {
|
|
391
|
+
// ...
|
|
392
|
+
};
|
|
393
|
+
```
|
|
394
|
+
|
|
395
|
+
```tsx filename="AI SDK 6"
|
|
396
|
+
import type { ToolCallRepairFunction, SystemModelMessage } from 'ai';
|
|
397
|
+
|
|
398
|
+
const repairToolCall: ToolCallRepairFunction<MyTools> = async ({
|
|
399
|
+
system, // type: string | SystemModelMessage | undefined
|
|
400
|
+
messages,
|
|
401
|
+
toolCall,
|
|
402
|
+
tools,
|
|
403
|
+
inputSchema,
|
|
404
|
+
error,
|
|
405
|
+
}) => {
|
|
406
|
+
// Handle both string and SystemModelMessage
|
|
407
|
+
const systemText = typeof system === 'string' ? system : system?.content;
|
|
408
|
+
// ...
|
|
409
|
+
};
|
|
410
|
+
```
|
|
411
|
+
|
|
412
|
+
### Embedding Model Method Rename
|
|
413
|
+
|
|
414
|
+
The `textEmbeddingModel` and `textEmbedding` methods on providers have been renamed to `embeddingModel` and `embedding` respectively. Additionally, generics have been removed from `EmbeddingModel`, `embed`, and `embedMany` ([PR #10592](https://github.com/vercel/ai/pull/10592)).
|
|
415
|
+
|
|
416
|
+
```tsx filename="AI SDK 5"
|
|
417
|
+
import { openai } from '@ai-sdk/openai';
|
|
418
|
+
import { embed } from 'ai';
|
|
419
|
+
|
|
420
|
+
// Using the full method name
|
|
421
|
+
const model = openai.textEmbeddingModel('text-embedding-3-small');
|
|
422
|
+
|
|
423
|
+
// Using the shorthand
|
|
424
|
+
const model = openai.textEmbedding('text-embedding-3-small');
|
|
425
|
+
|
|
426
|
+
const { embedding } = await embed({
|
|
427
|
+
model: openai.textEmbedding('text-embedding-3-small'),
|
|
428
|
+
value: 'sunny day at the beach',
|
|
429
|
+
});
|
|
430
|
+
```
|
|
431
|
+
|
|
432
|
+
```tsx filename="AI SDK 6"
|
|
433
|
+
import { openai } from '@ai-sdk/openai';
|
|
434
|
+
import { embed } from 'ai';
|
|
435
|
+
|
|
436
|
+
// Using the full method name
|
|
437
|
+
const model = openai.embeddingModel('text-embedding-3-small');
|
|
438
|
+
|
|
439
|
+
// Using the shorthand
|
|
440
|
+
const model = openai.embedding('text-embedding-3-small');
|
|
441
|
+
|
|
442
|
+
const { embedding } = await embed({
|
|
443
|
+
model: openai.embedding('text-embedding-3-small'),
|
|
444
|
+
value: 'sunny day at the beach',
|
|
445
|
+
});
|
|
446
|
+
```
|
|
447
|
+
|
|
448
|
+
<Note>
|
|
449
|
+
Use the `rename-text-embedding-to-embedding` codemod to automatically update
|
|
450
|
+
your codebase.
|
|
451
|
+
</Note>
|
|
452
|
+
|
|
453
|
+
### Warning Logger
|
|
454
|
+
|
|
455
|
+
AI SDK 6 introduces a warning logger that outputs deprecation warnings and best practice recommendations ([PR #8343](https://github.com/vercel/ai/pull/8343)).
|
|
456
|
+
|
|
457
|
+
To disable warning logging, set the `AI_SDK_LOG_WARNINGS` environment variable to `false`:
|
|
458
|
+
|
|
459
|
+
```bash
|
|
460
|
+
export AI_SDK_LOG_WARNINGS=false
|
|
461
|
+
```
|
|
462
|
+
|
|
463
|
+
### Warning Type Unification
|
|
464
|
+
|
|
465
|
+
Separate warning types for each generation function have been consolidated into a single `Warning` type exported from the `ai` package ([PR #10631](https://github.com/vercel/ai/pull/10631)).
|
|
466
|
+
|
|
467
|
+
```tsx filename="AI SDK 5"
|
|
468
|
+
// Separate warning types for each generation function
|
|
469
|
+
import type {
|
|
470
|
+
CallWarning,
|
|
471
|
+
ImageModelCallWarning,
|
|
472
|
+
SpeechWarning,
|
|
473
|
+
TranscriptionWarning,
|
|
474
|
+
} from 'ai';
|
|
475
|
+
```
|
|
476
|
+
|
|
477
|
+
```tsx filename="AI SDK 6"
|
|
478
|
+
// Single Warning type for all generation functions
|
|
479
|
+
import type { Warning } from 'ai';
|
|
480
|
+
```
|
|
481
|
+
|
|
482
|
+
### Finish reason "unknown" merged into "other"
|
|
483
|
+
|
|
484
|
+
The `unknown` finish reason has been removed. It is now returned as `other`.
|
|
485
|
+
|
|
486
|
+
## AI SDK UI
|
|
487
|
+
|
|
488
|
+
### Tool UI Part Helper Functions Rename
|
|
489
|
+
|
|
490
|
+
The tool UI part helper functions have been renamed to better reflect their purpose and to accommodate both static and dynamic tool parts.
|
|
491
|
+
|
|
492
|
+
#### `isToolUIPart` → `isStaticToolUIPart`
|
|
493
|
+
|
|
494
|
+
The `isToolUIPart` function has been renamed to `isStaticToolUIPart` to clarify that it checks for static tool parts only.
|
|
495
|
+
|
|
496
|
+
```tsx filename="AI SDK 5"
|
|
497
|
+
import { isToolUIPart } from 'ai';
|
|
498
|
+
|
|
499
|
+
// Check if a part is a tool UI part
|
|
500
|
+
if (isToolUIPart(part)) {
|
|
501
|
+
console.log(part.toolName);
|
|
502
|
+
}
|
|
503
|
+
```
|
|
504
|
+
|
|
505
|
+
```tsx filename="AI SDK 6"
|
|
506
|
+
import { isStaticToolUIPart } from 'ai';
|
|
507
|
+
|
|
508
|
+
// Check if a part is a static tool UI part
|
|
509
|
+
if (isStaticToolUIPart(part)) {
|
|
510
|
+
console.log(part.toolName);
|
|
511
|
+
}
|
|
512
|
+
```
|
|
513
|
+
|
|
514
|
+
#### `isToolOrDynamicToolUIPart` → `isToolUIPart`
|
|
515
|
+
|
|
516
|
+
The `isToolOrDynamicToolUIPart` function has been renamed to `isToolUIPart`. The old name is deprecated but still available.
|
|
517
|
+
|
|
518
|
+
```tsx filename="AI SDK 5"
|
|
519
|
+
import { isToolOrDynamicToolUIPart } from 'ai';
|
|
520
|
+
|
|
521
|
+
// Check if a part is either a static or dynamic tool UI part
|
|
522
|
+
if (isToolOrDynamicToolUIPart(part)) {
|
|
523
|
+
console.log('Tool part found');
|
|
524
|
+
}
|
|
525
|
+
```
|
|
526
|
+
|
|
527
|
+
```tsx filename="AI SDK 6"
|
|
528
|
+
import { isToolUIPart } from 'ai';
|
|
529
|
+
|
|
530
|
+
// Check if a part is either a static or dynamic tool UI part
|
|
531
|
+
if (isToolUIPart(part)) {
|
|
532
|
+
console.log('Tool part found');
|
|
533
|
+
}
|
|
534
|
+
```
|
|
535
|
+
|
|
536
|
+
#### `getToolName` → `getStaticToolName`
|
|
537
|
+
|
|
538
|
+
The `getToolName` function has been renamed to `getStaticToolName` to clarify that it returns the tool name from static tool parts only.
|
|
539
|
+
|
|
540
|
+
```tsx filename="AI SDK 5"
|
|
541
|
+
import { getToolName } from 'ai';
|
|
542
|
+
|
|
543
|
+
// Get the tool name from a tool part
|
|
544
|
+
const name = getToolName(toolPart);
|
|
545
|
+
```
|
|
546
|
+
|
|
547
|
+
```tsx filename="AI SDK 6"
|
|
548
|
+
import { getStaticToolName } from 'ai';
|
|
549
|
+
|
|
550
|
+
// Get the tool name from a static tool part
|
|
551
|
+
const name = getStaticToolName(toolPart);
|
|
552
|
+
```
|
|
553
|
+
|
|
554
|
+
#### `getToolOrDynamicToolName` → `getToolName`
|
|
555
|
+
|
|
556
|
+
The `getToolOrDynamicToolName` function has been renamed to `getToolName`. The old name is deprecated but still available.
|
|
557
|
+
|
|
558
|
+
```tsx filename="AI SDK 5"
|
|
559
|
+
import { getToolOrDynamicToolName } from 'ai';
|
|
560
|
+
|
|
561
|
+
// Get the tool name from either a static or dynamic tool part
|
|
562
|
+
const name = getToolOrDynamicToolName(toolPart);
|
|
563
|
+
```
|
|
564
|
+
|
|
565
|
+
```tsx filename="AI SDK 6"
|
|
566
|
+
import { getToolName } from 'ai';
|
|
567
|
+
|
|
568
|
+
// Get the tool name from either a static or dynamic tool part
|
|
569
|
+
const name = getToolName(toolPart);
|
|
570
|
+
```
|
|
571
|
+
|
|
572
|
+
## Providers
|
|
573
|
+
|
|
574
|
+
### OpenAI
|
|
575
|
+
|
|
576
|
+
#### `strictJsonSchema` Defaults to True
|
|
577
|
+
|
|
578
|
+
The `strictJsonSchema` setting for JSON outputs and tool calls is enabled by default ([PR #10752](https://github.com/vercel/ai/pull/10752)). This improves stability and ensures valid JSON output that matches your schema.
|
|
579
|
+
|
|
580
|
+
However, strict mode is stricter about schema requirements. If you receive schema rejection errors, adjust your schema (for example, use `null` instead of `undefined`) or disable strict mode.
|
|
581
|
+
|
|
582
|
+
```tsx filename="AI SDK 5"
|
|
583
|
+
import { openai } from '@ai-sdk/openai';
|
|
584
|
+
import { generateObject } from 'ai';
|
|
585
|
+
import { z } from 'zod';
|
|
586
|
+
|
|
587
|
+
// strictJsonSchema was false by default
|
|
588
|
+
const result = await generateObject({
|
|
589
|
+
model: openai('gpt-5.1'),
|
|
590
|
+
schema: z.object({
|
|
591
|
+
name: z.string(),
|
|
592
|
+
}),
|
|
593
|
+
prompt: 'Generate a person',
|
|
594
|
+
});
|
|
595
|
+
```
|
|
596
|
+
|
|
597
|
+
```tsx filename="AI SDK 6"
|
|
598
|
+
import { openai } from '@ai-sdk/openai';
|
|
599
|
+
import { generateObject } from 'ai';
|
|
600
|
+
import { z } from 'zod';
|
|
601
|
+
|
|
602
|
+
// strictJsonSchema is true by default
|
|
603
|
+
const result = await generateObject({
|
|
604
|
+
model: openai('gpt-5.1'),
|
|
605
|
+
schema: z.object({
|
|
606
|
+
name: z.string(),
|
|
607
|
+
}),
|
|
608
|
+
prompt: 'Generate a person',
|
|
609
|
+
});
|
|
610
|
+
|
|
611
|
+
// Disable strict mode if needed
|
|
612
|
+
const resultNoStrict = await generateObject({
|
|
613
|
+
model: openai('gpt-5.1'),
|
|
614
|
+
schema: z.object({
|
|
615
|
+
name: z.string(),
|
|
616
|
+
}),
|
|
617
|
+
prompt: 'Generate a person',
|
|
618
|
+
providerOptions: {
|
|
619
|
+
openai: {
|
|
620
|
+
strictJsonSchema: false,
|
|
621
|
+
} satisfies OpenAIResponsesProviderOptions,
|
|
622
|
+
},
|
|
623
|
+
});
|
|
624
|
+
```
|
|
625
|
+
|
|
626
|
+
#### `structuredOutputs` Option Removed from Chat Model
|
|
627
|
+
|
|
628
|
+
The `structuredOutputs` provider option has been removed from chat models ([PR #10752](https://github.com/vercel/ai/pull/10752)). Use `strictJsonSchema` instead.
|
|
629
|
+
|
|
630
|
+
### Azure
|
|
631
|
+
|
|
632
|
+
#### Default Provider Uses Responses API
|
|
633
|
+
|
|
634
|
+
The `@ai-sdk/azure` provider now uses the Responses API by default when calling `azure()` ([PR #9868](https://github.com/vercel/ai/pull/9868)). To use the previous Chat Completions API behavior, use `azure.chat()` instead.
|
|
635
|
+
|
|
636
|
+
```tsx filename="AI SDK 5"
|
|
637
|
+
import { azure } from '@ai-sdk/azure';
|
|
638
|
+
|
|
639
|
+
// Used Chat Completions API
|
|
640
|
+
const model = azure('gpt-4o');
|
|
641
|
+
```
|
|
642
|
+
|
|
643
|
+
```tsx filename="AI SDK 6"
|
|
644
|
+
import { azure } from '@ai-sdk/azure';
|
|
645
|
+
|
|
646
|
+
// Now uses Responses API by default
|
|
647
|
+
const model = azure('gpt-4o');
|
|
648
|
+
|
|
649
|
+
// Use azure.chat() for Chat Completions API
|
|
650
|
+
const chatModel = azure.chat('gpt-4o');
|
|
651
|
+
|
|
652
|
+
// Use azure.responses() explicitly for Responses API
|
|
653
|
+
const responsesModel = azure.responses('gpt-4o');
|
|
654
|
+
```
|
|
655
|
+
|
|
656
|
+
<Note>
|
|
657
|
+
The Responses and Chat Completions APIs have different behavior and defaults.
|
|
658
|
+
If you depend on the Chat Completions API, switch your model instance to
|
|
659
|
+
`azure.chat()` and audit your configuration.
|
|
660
|
+
</Note>
|
|
661
|
+
|
|
662
|
+
#### Responses API `providerMetadata` and `providerOptions` Key
|
|
663
|
+
|
|
664
|
+
For the **Responses API**, the `@ai-sdk/azure` provider now uses `azure` as the key for `providerMetadata` and `providerOptions` instead of `openai`. The `openai` key is still supported for `providerOptions` input, but resulting `providerMetadata` output now uses `azure`.
|
|
665
|
+
|
|
666
|
+
```tsx filename="AI SDK 5"
|
|
667
|
+
import { azure } from '@ai-sdk/azure';
|
|
668
|
+
import { generateText } from 'ai';
|
|
669
|
+
|
|
670
|
+
const result = await generateText({
|
|
671
|
+
model: azure.responses('gpt-5-mini'), // use your own deployment
|
|
672
|
+
prompt: 'Hello',
|
|
673
|
+
providerOptions: {
|
|
674
|
+
openai: {
|
|
675
|
+
// AI SDK 5: use `openai` key for Responses API options
|
|
676
|
+
reasoningSummary: 'auto',
|
|
677
|
+
},
|
|
678
|
+
},
|
|
679
|
+
});
|
|
680
|
+
|
|
681
|
+
// Accessed metadata via 'openai' key
|
|
682
|
+
console.log(result.providerMetadata?.openai?.responseId);
|
|
683
|
+
```
|
|
684
|
+
|
|
685
|
+
```tsx filename="AI SDK 6"
|
|
686
|
+
import { azure } from '@ai-sdk/azure';
|
|
687
|
+
import { generateText } from 'ai';
|
|
688
|
+
|
|
689
|
+
const result = await generateText({
|
|
690
|
+
// azure() now uses the Responses API by default
|
|
691
|
+
model: azure('gpt-5-mini'), // use your own deployment
|
|
692
|
+
prompt: 'Hello',
|
|
693
|
+
providerOptions: {
|
|
694
|
+
azure: {
|
|
695
|
+
// AI SDK 6: use `azure` key for Responses API options
|
|
696
|
+
reasoningSummary: 'auto',
|
|
697
|
+
},
|
|
698
|
+
},
|
|
699
|
+
});
|
|
700
|
+
|
|
701
|
+
// Access metadata via 'azure' key
|
|
702
|
+
console.log(result.providerMetadata?.azure?.responseId);
|
|
703
|
+
```
|
|
704
|
+
|
|
705
|
+
### Anthropic
|
|
706
|
+
|
|
707
|
+
#### Structured Outputs Mode
|
|
708
|
+
|
|
709
|
+
Anthropic has [introduced native structured outputs for Claude Sonnet 4.5 and later models](https://www.claude.com/blog/structured-outputs-on-the-claude-developer-platform). The `@ai-sdk/anthropic` provider now includes a `structuredOutputMode` option to control how structured outputs are generated ([PR #10502](https://github.com/vercel/ai/pull/10502)).
|
|
710
|
+
|
|
711
|
+
The available modes are:
|
|
712
|
+
|
|
713
|
+
- `'outputFormat'`: Use Anthropic's native `output_format` parameter
|
|
714
|
+
- `'jsonTool'`: Use a special JSON tool to specify the structured output format
|
|
715
|
+
- `'auto'` (default): Use `'outputFormat'` when supported by the model, otherwise fall back to `'jsonTool'`
|
|
716
|
+
|
|
717
|
+
```tsx filename="AI SDK 6"
|
|
718
|
+
import { anthropic, type AnthropicProviderOptions } from '@ai-sdk/anthropic';
|
|
719
|
+
import { generateObject } from 'ai';
|
|
720
|
+
import { z } from 'zod';
|
|
721
|
+
|
|
722
|
+
const result = await generateObject({
|
|
723
|
+
model: anthropic('claude-sonnet-4-5-20250929'),
|
|
724
|
+
schema: z.object({
|
|
725
|
+
name: z.string(),
|
|
726
|
+
age: z.number(),
|
|
727
|
+
}),
|
|
728
|
+
prompt: 'Generate a person',
|
|
729
|
+
providerOptions: {
|
|
730
|
+
anthropic: {
|
|
731
|
+
// Explicitly set the structured output mode (optional)
|
|
732
|
+
structuredOutputMode: 'outputFormat',
|
|
733
|
+
} satisfies AnthropicProviderOptions,
|
|
734
|
+
},
|
|
735
|
+
});
|
|
736
|
+
```
|
|
737
|
+
|
|
738
|
+
### Google Vertex
|
|
739
|
+
|
|
740
|
+
#### `providerMetadata` and `providerOptions` Key
|
|
741
|
+
|
|
742
|
+
The `@ai-sdk/google-vertex` provider now uses `vertex` as the key for `providerMetadata` and `providerOptions` instead of `google`. The `google` key is still supported for `providerOptions` input, but resulting `providerMetadata` output now uses `vertex`.
|
|
743
|
+
|
|
744
|
+
```tsx filename="AI SDK 5"
|
|
745
|
+
import { vertex } from '@ai-sdk/google-vertex';
|
|
746
|
+
import { generateText } from 'ai';
|
|
747
|
+
|
|
748
|
+
const result = await generateText({
|
|
749
|
+
model: vertex('gemini-2.5-flash'),
|
|
750
|
+
providerOptions: {
|
|
751
|
+
google: {
|
|
752
|
+
safetySettings: [
|
|
753
|
+
/* ... */
|
|
754
|
+
],
|
|
755
|
+
}, // Used 'google' key
|
|
756
|
+
},
|
|
757
|
+
prompt: 'Hello',
|
|
758
|
+
});
|
|
759
|
+
|
|
760
|
+
// Accessed metadata via 'google' key
|
|
761
|
+
console.log(result.providerMetadata?.google?.safetyRatings);
|
|
762
|
+
```
|
|
763
|
+
|
|
764
|
+
```tsx filename="AI SDK 6"
|
|
765
|
+
import { vertex } from '@ai-sdk/google-vertex';
|
|
766
|
+
import { generateText } from 'ai';
|
|
767
|
+
|
|
768
|
+
const result = await generateText({
|
|
769
|
+
model: vertex('gemini-2.5-flash'),
|
|
770
|
+
providerOptions: {
|
|
771
|
+
vertex: {
|
|
772
|
+
safetySettings: [
|
|
773
|
+
/* ... */
|
|
774
|
+
],
|
|
775
|
+
}, // Now uses 'vertex' key
|
|
776
|
+
},
|
|
777
|
+
prompt: 'Hello',
|
|
778
|
+
});
|
|
779
|
+
|
|
780
|
+
// Access metadata via 'vertex' key
|
|
781
|
+
console.log(result.providerMetadata?.vertex?.safetyRatings);
|
|
782
|
+
```
|
|
783
|
+
|
|
784
|
+
<Note>
|
|
785
|
+
Use the `rename-vertex-provider-metadata-key` codemod to automatically update
|
|
786
|
+
your codebase.
|
|
787
|
+
</Note>
|
|
788
|
+
|
|
789
|
+
## `ai/test`
|
|
790
|
+
|
|
791
|
+
### Mock Classes
|
|
792
|
+
|
|
793
|
+
V2 mock classes have been removed from the `ai/test` module. Use the new V3 mock classes instead for testing.
|
|
794
|
+
|
|
795
|
+
```tsx filename="AI SDK 5"
|
|
796
|
+
import {
|
|
797
|
+
MockEmbeddingModelV2,
|
|
798
|
+
MockImageModelV2,
|
|
799
|
+
MockLanguageModelV2,
|
|
800
|
+
MockProviderV2,
|
|
801
|
+
MockSpeechModelV2,
|
|
802
|
+
MockTranscriptionModelV2,
|
|
803
|
+
} from 'ai/test';
|
|
804
|
+
```
|
|
805
|
+
|
|
806
|
+
```tsx filename="AI SDK 6"
|
|
807
|
+
import {
|
|
808
|
+
MockEmbeddingModelV3,
|
|
809
|
+
MockImageModelV3,
|
|
810
|
+
MockLanguageModelV3,
|
|
811
|
+
MockProviderV3,
|
|
812
|
+
MockSpeechModelV3,
|
|
813
|
+
MockTranscriptionModelV3,
|
|
814
|
+
} from 'ai/test';
|
|
815
|
+
```
|
|
816
|
+
|
|
817
|
+
<Note>
|
|
818
|
+
Use the `rename-mock-v2-to-v3` codemod to automatically update your codebase.
|
|
819
|
+
</Note>
|
package/package.json
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "ai-sdk-docs",
|
|
3
|
+
"version": "0.0.1",
|
|
4
|
+
"description": "AI SDK migration assistant - ask questions about Vercel AI SDK v5 to v6 migration",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"bin": {
|
|
7
|
+
"ai-sdk-docs": "index.ts"
|
|
8
|
+
},
|
|
9
|
+
"files": [
|
|
10
|
+
"index.ts",
|
|
11
|
+
"text.d.ts",
|
|
12
|
+
"system-prompt.md",
|
|
13
|
+
"migration.md"
|
|
14
|
+
],
|
|
15
|
+
"devDependencies": {
|
|
16
|
+
"@types/bun": "latest"
|
|
17
|
+
},
|
|
18
|
+
"peerDependencies": {
|
|
19
|
+
"typescript": "^5"
|
|
20
|
+
},
|
|
21
|
+
"dependencies": {
|
|
22
|
+
"@anthropic-ai/claude-agent-sdk": "^0.2.2"
|
|
23
|
+
}
|
|
24
|
+
}
|
package/system-prompt.md
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
You are an expert in migrating from version 5 to version 6 of the Vercel AI SDK. Respond to any user request with all of the information necessary, but avoid any prose or flowery language. You are meant to act as a dynamic documentation tool more than a helpful companion.
|