ai 6.0.90 → 6.0.92
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +14 -0
- package/dist/index.js +1 -1
- package/dist/index.mjs +1 -1
- package/dist/internal/index.js +1 -1
- package/dist/internal/index.mjs +1 -1
- package/docs/02-foundations/02-providers-and-models.mdx +1 -0
- package/docs/02-getting-started/09-coding-agents.mdx +179 -0
- package/docs/03-agents/06-memory.mdx +33 -0
- package/package.json +2 -2
package/CHANGELOG.md
CHANGED
|
@@ -1,5 +1,19 @@
|
|
|
1
1
|
# ai
|
|
2
2
|
|
|
3
|
+
## 6.0.92
|
|
4
|
+
|
|
5
|
+
### Patch Changes
|
|
6
|
+
|
|
7
|
+
- Updated dependencies [765b013]
|
|
8
|
+
- @ai-sdk/gateway@3.0.51
|
|
9
|
+
|
|
10
|
+
## 6.0.91
|
|
11
|
+
|
|
12
|
+
### Patch Changes
|
|
13
|
+
|
|
14
|
+
- Updated dependencies [a433cd3]
|
|
15
|
+
- @ai-sdk/gateway@3.0.50
|
|
16
|
+
|
|
3
17
|
## 6.0.90
|
|
4
18
|
|
|
5
19
|
### Patch Changes
|
package/dist/index.js
CHANGED
|
@@ -1211,7 +1211,7 @@ var import_provider_utils3 = require("@ai-sdk/provider-utils");
|
|
|
1211
1211
|
var import_provider_utils4 = require("@ai-sdk/provider-utils");
|
|
1212
1212
|
|
|
1213
1213
|
// src/version.ts
|
|
1214
|
-
var VERSION = true ? "6.0.90" : "0.0.0-test";
|
|
1214
|
+
var VERSION = true ? "6.0.92" : "0.0.0-test";
|
|
1215
1215
|
|
|
1216
1216
|
// src/util/download/download.ts
|
|
1217
1217
|
var download = async ({
|
package/dist/index.mjs
CHANGED
|
@@ -1104,7 +1104,7 @@ import {
|
|
|
1104
1104
|
} from "@ai-sdk/provider-utils";
|
|
1105
1105
|
|
|
1106
1106
|
// src/version.ts
|
|
1107
|
-
var VERSION = true ? "6.0.90" : "0.0.0-test";
|
|
1107
|
+
var VERSION = true ? "6.0.92" : "0.0.0-test";
|
|
1108
1108
|
|
|
1109
1109
|
// src/util/download/download.ts
|
|
1110
1110
|
var download = async ({
|
package/dist/internal/index.js
CHANGED
|
@@ -153,7 +153,7 @@ var import_provider_utils2 = require("@ai-sdk/provider-utils");
|
|
|
153
153
|
var import_provider_utils3 = require("@ai-sdk/provider-utils");
|
|
154
154
|
|
|
155
155
|
// src/version.ts
|
|
156
|
-
var VERSION = true ? "6.0.90" : "0.0.0-test";
|
|
156
|
+
var VERSION = true ? "6.0.92" : "0.0.0-test";
|
|
157
157
|
|
|
158
158
|
// src/util/download/download.ts
|
|
159
159
|
var download = async ({
|
package/dist/internal/index.mjs
CHANGED
|
@@ -71,6 +71,7 @@ The open-source community has created the following providers:
|
|
|
71
71
|
- [Voyage AI Provider](/providers/community-providers/voyage-ai) (`voyage-ai-provider`)
|
|
72
72
|
- [Mem0 Provider](/providers/community-providers/mem0) (`@mem0/vercel-ai-provider`)
|
|
73
73
|
- [Letta Provider](/providers/community-providers/letta) (`@letta-ai/vercel-ai-sdk-provider`)
|
|
74
|
+
- [Hindsight Provider](/providers/community-providers/hindsight) (`@vectorize-io/hindsight-ai-sdk`)
|
|
74
75
|
- [Supermemory Provider](/providers/community-providers/supermemory) (`@supermemory/tools`)
|
|
75
76
|
- [Spark Provider](/providers/community-providers/spark) (`spark-ai-provider`)
|
|
76
77
|
- [AnthropicVertex Provider](/providers/community-providers/anthropic-vertex-ai) (`anthropic-vertex-ai`)
|
|
@@ -0,0 +1,179 @@
|
|
|
1
|
+
---
|
|
2
|
+
title: Coding Agents
|
|
3
|
+
description: Learn how to set up the AI SDK for use with coding agents, including installing skills, accessing bundled docs, and using DevTools.
|
|
4
|
+
---
|
|
5
|
+
|
|
6
|
+
# Getting Started with Coding Agents
|
|
7
|
+
|
|
8
|
+
This page explains how to get the most out of the AI SDK when working inside a coding agent (such as Claude Code, Codex, OpenCode, Cursor, or any other AI-assisted development environment).
|
|
9
|
+
|
|
10
|
+
## Install the AI SDK Skill
|
|
11
|
+
|
|
12
|
+
The fastest way to give your coding agent deep knowledge of the AI SDK is to install the official AI SDK skill. Skills are lightweight markdown files that load specialized instructions into your agent's context on demand — so your agent knows exactly how to use the SDK without you needing to explain it.
|
|
13
|
+
|
|
14
|
+
Install the AI SDK skill using `npx skills add`:
|
|
15
|
+
|
|
16
|
+
```bash
|
|
17
|
+
npx skills add vercel/ai
|
|
18
|
+
```
|
|
19
|
+
|
|
20
|
+
This installs the skill into your agent's specific skills directory (e.g., `.claude/skills`, `.codex/skills`). If you select more than one agent, the CLI creates symlinks so each agent can discover the skill. Use `-a` to specify agents directly — for example, `-a amp` installs into the universal `.agents/skills` directory. Use `-y` for non-interactive installation.
|
|
21
|
+
|
|
22
|
+
Once installed, any agent that supports the [Agent Skills](https://agentskills.io) format will automatically discover and load the skill when working on AI SDK tasks.
|
|
23
|
+
|
|
24
|
+
<Note>
|
|
25
|
+
Agent Skills use **progressive disclosure**: your agent loads only the skill's
|
|
26
|
+
name and description at startup. The full instructions are only pulled into
|
|
27
|
+
context when the task calls for it, keeping your agent fast and focused.
|
|
28
|
+
</Note>
|
|
29
|
+
|
|
30
|
+
## Docs and Source Code in `node_modules`
|
|
31
|
+
|
|
32
|
+
Once you've installed the `ai` package, you already have the full AI SDK documentation and source code available locally inside `node_modules`. Your coding agent can read these directly — no internet access required.
|
|
33
|
+
|
|
34
|
+
Install the `ai` package if you haven't already:
|
|
35
|
+
|
|
36
|
+
<div className="my-4">
|
|
37
|
+
<Tabs items={['pnpm', 'npm', 'yarn', 'bun']}>
|
|
38
|
+
<Tab>
|
|
39
|
+
<Snippet text="pnpm add ai" dark />
|
|
40
|
+
</Tab>
|
|
41
|
+
<Tab>
|
|
42
|
+
<Snippet text="npm install ai" dark />
|
|
43
|
+
</Tab>
|
|
44
|
+
<Tab>
|
|
45
|
+
<Snippet text="yarn add ai" dark />
|
|
46
|
+
</Tab>
|
|
47
|
+
<Tab>
|
|
48
|
+
<Snippet text="bun add ai" dark />
|
|
49
|
+
</Tab>
|
|
50
|
+
</Tabs>
|
|
51
|
+
</div>
|
|
52
|
+
|
|
53
|
+
After installation, your agent can reference the bundled source code and documentation at paths like:
|
|
54
|
+
|
|
55
|
+
```
|
|
56
|
+
node_modules/ai/src/ # Full source code organized by module
|
|
57
|
+
node_modules/ai/docs/ # Official documentation with examples
|
|
58
|
+
```
|
|
59
|
+
|
|
60
|
+
This means your agent can look up accurate API signatures, implementations, and usage examples directly from the installed package — ensuring it always uses the version of the SDK that's actually installed in your project.
|
|
61
|
+
|
|
62
|
+
## Install DevTools
|
|
63
|
+
|
|
64
|
+
AI SDK DevTools gives you full visibility into your AI SDK calls during development. It captures LLM requests, responses, tool calls, token usage, and multi-step interactions, and displays them in a local web UI.
|
|
65
|
+
|
|
66
|
+
<Note type="warning">
|
|
67
|
+
AI SDK DevTools is experimental and intended for local development only. Do
|
|
68
|
+
not use in production environments.
|
|
69
|
+
</Note>
|
|
70
|
+
|
|
71
|
+
Install the DevTools package:
|
|
72
|
+
|
|
73
|
+
<div className="my-4">
|
|
74
|
+
<Tabs items={['pnpm', 'npm', 'yarn', 'bun']}>
|
|
75
|
+
<Tab>
|
|
76
|
+
<Snippet text="pnpm add @ai-sdk/devtools" dark />
|
|
77
|
+
</Tab>
|
|
78
|
+
<Tab>
|
|
79
|
+
<Snippet text="npm install @ai-sdk/devtools" dark />
|
|
80
|
+
</Tab>
|
|
81
|
+
<Tab>
|
|
82
|
+
<Snippet text="yarn add @ai-sdk/devtools" dark />
|
|
83
|
+
</Tab>
|
|
84
|
+
<Tab>
|
|
85
|
+
<Snippet text="bun add @ai-sdk/devtools" dark />
|
|
86
|
+
</Tab>
|
|
87
|
+
</Tabs>
|
|
88
|
+
</div>
|
|
89
|
+
|
|
90
|
+
### Add the middleware
|
|
91
|
+
|
|
92
|
+
Wrap your language model with the DevTools middleware using [`wrapLanguageModel`](/docs/ai-sdk-core/middleware):
|
|
93
|
+
|
|
94
|
+
```ts
|
|
95
|
+
import { wrapLanguageModel, gateway } from 'ai';
|
|
96
|
+
import { devToolsMiddleware } from '@ai-sdk/devtools';
|
|
97
|
+
|
|
98
|
+
const model = wrapLanguageModel({
|
|
99
|
+
model: gateway('anthropic/claude-sonnet-4.5'),
|
|
100
|
+
middleware: devToolsMiddleware(),
|
|
101
|
+
});
|
|
102
|
+
```
|
|
103
|
+
|
|
104
|
+
Use the wrapped model with any AI SDK Core function:
|
|
105
|
+
|
|
106
|
+
```ts
|
|
107
|
+
import { generateText } from 'ai';
|
|
108
|
+
|
|
109
|
+
const result = await generateText({
|
|
110
|
+
model, // wrapped model with DevTools middleware
|
|
111
|
+
prompt: 'What cities are in the United States?',
|
|
112
|
+
});
|
|
113
|
+
```
|
|
114
|
+
|
|
115
|
+
### Launch the viewer
|
|
116
|
+
|
|
117
|
+
Start the DevTools viewer in a separate terminal:
|
|
118
|
+
|
|
119
|
+
```bash
|
|
120
|
+
npx @ai-sdk/devtools
|
|
121
|
+
```
|
|
122
|
+
|
|
123
|
+
Open [http://localhost:4983](http://localhost:4983) to inspect your AI SDK interactions in real time.
|
|
124
|
+
|
|
125
|
+
## Inspecting Tool Calls and Outputs
|
|
126
|
+
|
|
127
|
+
DevTools captures and displays the following for every call:
|
|
128
|
+
|
|
129
|
+
- **Input parameters and prompts** — the complete input sent to your LLM
|
|
130
|
+
- **Output content and tool calls** — generated text and tool invocations
|
|
131
|
+
- **Token usage and timing** — resource consumption and latency per step
|
|
132
|
+
- **Raw provider data** — complete request and response payloads
|
|
133
|
+
|
|
134
|
+
For multi-step agent interactions, DevTools groups everything into **runs** (a complete interaction) and **steps** (each individual LLM call within it), making it easy to trace exactly what your agent did and why.
|
|
135
|
+
|
|
136
|
+
You can also log tool results directly in code during development:
|
|
137
|
+
|
|
138
|
+
```ts
|
|
139
|
+
import { streamText, tool, stepCountIs } from 'ai';
|
|
140
|
+
import { z } from 'zod';
|
|
141
|
+
|
|
142
|
+
const result = streamText({
|
|
143
|
+
model,
|
|
144
|
+
prompt: "What's the weather in New York in celsius?",
|
|
145
|
+
tools: {
|
|
146
|
+
weather: tool({
|
|
147
|
+
description: 'Get the weather in a location (fahrenheit)',
|
|
148
|
+
inputSchema: z.object({
|
|
149
|
+
location: z.string().describe('The location to get the weather for'),
|
|
150
|
+
}),
|
|
151
|
+
execute: async ({ location }) => ({
|
|
152
|
+
location,
|
|
153
|
+
temperature: Math.round(Math.random() * (90 - 32) + 32),
|
|
154
|
+
}),
|
|
155
|
+
}),
|
|
156
|
+
},
|
|
157
|
+
stopWhen: stepCountIs(5),
|
|
158
|
+
onStepFinish: async ({ toolResults }) => {
|
|
159
|
+
if (toolResults.length) {
|
|
160
|
+
console.log(JSON.stringify(toolResults, null, 2));
|
|
161
|
+
}
|
|
162
|
+
},
|
|
163
|
+
});
|
|
164
|
+
```
|
|
165
|
+
|
|
166
|
+
The `onStepFinish` callback fires after each LLM step and prints any tool results to your terminal — useful for quick debugging without opening the DevTools UI.
|
|
167
|
+
|
|
168
|
+
<Note>
|
|
169
|
+
DevTools stores all AI interactions in a local `.devtools/generations.json`
|
|
170
|
+
file. It automatically adds `.devtools` to your `.gitignore` to prevent
|
|
171
|
+
committing sensitive interaction data.
|
|
172
|
+
</Note>
|
|
173
|
+
|
|
174
|
+
## Where to Next?
|
|
175
|
+
|
|
176
|
+
- Learn about [Agent Skills](https://agentskills.io/specification) to understand the full skill format.
|
|
177
|
+
- Read the [DevTools reference](/docs/ai-sdk-core/devtools) for a complete list of captured data and configuration options.
|
|
178
|
+
- Explore [Tools and Tool Calling](/docs/ai-sdk-core/tools-and-tool-calling) to build agents that can take real-world actions.
|
|
179
|
+
- Check out the [Add Skills to Your Agent](/docs/guides/agent-skills) cookbook guide for a step-by-step integration walkthrough.
|
|
@@ -175,6 +175,39 @@ Supermemory works with any AI SDK provider. The tools give the model `addMemory`
|
|
|
175
175
|
|
|
176
176
|
See the [Supermemory provider documentation](/providers/community-providers/supermemory) for full setup and configuration.
|
|
177
177
|
|
|
178
|
+
### Hindsight
|
|
179
|
+
|
|
180
|
+
[Hindsight](/providers/community-providers/hindsight) provides agents with persistent memory through five tools: `retain`, `recall`, `reflect`, `getMentalModel`, and `getDocument`. It can be self-hosted with Docker or used as a cloud service.
|
|
181
|
+
|
|
182
|
+
```bash
|
|
183
|
+
pnpm add @vectorize-io/hindsight-ai-sdk @vectorize-io/hindsight-client
|
|
184
|
+
```
|
|
185
|
+
|
|
186
|
+
```ts
|
|
187
|
+
__PROVIDER_IMPORT__;
|
|
188
|
+
import { HindsightClient } from '@vectorize-io/hindsight-client';
|
|
189
|
+
import { createHindsightTools } from '@vectorize-io/hindsight-ai-sdk';
|
|
190
|
+
import { ToolLoopAgent, stepCountIs } from 'ai';
|
|
191
|
+
import { openai } from '@ai-sdk/openai';
|
|
192
|
+
|
|
193
|
+
const client = new HindsightClient({ baseUrl: process.env.HINDSIGHT_API_URL });
|
|
194
|
+
|
|
195
|
+
const agent = new ToolLoopAgent({
|
|
196
|
+
model: __MODEL__,
|
|
197
|
+
tools: createHindsightTools({ client, bankId: 'user-123' }),
|
|
198
|
+
stopWhen: stepCountIs(10),
|
|
199
|
+
instructions: 'You are a helpful assistant with long-term memory.',
|
|
200
|
+
});
|
|
201
|
+
|
|
202
|
+
const result = await agent.generate({
|
|
203
|
+
prompt: 'Remember that my favorite editor is Neovim',
|
|
204
|
+
});
|
|
205
|
+
```
|
|
206
|
+
|
|
207
|
+
The `bankId` identifies the memory store and is typically a user ID. In multi-user apps, call `createHindsightTools` inside your request handler so each request gets the right bank. Hindsight works with any AI SDK provider.
|
|
208
|
+
|
|
209
|
+
See the [Hindsight provider documentation](/providers/community-providers/hindsight) for full setup and configuration.
|
|
210
|
+
|
|
178
211
|
**When to use memory providers**: these providers are a good fit when you want memory without building any storage infrastructure. The tradeoff is that the provider controls memory behavior, so you have less visibility into what gets stored and how it is retrieved. You also take on a dependency on an external service.
|
|
179
212
|
|
|
180
213
|
## Custom Tool
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "ai",
|
|
3
|
-
"version": "6.0.90",
|
|
3
|
+
"version": "6.0.92",
|
|
4
4
|
"description": "AI SDK by Vercel - The AI Toolkit for TypeScript and JavaScript",
|
|
5
5
|
"license": "Apache-2.0",
|
|
6
6
|
"sideEffects": false,
|
|
@@ -45,7 +45,7 @@
|
|
|
45
45
|
},
|
|
46
46
|
"dependencies": {
|
|
47
47
|
"@opentelemetry/api": "1.9.0",
|
|
48
|
-
"@ai-sdk/gateway": "3.0.49",
|
|
48
|
+
"@ai-sdk/gateway": "3.0.51",
|
|
49
49
|
"@ai-sdk/provider": "3.0.8",
|
|
50
50
|
"@ai-sdk/provider-utils": "4.0.15"
|
|
51
51
|
},
|