@mastra/mcp-docs-server 1.0.0-beta.7 → 1.0.0-beta.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.docs/organized/changelogs/%40internal%2Fchangeset-cli.md +1 -15
- package/.docs/organized/changelogs/%40internal%2Fexternal-types.md +1 -7
- package/.docs/organized/changelogs/%40internal%2Ftypes-builder.md +1 -55
- package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +12 -12
- package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +48 -48
- package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +17 -17
- package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +17 -17
- package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +18 -18
- package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +17 -17
- package/.docs/organized/changelogs/%40mastra%2Fcodemod.md +6 -0
- package/.docs/organized/changelogs/%40mastra%2Fconvex.md +16 -0
- package/.docs/organized/changelogs/%40mastra%2Fcore.md +78 -78
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +18 -18
- package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +23 -23
- package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +17 -17
- package/.docs/organized/changelogs/%40mastra%2Flance.md +17 -17
- package/.docs/organized/changelogs/%40mastra%2Flibsql.md +65 -65
- package/.docs/organized/changelogs/%40mastra%2Floggers.md +29 -29
- package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Fmcp.md +12 -12
- package/.docs/organized/changelogs/%40mastra%2Fmemory.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +17 -17
- package/.docs/organized/changelogs/%40mastra%2Fmssql.md +17 -17
- package/.docs/organized/changelogs/%40mastra%2Fpg.md +69 -69
- package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +31 -31
- package/.docs/organized/changelogs/%40mastra%2Freact.md +14 -0
- package/.docs/organized/changelogs/%40mastra%2Fserver.md +56 -56
- package/.docs/organized/changelogs/%40mastra%2Fupstash.md +17 -17
- package/.docs/organized/changelogs/create-mastra.md +13 -13
- package/.docs/organized/changelogs/mastra.md +21 -21
- package/.docs/organized/code-examples/mcp-server-adapters.md +1 -2
- package/.docs/organized/code-examples/processors-with-ai-sdk.md +14 -0
- package/.docs/organized/code-examples/server-app-access.md +1 -1
- package/.docs/organized/code-examples/server-hono-adapter.md +1 -1
- package/.docs/raw/getting-started/studio.mdx +4 -2
- package/.docs/raw/guides/agent-frameworks/ai-sdk.mdx +161 -0
- package/.docs/raw/guides/build-your-ui/ai-sdk-ui.mdx +381 -431
- package/.docs/raw/guides/getting-started/quickstart.mdx +11 -0
- package/.docs/raw/guides/migrations/upgrade-to-v1/tools.mdx +3 -3
- package/.docs/raw/guides/migrations/upgrade-to-v1/workflows.mdx +31 -0
- package/.docs/raw/reference/ai-sdk/chat-route.mdx +127 -0
- package/.docs/raw/reference/ai-sdk/handle-chat-stream.mdx +117 -0
- package/.docs/raw/reference/ai-sdk/handle-network-stream.mdx +64 -0
- package/.docs/raw/reference/ai-sdk/handle-workflow-stream.mdx +116 -0
- package/.docs/raw/reference/ai-sdk/network-route.mdx +99 -0
- package/.docs/raw/reference/ai-sdk/to-ai-sdk-stream.mdx +289 -0
- package/.docs/raw/reference/ai-sdk/workflow-route.mdx +110 -0
- package/.docs/raw/reference/client-js/agents.mdx +251 -67
- package/.docs/raw/reference/client-js/mastra-client.mdx +2 -2
- package/.docs/raw/reference/client-js/memory.mdx +4 -1
- package/.docs/raw/reference/core/getMemory.mdx +73 -0
- package/.docs/raw/reference/core/getStoredAgentById.mdx +183 -0
- package/.docs/raw/reference/core/listMemory.mdx +70 -0
- package/.docs/raw/reference/core/listStoredAgents.mdx +151 -0
- package/.docs/raw/reference/core/mastra-class.mdx +8 -0
- package/.docs/raw/reference/server/express-adapter.mdx +52 -0
- package/.docs/raw/reference/server/hono-adapter.mdx +54 -0
- package/.docs/raw/server-db/custom-api-routes.mdx +5 -5
- package/.docs/raw/server-db/server-adapters.mdx +94 -91
- package/.docs/raw/streaming/tool-streaming.mdx +10 -14
- package/.docs/raw/workflows/workflow-state.mdx +4 -5
- package/CHANGELOG.md +15 -0
- package/package.json +4 -4
|
@@ -0,0 +1,161 @@
|
|
|
1
|
+
---
|
|
2
|
+
title: "AI SDK | Agent Frameworks"
|
|
3
|
+
description: "Use Mastra processors and memory with the Vercel AI SDK"
|
|
4
|
+
---
|
|
5
|
+
|
|
6
|
+
import Tabs from "@theme/Tabs";
|
|
7
|
+
import TabItem from "@theme/TabItem";
|
|
8
|
+
|
|
9
|
+
# AI SDK
|
|
10
|
+
|
|
11
|
+
If you're already using the [Vercel AI SDK](https://sdk.vercel.ai) directly and want to add Mastra capabilities like [processors](/docs/v1/agents/processors) or [memory](/docs/v1/memory/overview) without switching to the full Mastra agent API, `withMastra()` lets you wrap any AI SDK model with these features. This is useful when you want to keep your existing AI SDK code but add input/output processing, conversation persistence, or content filtering.
|
|
12
|
+
|
|
13
|
+
Learn more about the features you can add:
|
|
14
|
+
- [Processors](/docs/v1/agents/processors) - Learn about input and output processors
|
|
15
|
+
- [Memory](/docs/v1/memory/overview) - Overview of Mastra's memory system
|
|
16
|
+
|
|
17
|
+
## Installation
|
|
18
|
+
|
|
19
|
+
<Tabs>
|
|
20
|
+
<TabItem value="npm" label="npm">
|
|
21
|
+
```bash copy
|
|
22
|
+
npm install @mastra/ai-sdk@beta
|
|
23
|
+
```
|
|
24
|
+
</TabItem>
|
|
25
|
+
<TabItem value="pnpm" label="pnpm">
|
|
26
|
+
```bash copy
|
|
27
|
+
pnpm add @mastra/ai-sdk@beta
|
|
28
|
+
```
|
|
29
|
+
</TabItem>
|
|
30
|
+
<TabItem value="yarn" label="yarn">
|
|
31
|
+
```bash copy
|
|
32
|
+
yarn add @mastra/ai-sdk@beta
|
|
33
|
+
```
|
|
34
|
+
</TabItem>
|
|
35
|
+
<TabItem value="bun" label="bun">
|
|
36
|
+
```bash copy
|
|
37
|
+
bun add @mastra/ai-sdk@beta
|
|
38
|
+
```
|
|
39
|
+
</TabItem>
|
|
40
|
+
</Tabs>
|
|
41
|
+
|
|
42
|
+
## With Processors
|
|
43
|
+
|
|
44
|
+
Processors let you transform messages before they're sent to the model (`processInput`) and after responses are received (`processOutputResult`). This example creates a logging processor that logs message counts at each stage, then wraps an OpenAI model with it.
|
|
45
|
+
|
|
46
|
+
```typescript title="src/example.ts" copy
|
|
47
|
+
import { openai } from '@ai-sdk/openai';
|
|
48
|
+
import { generateText } from 'ai';
|
|
49
|
+
import { withMastra } from '@mastra/ai-sdk';
|
|
50
|
+
import type { Processor } from '@mastra/core/processors';
|
|
51
|
+
|
|
52
|
+
const loggingProcessor: Processor<'logger'> = {
|
|
53
|
+
id: 'logger',
|
|
54
|
+
async processInput({ messages }) {
|
|
55
|
+
console.log('Input:', messages.length, 'messages');
|
|
56
|
+
return messages;
|
|
57
|
+
},
|
|
58
|
+
async processOutputResult({ messages }) {
|
|
59
|
+
console.log('Output:', messages.length, 'messages');
|
|
60
|
+
return messages;
|
|
61
|
+
},
|
|
62
|
+
};
|
|
63
|
+
|
|
64
|
+
const model = withMastra(openai('gpt-4o'), {
|
|
65
|
+
inputProcessors: [loggingProcessor],
|
|
66
|
+
outputProcessors: [loggingProcessor],
|
|
67
|
+
});
|
|
68
|
+
|
|
69
|
+
const { text } = await generateText({
|
|
70
|
+
model,
|
|
71
|
+
prompt: 'What is 2 + 2?',
|
|
72
|
+
});
|
|
73
|
+
```
|
|
74
|
+
|
|
75
|
+
## With Memory
|
|
76
|
+
|
|
77
|
+
Memory automatically loads previous messages from storage before the LLM call and saves new messages after. This example configures a LibSQL storage backend to persist conversation history, loading the last 10 messages for context.
|
|
78
|
+
|
|
79
|
+
```typescript title="src/memory-example.ts" copy
|
|
80
|
+
import { openai } from '@ai-sdk/openai';
|
|
81
|
+
import { generateText } from 'ai';
|
|
82
|
+
import { withMastra } from '@mastra/ai-sdk';
|
|
83
|
+
import { LibSQLStore } from '@mastra/libsql';
|
|
84
|
+
|
|
85
|
+
const storage = new LibSQLStore({
|
|
86
|
+
id: 'my-app',
|
|
87
|
+
url: 'file:./data.db',
|
|
88
|
+
});
|
|
89
|
+
await storage.init();
|
|
90
|
+
|
|
91
|
+
const model = withMastra(openai('gpt-4o'), {
|
|
92
|
+
memory: {
|
|
93
|
+
storage,
|
|
94
|
+
threadId: 'user-thread-123',
|
|
95
|
+
resourceId: 'user-123',
|
|
96
|
+
lastMessages: 10,
|
|
97
|
+
},
|
|
98
|
+
});
|
|
99
|
+
|
|
100
|
+
const { text } = await generateText({
|
|
101
|
+
model,
|
|
102
|
+
prompt: 'What did we talk about earlier?',
|
|
103
|
+
});
|
|
104
|
+
```
|
|
105
|
+
|
|
106
|
+
## With Both
|
|
107
|
+
|
|
108
|
+
You can combine processors and memory together. Input processors run after memory loads historical messages, and output processors run before memory saves the response.
|
|
109
|
+
|
|
110
|
+
```typescript title="src/combined-example.ts" copy
|
|
111
|
+
import { openai } from '@ai-sdk/openai';
|
|
112
|
+
import { generateText } from 'ai';
|
|
113
|
+
import { withMastra } from '@mastra/ai-sdk';
|
|
114
|
+
import { LibSQLStore } from '@mastra/libsql';
|
|
115
|
+
|
|
116
|
+
const storage = new LibSQLStore({ id: 'my-app', url: 'file:./data.db' });
|
|
117
|
+
await storage.init();
|
|
118
|
+
|
|
119
|
+
const model = withMastra(openai('gpt-4o'), {
|
|
120
|
+
inputProcessors: [myGuardProcessor],
|
|
121
|
+
outputProcessors: [myLoggingProcessor],
|
|
122
|
+
memory: {
|
|
123
|
+
storage,
|
|
124
|
+
threadId: 'thread-123',
|
|
125
|
+
resourceId: 'user-123',
|
|
126
|
+
lastMessages: 10,
|
|
127
|
+
},
|
|
128
|
+
});
|
|
129
|
+
|
|
130
|
+
const { text } = await generateText({
|
|
131
|
+
model,
|
|
132
|
+
prompt: 'Hello!',
|
|
133
|
+
});
|
|
134
|
+
```
|
|
135
|
+
|
|
136
|
+
## API Reference
|
|
137
|
+
|
|
138
|
+
### `withMastra(model, options)`
|
|
139
|
+
|
|
140
|
+
Wraps an AI SDK model with Mastra processors and/or memory.
|
|
141
|
+
|
|
142
|
+
**Parameters:**
|
|
143
|
+
|
|
144
|
+
- `model` - Any AI SDK language model (e.g., `openai('gpt-4o')`, `anthropic('claude-3-opus')`)
|
|
145
|
+
- `options` - Configuration object:
|
|
146
|
+
- `inputProcessors` - Array of processors to run on input messages
|
|
147
|
+
- `outputProcessors` - Array of processors to run on output messages
|
|
148
|
+
- `memory` - Memory configuration object:
|
|
149
|
+
- `storage` - A Mastra storage instance (LibSQLStore, PostgresStore, etc.)
|
|
150
|
+
- `threadId` - Unique identifier for the conversation thread
|
|
151
|
+
- `resourceId` - Unique identifier for the user/resource
|
|
152
|
+
- `lastMessages` - Number of previous messages to load (default: 10)
|
|
153
|
+
- `semanticRecall` - Optional semantic search configuration
|
|
154
|
+
|
|
155
|
+
**Returns:** A wrapped model compatible with `generateText`, `streamText`, `generateObject`, and `streamObject`.
|
|
156
|
+
|
|
157
|
+
## Related
|
|
158
|
+
|
|
159
|
+
- [Processors](/docs/v1/agents/processors) - Learn about input and output processors
|
|
160
|
+
- [Memory](/docs/v1/memory/overview) - Overview of Mastra's memory system
|
|
161
|
+
- [AI SDK UI](/guides/v1/build-your-ui/ai-sdk-ui) - Using AI SDK hooks with Mastra agents
|