@mastra/mcp-docs-server 0.13.31 → 0.13.32-alpha.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.docs/organized/changelogs/%40internal%2Fexternal-types.md +1 -0
- package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +11 -11
- package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +25 -25
- package/.docs/organized/changelogs/%40mastra%2Fastra.md +11 -11
- package/.docs/organized/changelogs/%40mastra%2Fchroma.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +15 -15
- package/.docs/organized/changelogs/%40mastra%2Fcloud.md +11 -11
- package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +23 -23
- package/.docs/organized/changelogs/%40mastra%2Fcore.md +122 -122
- package/.docs/organized/changelogs/%40mastra%2Fcouchbase.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +20 -20
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +19 -19
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +19 -19
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +19 -19
- package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +31 -31
- package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fevals.md +19 -19
- package/.docs/organized/changelogs/%40mastra%2Flance.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Flibsql.md +23 -23
- package/.docs/organized/changelogs/%40mastra%2Floggers.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +16 -16
- package/.docs/organized/changelogs/%40mastra%2Fmcp-registry-registry.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fmcp.md +14 -14
- package/.docs/organized/changelogs/%40mastra%2Fmemory.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fmssql.md +11 -11
- package/.docs/organized/changelogs/%40mastra%2Fopensearch.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fpg.md +21 -21
- package/.docs/organized/changelogs/%40mastra%2Fpinecone.md +11 -11
- package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +35 -35
- package/.docs/organized/changelogs/%40mastra%2Fqdrant.md +11 -11
- package/.docs/organized/changelogs/%40mastra%2Frag.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Freact.md +20 -0
- package/.docs/organized/changelogs/%40mastra%2Fs3vectors.md +9 -0
- package/.docs/organized/changelogs/%40mastra%2Fserver.md +37 -37
- package/.docs/organized/changelogs/%40mastra%2Fturbopuffer.md +11 -11
- package/.docs/organized/changelogs/%40mastra%2Fupstash.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fvectorize.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fvoice-azure.md +13 -13
- package/.docs/organized/changelogs/%40mastra%2Fvoice-cloudflare.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fvoice-deepgram.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fvoice-elevenlabs.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fvoice-gladia.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fvoice-google-gemini-live.md +9 -0
- package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +19 -19
- package/.docs/organized/changelogs/%40mastra%2Fvoice-murf.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fvoice-openai-realtime.md +11 -11
- package/.docs/organized/changelogs/%40mastra%2Fvoice-openai.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fvoice-playai.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fvoice-sarvam.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fvoice-speechify.md +10 -10
- package/.docs/organized/changelogs/create-mastra.md +11 -11
- package/.docs/organized/changelogs/mastra.md +26 -26
- package/.docs/organized/code-examples/agent.md +55 -1
- package/.docs/organized/code-examples/agui.md +2 -2
- package/.docs/organized/code-examples/ai-elements.md +2 -2
- package/.docs/organized/code-examples/ai-sdk-useChat.md +2 -2
- package/.docs/organized/code-examples/ai-sdk-v5.md +2 -2
- package/.docs/organized/code-examples/assistant-ui.md +2 -2
- package/.docs/organized/code-examples/bird-checker-with-nextjs-and-eval.md +2 -2
- package/.docs/organized/code-examples/bird-checker-with-nextjs.md +2 -2
- package/.docs/organized/code-examples/client-side-tools.md +2 -2
- package/.docs/organized/code-examples/crypto-chatbot.md +2 -2
- package/.docs/organized/code-examples/heads-up-game.md +2 -2
- package/.docs/organized/code-examples/openapi-spec-writer.md +2 -2
- package/.docs/raw/agents/agent-memory.mdx +48 -31
- package/.docs/raw/agents/guardrails.mdx +8 -1
- package/.docs/raw/agents/networks.mdx +197 -128
- package/.docs/raw/agents/overview.mdx +10 -9
- package/.docs/raw/frameworks/agentic-uis/ai-sdk.mdx +92 -1
- package/.docs/raw/getting-started/installation.mdx +61 -68
- package/.docs/raw/memory/conversation-history.mdx +2 -2
- package/.docs/raw/memory/semantic-recall.mdx +36 -10
- package/.docs/raw/observability/ai-tracing/overview.mdx +220 -0
- package/.docs/raw/rag/chunking-and-embedding.mdx +19 -7
- package/.docs/raw/reference/cli/create-mastra.mdx +1 -1
- package/.docs/raw/reference/cli/mastra.mdx +1 -1
- package/.docs/raw/reference/client-js/agents.mdx +44 -25
- package/.docs/raw/reference/scorers/answer-relevancy.mdx +3 -6
- package/.docs/raw/reference/scorers/answer-similarity.mdx +7 -13
- package/.docs/raw/reference/scorers/bias.mdx +3 -6
- package/.docs/raw/reference/scorers/completeness.mdx +3 -6
- package/.docs/raw/reference/scorers/context-precision.mdx +6 -9
- package/.docs/raw/reference/scorers/context-relevance.mdx +12 -18
- package/.docs/raw/reference/scorers/faithfulness.mdx +3 -6
- package/.docs/raw/reference/scorers/hallucination.mdx +3 -6
- package/.docs/raw/reference/scorers/noise-sensitivity.mdx +13 -23
- package/.docs/raw/reference/scorers/prompt-alignment.mdx +16 -20
- package/.docs/raw/reference/scorers/tool-call-accuracy.mdx +4 -5
- package/.docs/raw/reference/scorers/toxicity.mdx +3 -6
- package/.docs/raw/reference/workflows/step.mdx +1 -1
- package/.docs/raw/reference/workflows/workflow-methods/sendEvent.mdx +23 -2
- package/.docs/raw/reference/workflows/workflow-methods/sleep.mdx +22 -4
- package/.docs/raw/reference/workflows/workflow-methods/sleepUntil.mdx +14 -4
- package/.docs/raw/reference/workflows/workflow-methods/waitForEvent.mdx +18 -1
- package/.docs/raw/server-db/runtime-context.mdx +13 -3
- package/.docs/raw/streaming/tool-streaming.mdx +30 -0
- package/.docs/raw/tools-mcp/overview.mdx +1 -1
- package/.docs/raw/workflows/overview.mdx +1 -1
- package/.docs/raw/workflows/suspend-and-resume.mdx +34 -23
- package/CHANGELOG.md +15 -0
- package/package.json +5 -5
- package/.docs/raw/workflows/pausing-execution.mdx +0 -142
**package/.docs/raw/frameworks/agentic-uis/ai-sdk.mdx**

````diff
@@ -82,6 +82,30 @@ const { error, status, sendMessage, messages, regenerate, stop } =
   }),
 });
 ```
+
+Pass extra agent stream execution options:
+
+```typescript
+const { error, status, sendMessage, messages, regenerate, stop } =
+  useChat({
+    transport: new DefaultChatTransport({
+      api: 'http://localhost:4111/chat',
+      prepareSendMessagesRequest({ messages }) {
+        return {
+          body: {
+            messages,
+            // Pass memory config
+            memory: {
+              thread: "user-1",
+              resource: "user-1"
+            }
+          },
+        }
+      }
+    }),
+  });
+```
+
 ### `workflowRoute()`
 
 Use the `workflowRoute()` utility to create a route handler that automatically formats the workflow stream into an AI SDK-compatible format.
````
````diff
@@ -155,7 +179,7 @@ const { error, status, sendMessage, messages, regenerate, stop } =
 
 ### Custom UI
 
-The `@mastra/ai-sdk` package transforms and emits Mastra streams (e.g workflow, network streams) into AI SDK-compatible format.
+The `@mastra/ai-sdk` package transforms and emits Mastra streams (e.g workflow, network streams) into AI SDK-compatible [uiMessages DataParts](https://ai-sdk.dev/docs/reference/ai-sdk-core/ui-message#datauipart) format.
 
 - **Top-level parts**: These are streamed via direct workflow and network stream transformations (e.g in `workflowRoute()` and `networkRoute()`)
   - `data-workflow`: Aggregates a workflow run with step inputs/outputs and final usage.
````
````diff
@@ -221,6 +245,38 @@ export const AgentTool = ({ id, text, status }: AgentDataPart) => {
   );
 };
 ```
+### Custom Tool streaming
+To stream custom data parts from within your tool execution function, use the
+`writer.custom()` method.
+
+```typescript {5,8,15} showLineNumbers copy
+import { createTool } from "@mastra/core/tools";
+
+export const testTool = createTool({
+  // ...
+  execute: async ({ context, writer }) => {
+    const { value } = context;
+
+    await writer?.custom({
+      type: "data-tool-progress",
+      status: "pending"
+    });
+
+    const response = await fetch(...);
+
+    await writer?.custom({
+      type: "data-tool-progress",
+      status: "success"
+    });
+
+    return {
+      value: ""
+    };
+  }
+});
+```
+
+For more information about tool streaming see [Tool streaming documentation](/docs/streaming/tool-streaming)
 
 ### Stream Transformations
 
````
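On the client, parts emitted with `writer.custom()` arrive through `useChat` as entries in `message.parts`. A minimal rendering sketch (the `data-tool-progress` type name comes from the hunk above; the `@ai-sdk/react` import and the exact payload shape on the part are assumptions to verify in your app):

```tsx
import { useChat } from "@ai-sdk/react";

export function ToolProgress() {
  const { messages } = useChat();

  return (
    <div>
      {messages.flatMap((message) =>
        message.parts
          // Keep only the custom parts streamed by the tool above (assumed type name)
          .filter((part) => (part as { type: string }).type === "data-tool-progress")
          .map((part, i) => (
            // Payload shape is an assumption; inspect the part in your app
            <span key={`${message.id}-${i}`}>{JSON.stringify(part)}</span>
          )),
      )}
    </div>
  );
}
```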
````diff
@@ -252,6 +308,41 @@ export async function POST(req: Request) {
 }
 ```
 
+### Client Side Stream Transformations
+
+If you have a client-side `response` from `agent.stream(...)` and want AI SDK-formatted parts without custom SSE parsing, wrap `response.processDataStream` into a `ReadableStream<ChunkType>` and pipe it through `toAISdkFormat`:
+
+```typescript filename="client-stream-to-ai-sdk.ts" copy
+import { createUIMessageStream } from 'ai';
+import { toAISdkFormat } from '@mastra/ai-sdk';
+import type { ChunkType, MastraModelOutput } from '@mastra/core/stream';
+
+// Client SDK agent stream
+const response = await agent.stream({ messages: 'What is the weather in Tokyo' });
+
+const chunkStream: ReadableStream<ChunkType> = new ReadableStream<ChunkType>({
+  start(controller) {
+    response.processDataStream({
+      onChunk: async (chunk) => {
+        controller.enqueue(chunk as ChunkType);
+      },
+    }).finally(() => controller.close());
+  },
+});
+
+const uiMessageStream = createUIMessageStream({
+  execute: async ({ writer }) => {
+    for await (const part of toAISdkFormat(chunkStream as unknown as MastraModelOutput, { from: 'agent' })) {
+      writer.write(part);
+    }
+  },
+});
+
+for await (const part of uiMessageStream) {
+  console.log(part);
+}
+```
+
 ## UI Hooks
 
 Mastra supports AI SDK UI hooks for connecting frontend components directly to agents using HTTP streams.
````
**package/.docs/raw/getting-started/installation.mdx**

````diff
@@ -15,8 +15,8 @@ For more control over setup, or to add Mastra to an existing project, see the [m
 
 ## Before you start
 
-- You'll need an API key from a [model provider](/models) to complete setup. We
-- Node.js 20 or later.
+- You'll need an API key from a [model provider](/models) to complete setup. We suggest starting with [OpenAI](https://platform.openai.com/api-keys), but if you need a provider that doesn't require a credit card, Google's [Gemini](https://aistudio.google.com/app/api-keys) is also an option.
+- [Install](https://nodejs.org/en/download) Node.js 20 or later.
 
 ## Install with `create mastra`
 
````
````diff
@@ -89,15 +89,15 @@ If you prefer not to use our automatic `create mastra` CLI tool, you can set up
 
 <Steps>
 
-### Create
+### Create project
 
 Create a new project and change directory:
 
 ```bash copy
-mkdir
+mkdir my-first-agent && cd my-first-agent
 ```
 
-Initialize a TypeScript project
+Initialize a TypeScript project and install the following dependencies:
 
 {/*
 LLM CONTEXT: This Tabs component shows manual installation commands for different package managers.
````
````diff
@@ -107,55 +107,42 @@ This helps users manually set up a Mastra project with their preferred package m
 */}
 
 <Tabs items={["npm", "pnpm", "yarn", "bun"]}>
-
   <Tab>
   ```bash copy
   npm init -y
-
-  npm install
-
-  npm install @mastra/core@latest zod@^3 @ai-sdk/openai@^1
+  npm install -D typescript @types/node mastra@latest
+  npm install @mastra/core@latest zod@^4
   ```
-
   </Tab>
   <Tab>
   ```bash copy
-  pnpm init
-
-  pnpm add
-
-  pnpm add @mastra/core@latest zod@^3 @ai-sdk/openai@^1
+  pnpm init -y
+  pnpm add -D typescript @types/node mastra@latest
+  pnpm add @mastra/core@latest zod@^4
  ```
-
   </Tab>
   <Tab>
   ```bash copy
   yarn init -y
-
-  yarn add
-
-  yarn add @mastra/core@latest zod@^3 @ai-sdk/openai@^1
+  yarn add -D typescript @types/node mastra@latest
+  yarn add @mastra/core@latest zod@^4
  ```
-
   </Tab>
   <Tab>
   ```bash copy
   bun init -y
-
-  bun add
-
-  bun add @mastra/core@latest zod@^3 @ai-sdk/openai@^1
+  bun add -d typescript @types/node mastra@latest
+  bun add @mastra/core@latest zod@^4
  ```
-
   </Tab>
 </Tabs>
 
-Add
+Add `dev` and `build` scripts to your `package.json` file:
 
-```json filename="package.json" copy
+```json filename="package.json" copy /,/ /"dev": "mastra dev",/ /"build": "mastra build"/
 {
   "scripts": {
-
+    "test": "echo \"Error: no test specified\" && exit 1",
     "dev": "mastra dev",
     "build": "mastra build"
   }
````
````diff
@@ -172,9 +159,8 @@ touch tsconfig.json
 
 Add the following configuration:
 
-Mastra requires `module` and `moduleResolution` values that support modern Node.js versions. Older settings like `CommonJS` or `node` are incompatible with Mastra’s packages and will cause resolution errors.
 
-```json
+```json filename="tsconfig.json" copy
 {
   "compilerOptions": {
     "target": "ES2022",
````
````diff
@@ -192,12 +178,13 @@ Mastra requires `module` and `moduleResolution` values that support modern Node.
   ]
 }
 ```
+<Callout type="info">
+Mastra requires modern `module` and `moduleResolution` settings. Using `CommonJS` or `node` will cause resolution errors.
+</Callout>
 
-
-
-### Set up your API key
+### Set API key
 
-Create `.env` file:
+Create an `.env` file:
 
 ```bash copy
 touch .env
````
````diff
@@ -206,12 +193,14 @@ touch .env
 Add your API key:
 
 ```bash filename=".env" copy
-
+GOOGLE_GENERATIVE_AI_API_KEY=<your-api-key>
 ```
 
->
+<Callout type="default">
+This guide uses Google Gemini, but you can use any supported [model provider](/models), including OpenAI, Anthropic, and more.
+</Callout>
 
-###
+### Add tool
 
 Create a `weather-tool.ts` file:
 
````
````diff
@@ -242,9 +231,11 @@ export const weatherTool = createTool({
 });
 ```
 
->
+<Callout type="info">
+We've shortened and simplified the `weatherTool` example here. You can see the complete weather tool under [Giving an Agent a Tool](/examples/agents/using-a-tool).
+</Callout>
 
-###
+### Add agent
 
 Create a `weather-agent.ts` file:
 
````
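The tool body itself is only referenced in this hunk, not shown. As a rough, hypothetical sketch of what a shortened `createTool` definition can look like (the field names follow the `createTool` usage shown earlier in this diff; the endpoint and schema are placeholders, not the package's actual example):

```typescript
import { createTool } from "@mastra/core/tools";
import { z } from "zod";

export const weatherTool = createTool({
  id: "get-weather",
  description: "Get current weather for a location",
  inputSchema: z.object({
    location: z.string().describe("City name"),
  }),
  execute: async ({ context }) => {
    // Placeholder endpoint; the full example resolves coordinates first
    const response = await fetch(
      `https://example.com/weather?city=${encodeURIComponent(context.location)}`,
    );
    return await response.json();
  },
});
```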
````diff
@@ -255,7 +246,6 @@ mkdir -p src/mastra/agents && touch src/mastra/agents/weather-agent.ts
 Add the following code:
 
 ```ts filename="src/mastra/agents/weather-agent.ts" showLineNumbers copy
-import { openai } from "@ai-sdk/openai";
 import { Agent } from "@mastra/core/agent";
 import { weatherTool } from "../tools/weather-tool";
 
````
````diff
@@ -266,21 +256,21 @@ export const weatherAgent = new Agent({
 
 Your primary function is to help users get weather details for specific locations. When responding:
 - Always ask for a location if none is provided
-- If the location name isn
+- If the location name isn't in English, please translate it
 - If giving a location with multiple parts (e.g. "New York, NY"), use the most relevant part (e.g. "New York")
 - Include relevant details like humidity, wind conditions, and precipitation
 - Keep responses concise but informative
 
 Use the weatherTool to fetch current weather data.
 `,
-  model:
+  model: "google/gemini-2.5-pro",
   tools: { weatherTool }
 });
 ```
 
-### Register
+### Register agent
 
-Create the Mastra entry point and register agent:
+Create the Mastra entry point and register your agent:
 
 ```bash copy
 touch src/mastra/index.ts
````
````diff
@@ -296,28 +286,31 @@ export const mastra = new Mastra({
   agents: { weatherAgent }
 });
 ```
+### Test your agent
+You can now launch the [Playground](/docs/server-db/local-dev-playground) and test your agent.
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+<Tabs items={["npm", "pnpm", "yarn", "bun"]}>
+  <Tab>
+  ```bash copy
+  npm run dev
+  ```
+  </Tab>
+  <Tab>
+  ```bash copy
+  pnpm run dev
+  ```
+  </Tab>
+  <Tab>
+  ```bash copy
+  yarn run dev
+  ```
+  </Tab>
+  <Tab>
+  ```bash copy
+  bun run dev
+  ```
+  </Tab>
+</Tabs>
 
-### Next steps
 
-
-- [Deploy to Mastra Cloud](/docs/deployment/overview)
+</Steps>
````
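Besides the Playground, a registered agent can be exercised programmatically. A minimal sketch, assuming the `mastra` instance and the `weatherAgent` registration from the hunk above (the import path and prompt are illustrative):

```typescript
import { mastra } from "./src/mastra";

// Look up the agent registered on the Mastra instance and run it once
const agent = mastra.getAgent("weatherAgent");
const result = await agent.generate("What's the weather in London?");

console.log(result.text);
```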
**package/.docs/raw/memory/semantic-recall.mdx**

````diff
@@ -98,40 +98,66 @@ const agent = new Agent({
 
 ### Embedder configuration
 
-Semantic recall relies on an [embedding model](/reference/memory/Memory#embedder) to convert messages into embeddings.
+Semantic recall relies on an [embedding model](/reference/memory/Memory#embedder) to convert messages into embeddings. Mastra supports embedding models through the model router using `provider/model` strings, or you can use any [embedding model](https://sdk.vercel.ai/docs/ai-sdk-core/embeddings) compatible with the AI SDK.
 
-
+#### Using the Model Router (Recommended)
 
-
-
+The simplest way is to use a `provider/model` string with autocomplete support:
+
+```ts {7}
+import { Memory } from "@mastra/memory";
+import { Agent } from "@mastra/core/agent";
+
+const agent = new Agent({
+  memory: new Memory({
+    // ... other memory options
+    embedder: "openai/text-embedding-3-small", // TypeScript autocomplete supported
+  }),
+});
 ```
 
-
+Supported embedding models:
+- **OpenAI**: `text-embedding-3-small`, `text-embedding-3-large`, `text-embedding-ada-002`
+- **Google**: `gemini-embedding-001`, `text-embedding-004`
+
+The model router automatically handles API key detection from environment variables (`OPENAI_API_KEY`, `GOOGLE_GENERATIVE_AI_API_KEY`).
+
+#### Using AI SDK Packages
+
+You can also use AI SDK embedding models directly:
 
 ```ts {3,8}
 import { Memory } from "@mastra/memory";
 import { Agent } from "@mastra/core/agent";
-import {
+import { openai } from "@ai-sdk/openai";
 
 const agent = new Agent({
   memory: new Memory({
     // ... other memory options
-    embedder:
+    embedder: openai.embedding("text-embedding-3-small"),
   }),
 });
 ```
 
-
+#### Using FastEmbed (Local)
+
+To use FastEmbed (a local embedding model), install `@mastra/fastembed`:
+
+```bash npm2yarn copy
+npm install @mastra/fastembed
+```
+
+Then configure it in your memory:
 
 ```ts {3,8}
 import { Memory } from "@mastra/memory";
 import { Agent } from "@mastra/core/agent";
-import {
+import { fastembed } from "@mastra/fastembed";
 
 const agent = new Agent({
   memory: new Memory({
     // ... other memory options
-    embedder:
+    embedder: fastembed,
   }),
 });
 ```
````
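For context, the `// ... other memory options` placeholder in these snippets typically covers semantic recall tuning. A sketch combining the model-router embedder with recall options (the `semanticRecall` option names are taken from Mastra's memory documentation generally; treat the exact shape as an assumption):

```typescript
import { Memory } from "@mastra/memory";
import { Agent } from "@mastra/core/agent";

const agent = new Agent({
  // ...
  memory: new Memory({
    embedder: "openai/text-embedding-3-small",
    options: {
      semanticRecall: {
        topK: 3,         // number of similar messages to recall
        messageRange: 2, // surrounding messages included for context
      },
    },
  }),
});
```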
**package/.docs/raw/observability/ai-tracing/overview.mdx**

````diff
@@ -367,6 +367,123 @@ execute: async ({ inputData, tracingContext }) => {
 
 Metadata set here will be shown in all configured exporters.
 
+### Automatic Metadata from RuntimeContext
+
+Instead of manually adding metadata to each span, you can configure Mastra to automatically extract values from RuntimeContext and attach them as metadata to all spans in a trace. This is useful for consistently tracking user identifiers, environment information, feature flags, or any request-scoped data across your entire trace.
+
+#### Configuration-Level Extraction
+
+Define which RuntimeContext keys to extract in your tracing configuration. These keys will be automatically included as metadata for all spans created with this configuration:
+
+```ts filename="src/mastra/index.ts" showLineNumbers copy
+export const mastra = new Mastra({
+  observability: {
+    configs: {
+      default: {
+        serviceName: 'my-service',
+        runtimeContextKeys: ['userId', 'environment', 'tenantId'],
+        exporters: [new DefaultExporter()],
+      },
+    },
+  },
+});
+```
+
+Now when you execute agents or workflows with a RuntimeContext, these values are automatically extracted:
+
+```ts showLineNumbers copy
+const runtimeContext = new RuntimeContext();
+runtimeContext.set('userId', 'user-123');
+runtimeContext.set('environment', 'production');
+runtimeContext.set('tenantId', 'tenant-456');
+
+// All spans in this trace automatically get userId, environment, and tenantId metadata
+const result = await agent.generate({
+  messages: [{ role: 'user', content: 'Hello' }],
+  runtimeContext,
+});
+```
+
+#### Per-Request Additions
+
+You can add trace-specific keys using `tracingOptions.runtimeContextKeys`. These are merged with the configuration-level keys:
+
+```ts showLineNumbers copy
+const runtimeContext = new RuntimeContext();
+runtimeContext.set('userId', 'user-123');
+runtimeContext.set('environment', 'production');
+runtimeContext.set('experimentId', 'exp-789');
+
+const result = await agent.generate({
+  messages: [{ role: 'user', content: 'Hello' }],
+  runtimeContext,
+  tracingOptions: {
+    runtimeContextKeys: ['experimentId'], // Adds to configured keys
+  },
+});
+
+// All spans now have: userId, environment, AND experimentId
+```
+
+#### Nested Value Extraction
+
+Use dot notation to extract nested values from RuntimeContext:
+
+```ts showLineNumbers copy
+export const mastra = new Mastra({
+  observability: {
+    configs: {
+      default: {
+        runtimeContextKeys: ['user.id', 'session.data.experimentId'],
+        exporters: [new DefaultExporter()],
+      },
+    },
+  },
+});
+
+const runtimeContext = new RuntimeContext();
+runtimeContext.set('user', { id: 'user-456', name: 'John Doe' });
+runtimeContext.set('session', { data: { experimentId: 'exp-999' } });
+
+// Metadata will include: { user: { id: 'user-456' }, session: { data: { experimentId: 'exp-999' } } }
+```
+
+#### How It Works
+
+1. **TraceState Computation**: At the start of a trace (root span creation), Mastra computes which keys to extract by merging configuration-level and per-request keys
+2. **Automatic Extraction**: Root spans (agent runs, workflow executions) automatically extract metadata from RuntimeContext
+3. **Child Span Extraction**: Child spans can also extract metadata if you pass `runtimeContext` when creating them
+4. **Metadata Precedence**: Explicit metadata passed to span options always takes precedence over extracted metadata
+
+#### Child Spans and Metadata Extraction
+
+When creating child spans within tools or workflow steps, you can pass the `runtimeContext` parameter to enable metadata extraction:
+
+```ts showLineNumbers copy
+execute: async ({ tracingContext, runtimeContext }) => {
+  // Create child span WITH runtimeContext - gets metadata extraction
+  const dbSpan = tracingContext.currentSpan?.createChildSpan({
+    type: 'generic',
+    name: 'database-query',
+    runtimeContext, // Pass to enable metadata extraction
+  });
+
+  const results = await db.query('SELECT * FROM users');
+  dbSpan?.end({ output: results });
+
+  // Or create child span WITHOUT runtimeContext - no metadata extraction
+  const cacheSpan = tracingContext.currentSpan?.createChildSpan({
+    type: 'generic',
+    name: 'cache-check',
+    // No runtimeContext - won't extract metadata
+  });
+
+  return results;
+}
+```
+
+This gives you fine-grained control over which child spans include RuntimeContext metadata. Root spans (agent/workflow executions) always extract metadata automatically, while child spans only extract when you explicitly pass `runtimeContext`.
+
 ## Creating Child Spans
 
 Child spans allow you to track fine-grained operations within your workflow steps or tools. They provide visibility into sub-operations like database queries, API calls, file operations, or complex calculations. This hierarchical structure helps you identify performance bottlenecks and understand the exact sequence of operations.
````
|
|
|
517
634
|
|
|
518
635
|
The trace ID is only available when tracing is enabled. If tracing is disabled or sampling excludes the request, `traceId` will be `undefined`.
|
|
519
636
|
|
|
637
|
+
## Integrating with External Tracing Systems
|
|
638
|
+
|
|
639
|
+
When running Mastra agents or workflows within applications that have existing distributed tracing (OpenTelemetry, Datadog, etc.), you can connect Mastra traces to your parent trace context. This creates a unified view of your entire request flow, making it easier to understand how Mastra operations fit into the broader system.
|
|
640
|
+
|
|
641
|
+
### Passing External Trace IDs
|
|
642
|
+
|
|
643
|
+
Use the `tracingOptions` parameter to specify the trace context from your parent system:
|
|
644
|
+
|
|
645
|
+
```ts showLineNumbers copy
|
|
646
|
+
// Get trace context from your existing tracing system
|
|
647
|
+
const parentTraceId = getCurrentTraceId(); // Your tracing system
|
|
648
|
+
const parentSpanId = getCurrentSpanId(); // Your tracing system
|
|
649
|
+
|
|
650
|
+
// Execute Mastra operations as part of the parent trace
|
|
651
|
+
const result = await agent.generate('Analyze this data', {
|
|
652
|
+
tracingOptions: {
|
|
653
|
+
traceId: parentTraceId,
|
|
654
|
+
parentSpanId: parentSpanId,
|
|
655
|
+
}
|
|
656
|
+
});
|
|
657
|
+
|
|
658
|
+
// The Mastra trace will now appear as a child in your distributed trace
|
|
659
|
+
```
|
|
660
|
+
|
|
661
|
+
### OpenTelemetry Integration
|
|
662
|
+
|
|
663
|
+
Integration with OpenTelemetry allows Mastra traces to appear seamlessly in your existing observability platform:
|
|
664
|
+
|
|
665
|
+
```ts showLineNumbers copy
|
|
666
|
+
import { trace } from '@opentelemetry/api';
|
|
667
|
+
|
|
668
|
+
// Get the current OpenTelemetry span
|
|
669
|
+
const currentSpan = trace.getActiveSpan();
|
|
670
|
+
const spanContext = currentSpan?.spanContext();
|
|
671
|
+
|
|
672
|
+
if (spanContext) {
|
|
673
|
+
const result = await agent.generate(userMessage, {
|
|
674
|
+
tracingOptions: {
|
|
675
|
+
traceId: spanContext.traceId,
|
|
676
|
+
parentSpanId: spanContext.spanId,
|
|
677
|
+
}
|
|
678
|
+
});
|
|
679
|
+
}
|
|
680
|
+
```
|
|
681
|
+
|
|
682
|
+
### Workflow Integration
|
|
683
|
+
|
|
684
|
+
Workflows support the same pattern for trace propagation:
|
|
685
|
+
|
|
686
|
+
```ts showLineNumbers copy
|
|
687
|
+
const workflow = mastra.getWorkflow('data-pipeline');
|
|
688
|
+
const run = await workflow.createRunAsync();
|
|
689
|
+
|
|
690
|
+
const result = await run.start({
|
|
691
|
+
inputData: { data: '...' },
|
|
692
|
+
tracingOptions: {
|
|
693
|
+
traceId: externalTraceId,
|
|
694
|
+
parentSpanId: externalSpanId,
|
|
695
|
+
}
|
|
696
|
+
});
|
|
697
|
+
```
|
|
698
|
+
|
|
699
|
+
### ID Format Requirements
|
|
700
|
+
|
|
701
|
+
Mastra validates trace and span IDs to ensure compatibility:
|
|
702
|
+
|
|
703
|
+
- **Trace IDs**: 1-32 hexadecimal characters (OpenTelemetry uses 32)
|
|
704
|
+
- **Span IDs**: 1-16 hexadecimal characters (OpenTelemetry uses 16)
|
|
705
|
+
|
|
706
|
+
Invalid IDs are handled gracefully — Mastra logs an error and continues:
|
|
707
|
+
- Invalid trace ID → generates a new trace ID
|
|
708
|
+
- Invalid parent span ID → ignores the parent relationship
|
|
709
|
+
|
|
710
|
+
This ensures tracing never crashes your application, even with malformed input.
|
|
711
|
+
|
|
712
|
+
### Example: Express Middleware
|
|
713
|
+
|
|
714
|
+
Here's a complete example showing trace propagation in an Express application:
|
|
715
|
+
|
|
716
|
+
```ts showLineNumbers copy
|
|
717
|
+
import { trace } from '@opentelemetry/api';
|
|
718
|
+
import express from 'express';
|
|
719
|
+
|
|
720
|
+
const app = express();
|
|
721
|
+
|
|
722
|
+
app.post('/api/analyze', async (req, res) => {
|
|
723
|
+
// Get current OpenTelemetry context
|
|
724
|
+
const currentSpan = trace.getActiveSpan();
|
|
725
|
+
const spanContext = currentSpan?.spanContext();
|
|
726
|
+
|
|
727
|
+
const result = await agent.generate(req.body.message, {
|
|
728
|
+
tracingOptions: spanContext ? {
|
|
729
|
+
traceId: spanContext.traceId,
|
|
730
|
+
parentSpanId: spanContext.spanId,
|
|
731
|
+
} : undefined,
|
|
732
|
+
});
|
|
733
|
+
|
|
734
|
+
res.json(result);
|
|
735
|
+
});
|
|
736
|
+
```
|
|
737
|
+
|
|
738
|
+
This creates a single distributed trace that includes both the HTTP request handling and the Mastra agent execution, viewable in your observability platform of choice.
|
|
739
|
+
|
|
520
740
|
## What Gets Traced
|
|
521
741
|
|
|
522
742
|
Mastra automatically creates spans for:
|