@mastra/mcp-docs-server 0.13.31-alpha.0 → 0.13.32-alpha.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68) hide show
  1. package/.docs/organized/changelogs/%40internal%2Fchangeset-cli.md +2 -0
  2. package/.docs/organized/changelogs/%40internal%2Fstorage-test-utils.md +8 -8
  3. package/.docs/organized/changelogs/%40internal%2Ftypes-builder.md +2 -0
  4. package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +23 -19
  5. package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +15 -15
  6. package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +14 -14
  7. package/.docs/organized/changelogs/%40mastra%2Fcore.md +57 -57
  8. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +17 -17
  9. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +21 -21
  10. package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +21 -21
  11. package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +21 -21
  12. package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +27 -27
  13. package/.docs/organized/changelogs/%40mastra%2Fevals.md +10 -10
  14. package/.docs/organized/changelogs/%40mastra%2Flibsql.md +14 -14
  15. package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +15 -15
  16. package/.docs/organized/changelogs/%40mastra%2Fpg.md +21 -21
  17. package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +25 -25
  18. package/.docs/organized/changelogs/%40mastra%2Freact.md +16 -0
  19. package/.docs/organized/changelogs/%40mastra%2Fserver.md +17 -17
  20. package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +11 -11
  21. package/.docs/organized/changelogs/create-mastra.md +11 -11
  22. package/.docs/organized/changelogs/mastra.md +26 -26
  23. package/.docs/organized/code-examples/agui.md +2 -2
  24. package/.docs/organized/code-examples/ai-elements.md +2 -2
  25. package/.docs/organized/code-examples/ai-sdk-useChat.md +2 -2
  26. package/.docs/organized/code-examples/ai-sdk-v5.md +2 -2
  27. package/.docs/organized/code-examples/assistant-ui.md +2 -2
  28. package/.docs/organized/code-examples/bird-checker-with-nextjs-and-eval.md +2 -2
  29. package/.docs/organized/code-examples/bird-checker-with-nextjs.md +2 -2
  30. package/.docs/organized/code-examples/client-side-tools.md +2 -2
  31. package/.docs/organized/code-examples/crypto-chatbot.md +2 -2
  32. package/.docs/organized/code-examples/heads-up-game.md +2 -2
  33. package/.docs/organized/code-examples/openapi-spec-writer.md +2 -2
  34. package/.docs/raw/agents/agent-memory.mdx +48 -31
  35. package/.docs/raw/agents/guardrails.mdx +8 -1
  36. package/.docs/raw/agents/networks.mdx +197 -128
  37. package/.docs/raw/agents/overview.mdx +14 -46
  38. package/.docs/raw/frameworks/agentic-uis/ai-sdk.mdx +92 -1
  39. package/.docs/raw/getting-started/installation.mdx +61 -68
  40. package/.docs/raw/memory/conversation-history.mdx +2 -2
  41. package/.docs/raw/memory/semantic-recall.mdx +36 -10
  42. package/.docs/raw/rag/chunking-and-embedding.mdx +19 -7
  43. package/.docs/raw/reference/client-js/agents.mdx +44 -25
  44. package/.docs/raw/reference/scorers/answer-relevancy.mdx +3 -6
  45. package/.docs/raw/reference/scorers/answer-similarity.mdx +7 -13
  46. package/.docs/raw/reference/scorers/bias.mdx +3 -6
  47. package/.docs/raw/reference/scorers/completeness.mdx +3 -6
  48. package/.docs/raw/reference/scorers/context-precision.mdx +6 -9
  49. package/.docs/raw/reference/scorers/context-relevance.mdx +12 -18
  50. package/.docs/raw/reference/scorers/faithfulness.mdx +3 -6
  51. package/.docs/raw/reference/scorers/hallucination.mdx +3 -6
  52. package/.docs/raw/reference/scorers/noise-sensitivity.mdx +13 -23
  53. package/.docs/raw/reference/scorers/prompt-alignment.mdx +16 -20
  54. package/.docs/raw/reference/scorers/tool-call-accuracy.mdx +4 -5
  55. package/.docs/raw/reference/scorers/toxicity.mdx +3 -6
  56. package/.docs/raw/reference/workflows/step.mdx +1 -1
  57. package/.docs/raw/reference/workflows/workflow-methods/sendEvent.mdx +23 -2
  58. package/.docs/raw/reference/workflows/workflow-methods/sleep.mdx +22 -4
  59. package/.docs/raw/reference/workflows/workflow-methods/sleepUntil.mdx +14 -4
  60. package/.docs/raw/reference/workflows/workflow-methods/waitForEvent.mdx +18 -1
  61. package/.docs/raw/server-db/runtime-context.mdx +13 -3
  62. package/.docs/raw/streaming/tool-streaming.mdx +30 -0
  63. package/.docs/raw/tools-mcp/overview.mdx +3 -39
  64. package/.docs/raw/workflows/overview.mdx +137 -229
  65. package/.docs/raw/workflows/suspend-and-resume.mdx +34 -23
  66. package/CHANGELOG.md +14 -0
  67. package/package.json +5 -5
  68. package/.docs/raw/workflows/pausing-execution.mdx +0 -142
@@ -13,12 +13,12 @@ Agents use LLMs and tools to solve open-ended tasks. They reason about goals, de
13
13
 
14
14
  > **📹 Watch**: → An introduction to agents, and how they compare to workflows [YouTube (7 minutes)](https://youtu.be/0jg2g3sNvgw)
15
15
 
16
- ## Getting started
16
+ ## Setting up agents
17
17
 
18
18
  <Tabs items={["Mastra model router", "Vercel AI SDK"]}>
19
19
  <Tabs.Tab>
20
20
  <Steps>
21
- ### Install dependencies
21
+ ### Install dependencies [#install-dependencies-mastra-router]
22
22
 
23
23
  Add the Mastra core package to your project:
24
24
 
@@ -26,7 +26,7 @@ Add the Mastra core package to your project:
26
26
  npm install @mastra/core
27
27
  ```
28
28
 
29
- ### Set your API key
29
+ ### Set your API key [#set-api-key-mastra-router]
30
30
 
31
31
  Mastra's model router auto-detects environment variables for your chosen provider. For OpenAI, set `OPENAI_API_KEY`:
32
32
 
@@ -36,7 +36,7 @@ OPENAI_API_KEY=<your-api-key>
36
36
 
37
37
  > Mastra supports more than 600 models. Choose from the full list [here](/models).
38
38
 
39
- ### Create an agent
39
+ ### Creating an agent [#creating-an-agent-mastra-router]
40
40
 
41
41
  Create an agent by instantiating the `Agent` class with system `instructions` and a `model`:
42
42
 
@@ -53,7 +53,8 @@ export const testAgent = new Agent({
53
53
  </Tabs.Tab>
54
54
  <Tabs.Tab>
55
55
  <Steps>
56
- ### Install dependencies
56
+
57
+ ### Install dependencies [#install-dependencies-ai-sdk]
57
58
 
58
59
  Include the Mastra core package alongside the Vercel AI SDK provider you want to use:
59
60
 
@@ -61,7 +62,7 @@ Include the Mastra core package alongside the Vercel AI SDK provider you want to
61
62
  npm install @mastra/core @ai-sdk/openai
62
63
  ```
63
64
 
64
- ### Set your API key
65
+ ### Set your API key [#set-api-key-ai-sdk]
65
66
 
66
67
  Set the corresponding environment variable for your provider. For OpenAI via the AI SDK:
67
68
 
@@ -71,7 +72,7 @@ OPENAI_API_KEY=<your-api-key>
71
72
 
72
73
  > See the [AI SDK Providers](https://ai-sdk.dev/providers/ai-sdk-providers) in the Vercel AI SDK docs for additional configuration options.
73
74
 
74
- ### Create an agent
75
+ ### Creating an agent [#creating-an-agent-ai-sdk]
75
76
 
76
77
  To create an agent in Mastra, use the `Agent` class. Every agent must include `instructions` to define its behavior, and a `model` parameter to specify the LLM provider and model. When using the Vercel AI SDK, provide the client to your agent's `model` field:
77
78
 
@@ -125,8 +126,8 @@ instructions: {
125
126
  content:
126
127
  "You are an expert code reviewer. Analyze code for bugs, performance issues, and best practices.",
127
128
  providerOptions: {
128
- openai: { reasoning_effort: "high" }, // OpenAI's reasoning models
129
- anthropic: { cache_control: { type: "ephemeral" } } // Anthropic's prompt caching
129
+ openai: { reasoningEffort: "high" }, // OpenAI's reasoning models
130
+ anthropic: { cacheControl: { type: "ephemeral" } } // Anthropic's prompt caching
130
131
  }
131
132
  }
132
133
  ```
@@ -137,7 +138,7 @@ instructions: {
137
138
 
138
139
  Register your agent in the Mastra instance to make it available throughout your application. Once registered, it can be called from workflows, tools, or other agents, and has access to shared resources such as memory, logging, and observability features:
139
140
 
140
- ```typescript showLineNumbers filename="src/mastra/index.ts" copy
141
+ ```typescript {6} showLineNumbers filename="src/mastra/index.ts" copy
141
142
  import { Mastra } from "@mastra/core/mastra";
142
143
  import { testAgent } from './agents/test-agent';
143
144
 
@@ -156,7 +157,7 @@ const testAgent = mastra.getAgent("testAgent");
156
157
  ```
157
158
  <Callout type="info">
158
159
  <p>
159
- `mastra.getAgent()` is preferred over a direct import, since it preserves the Mastra instance configuration (tools registered, telemetry, vector stores configuration for agent memory, etc.)
160
+ `mastra.getAgent()` is preferred over a direct import, since it provides access to the Mastra instance configuration (logger, telemetry, storage, registered agents, and vector stores).
160
161
  </p>
161
162
  </Callout>
162
163
 
@@ -412,42 +413,9 @@ export const testAgent = new Agent({
412
413
 
413
414
  > See [Runtime Context](../server-db/runtime-context.mdx) for more information.
414
415
 
415
- ## Testing agents locally
416
- There are two ways to run and test agents.
417
-
418
- <Steps>
419
-
420
- ### Mastra Playground
421
-
422
- With the Mastra Dev Server running you can test an agent from the Mastra Playground by visiting [http://localhost:4111/agents](http://localhost:4111/agents) in your browser.
423
-
424
- > For more information, see the [Local Dev Playground](/docs/server-db/local-dev-playground) documentation.
425
-
426
- ### Command line
427
-
428
- Create an agent response using `.generate()` or `.stream()`.
429
-
430
- ```typescript {7} filename="src/test-agent.ts" showLineNumbers copy
431
- import "dotenv/config";
432
-
433
- import { mastra } from "./mastra";
434
-
435
- const agent = mastra.getAgent("testAgent");
436
-
437
- const response = await agent.generate("Help me organize my day");
438
-
439
- console.log(response.text);
440
- ```
441
-
442
- > See [.generate()](../../reference/agents/generate.mdx) or [.stream()](../../reference/agents/stream.mdx) for more information.
443
-
444
- To test this agent, run the following:
445
-
446
- ```bash copy
447
- npx tsx src/test-agent.ts
448
- ```
416
+ ## Testing with Mastra Playground
449
417
 
450
- </Steps>
418
+ Use the Mastra [Playground](../server-db/local-dev-playground.mdx) to test agents with different messages, inspect tool calls and responses, and debug agent behavior.
451
419
 
452
420
  ## Related
453
421
 
@@ -82,6 +82,30 @@ const { error, status, sendMessage, messages, regenerate, stop } =
82
82
  }),
83
83
  });
84
84
  ```
85
+
86
+ Pass extra agent stream execution options:
87
+
88
+ ```typescript
89
+ const { error, status, sendMessage, messages, regenerate, stop } =
90
+ useChat({
91
+ transport: new DefaultChatTransport({
92
+ api: 'http://localhost:4111/chat',
93
+ prepareSendMessagesRequest({ messages }) {
94
+ return {
95
+ body: {
96
+ messages,
97
+ // Pass memory config
98
+ memory: {
99
+ thread: "user-1",
100
+ resource: "user-1"
101
+ }
102
+ },
103
+ }
104
+ }
105
+ }),
106
+ });
107
+ ```
108
+
85
109
  ### `workflowRoute()`
86
110
 
87
111
  Use the `workflowRoute()` utility to create a route handler that automatically formats the workflow stream into an AI SDK-compatible format.
@@ -155,7 +179,7 @@ const { error, status, sendMessage, messages, regenerate, stop } =
155
179
 
156
180
  ### Custom UI
157
181
 
158
- The `@mastra/ai-sdk` package transforms and emits Mastra streams (e.g workflow, network streams) into AI SDK-compatible format.
182
+ The `@mastra/ai-sdk` package transforms and emits Mastra streams (e.g. workflow, network streams) into AI SDK-compatible [uiMessages DataParts](https://ai-sdk.dev/docs/reference/ai-sdk-core/ui-message#datauipart) format.
159
183
 
160
184
  - **Top-level parts**: These are streamed via direct workflow and network stream transformations (e.g. in `workflowRoute()` and `networkRoute()`)
161
185
  - `data-workflow`: Aggregates a workflow run with step inputs/outputs and final usage.
@@ -221,6 +245,38 @@ export const AgentTool = ({ id, text, status }: AgentDataPart) => {
221
245
  );
222
246
  };
223
247
  ```
248
+ ### Custom Tool streaming
249
+ To stream custom data parts from within your tool execution function, use the
250
+ `writer.custom()` method.
251
+
252
+ ```typescript {5,8,15} showLineNumbers copy
253
+ import { createTool } from "@mastra/core/tools";
254
+
255
+ export const testTool = createTool({
256
+ // ...
257
+ execute: async ({ context, writer }) => {
258
+ const { value } = context;
259
+
260
+ await writer?.custom({
261
+ type: "data-tool-progress",
262
+ status: "pending"
263
+ });
264
+
265
+ const response = await fetch(...);
266
+
267
+ await writer?.custom({
268
+ type: "data-tool-progress",
269
+ status: "success"
270
+ });
271
+
272
+ return {
273
+ value: ""
274
+ };
275
+ }
276
+ });
277
+ ```
278
+
279
+ For more information about tool streaming, see the [Tool streaming documentation](/docs/streaming/tool-streaming).
224
280
 
225
281
  ### Stream Transformations
226
282
 
@@ -252,6 +308,41 @@ export async function POST(req: Request) {
252
308
  }
253
309
  ```
254
310
 
311
+ ### Client Side Stream Transformations
312
+
313
+ If you have a client-side `response` from `agent.stream(...)` and want AI SDK-formatted parts without custom SSE parsing, wrap `response.processDataStream` into a `ReadableStream<ChunkType>` and pipe it through `toAISdkFormat`:
314
+
315
+ ```typescript filename="client-stream-to-ai-sdk.ts" copy
316
+ import { createUIMessageStream } from 'ai';
317
+ import { toAISdkFormat } from '@mastra/ai-sdk';
318
+ import type { ChunkType, MastraModelOutput } from '@mastra/core/stream';
319
+
320
+ // Client SDK agent stream
321
+ const response = await agent.stream({ messages: 'What is the weather in Tokyo' });
322
+
323
+ const chunkStream: ReadableStream<ChunkType> = new ReadableStream<ChunkType>({
324
+ start(controller) {
325
+ response.processDataStream({
326
+ onChunk: async (chunk) => {
327
+ controller.enqueue(chunk as ChunkType);
328
+ },
329
+ }).finally(() => controller.close());
330
+ },
331
+ });
332
+
333
+ const uiMessageStream = createUIMessageStream({
334
+ execute: async ({ writer }) => {
335
+ for await (const part of toAISdkFormat(chunkStream as unknown as MastraModelOutput, { from: 'agent' })) {
336
+ writer.write(part);
337
+ }
338
+ },
339
+ });
340
+
341
+ for await (const part of uiMessageStream) {
342
+ console.log(part);
343
+ }
344
+ ```
345
+
255
346
  ## UI Hooks
256
347
 
257
348
  Mastra supports AI SDK UI hooks for connecting frontend components directly to agents using HTTP streams.
@@ -15,8 +15,8 @@ For more control over setup, or to add Mastra to an existing project, see the [m
15
15
 
16
16
  ## Before you start
17
17
 
18
- - You'll need an API key from a [model provider](/models) to complete setup. We recommend starting with [Gemini](https://aistudio.google.com/app/api-keys), as you likely already have a Google account and they don't require a card.
19
- - Node.js 20 or later.
18
+ - You'll need an API key from a [model provider](/models) to complete setup. We suggest starting with [OpenAI](https://platform.openai.com/api-keys), but if you need a provider that doesn't require a credit card, Google's [Gemini](https://aistudio.google.com/app/api-keys) is also an option.
19
+ - [Install](https://nodejs.org/en/download) Node.js 20 or later.
20
20
 
21
21
  ## Install with `create mastra`
22
22
 
@@ -89,15 +89,15 @@ If you prefer not to use our automatic `create mastra` CLI tool, you can set up
89
89
 
90
90
  <Steps>
91
91
 
92
- ### Create a new project
92
+ ### Create project
93
93
 
94
94
  Create a new project and change directory:
95
95
 
96
96
  ```bash copy
97
- mkdir hello-mastra && cd hello-mastra
97
+ mkdir my-first-agent && cd my-first-agent
98
98
  ```
99
99
 
100
- Initialize a TypeScript project including the `@mastra/core` package:
100
+ Initialize a TypeScript project and install the following dependencies:
101
101
 
102
102
  {/*
103
103
  LLM CONTEXT: This Tabs component shows manual installation commands for different package managers.
@@ -107,55 +107,42 @@ This helps users manually set up a Mastra project with their preferred package m
107
107
  */}
108
108
 
109
109
  <Tabs items={["npm", "pnpm", "yarn", "bun"]}>
110
-
111
110
  <Tab>
112
111
  ```bash copy
113
112
  npm init -y
114
-
115
- npm install typescript tsx @types/node mastra@latest --save-dev
116
-
117
- npm install @mastra/core@latest zod@^3 @ai-sdk/openai@^1
113
+ npm install -D typescript @types/node mastra@latest
114
+ npm install @mastra/core@latest zod@^4
118
115
  ```
119
-
120
116
  </Tab>
121
117
  <Tab>
122
118
  ```bash copy
123
- pnpm init
124
-
125
- pnpm add typescript tsx @types/node mastra@latest --save-dev
126
-
127
- pnpm add @mastra/core@latest zod@^3 @ai-sdk/openai@^1
119
+ pnpm init -y
120
+ pnpm add -D typescript @types/node mastra@latest
121
+ pnpm add @mastra/core@latest zod@^4
128
122
  ```
129
-
130
123
  </Tab>
131
124
  <Tab>
132
125
  ```bash copy
133
126
  yarn init -y
134
-
135
- yarn add typescript tsx @types/node mastra@latest --dev
136
-
137
- yarn add @mastra/core@latest zod@^3 @ai-sdk/openai@^1
127
+ yarn add -D typescript @types/node mastra@latest
128
+ yarn add @mastra/core@latest zod@^4
138
129
  ```
139
-
140
130
  </Tab>
141
131
  <Tab>
142
132
  ```bash copy
143
133
  bun init -y
144
-
145
- bun add typescript tsx @types/node mastra@latest --dev
146
-
147
- bun add @mastra/core@latest zod@^3 @ai-sdk/openai@^1
134
+ bun add -d typescript @types/node mastra@latest
135
+ bun add @mastra/core@latest zod@^4
148
136
  ```
149
-
150
137
  </Tab>
151
138
  </Tabs>
152
139
 
153
- Add the `dev` and `build` scripts to `package.json`:
140
+ Add `dev` and `build` scripts to your `package.json` file:
154
141
 
155
- ```json filename="package.json" copy
142
+ ```json filename="package.json" copy /,/ /"dev": "mastra dev",/ /"build": "mastra build"/
156
143
  {
157
144
  "scripts": {
158
- // ...
145
+ "test": "echo \"Error: no test specified\" && exit 1",
159
146
  "dev": "mastra dev",
160
147
  "build": "mastra build"
161
148
  }
@@ -172,9 +159,8 @@ touch tsconfig.json
172
159
 
173
160
  Add the following configuration:
174
161
 
175
- Mastra requires `module` and `moduleResolution` values that support modern Node.js versions. Older settings like `CommonJS` or `node` are incompatible with Mastra’s packages and will cause resolution errors.
176
162
 
177
- ```json {4-5} filename="tsconfig.json" copy
163
+ ```json filename="tsconfig.json" copy
178
164
  {
179
165
  "compilerOptions": {
180
166
  "target": "ES2022",
@@ -192,12 +178,13 @@ Mastra requires `module` and `moduleResolution` values that support modern Node.
192
178
  ]
193
179
  }
194
180
  ```
181
+ <Callout type="info">
182
+ Mastra requires modern `module` and `moduleResolution` settings. Using `CommonJS` or `node` will cause resolution errors.
183
+ </Callout>
195
184
 
196
- > This TypeScript configuration is optimized for Mastra projects, using modern module resolution and strict type checking.
197
-
198
- ### Set up your API key
185
+ ### Set API key
199
186
 
200
- Create `.env` file:
187
+ Create an `.env` file:
201
188
 
202
189
  ```bash copy
203
190
  touch .env
@@ -206,12 +193,14 @@ touch .env
206
193
  Add your API key:
207
194
 
208
195
  ```bash filename=".env" copy
209
- OPENAI_API_KEY=<your-api-key>
196
+ GOOGLE_GENERATIVE_AI_API_KEY=<your-api-key>
210
197
  ```
211
198
 
212
- > This example uses OpenAI. Each LLM provider uses a unique name. See [Model Capabilities](/docs/getting-started/model-capability) for more information.
199
+ <Callout type="default">
200
+ This guide uses Google Gemini, but you can use any supported [model provider](/models), including OpenAI, Anthropic, and more.
201
+ </Callout>
213
202
 
214
- ### Create a Tool
203
+ ### Add tool
215
204
 
216
205
  Create a `weather-tool.ts` file:
217
206
 
@@ -242,9 +231,11 @@ export const weatherTool = createTool({
242
231
  });
243
232
  ```
244
233
 
245
- > See the full weatherTool example in [Giving an Agent a Tool](/examples/agents/using-a-tool).
234
+ <Callout type="info">
235
+ We've shortened and simplified the `weatherTool` example here. You can see the complete weather tool under [Giving an Agent a Tool](/examples/agents/using-a-tool).
236
+ </Callout>
246
237
 
247
- ### Create an Agent
238
+ ### Add agent
248
239
 
249
240
  Create a `weather-agent.ts` file:
250
241
 
@@ -255,7 +246,6 @@ mkdir -p src/mastra/agents && touch src/mastra/agents/weather-agent.ts
255
246
  Add the following code:
256
247
 
257
248
  ```ts filename="src/mastra/agents/weather-agent.ts" showLineNumbers copy
258
- import { openai } from "@ai-sdk/openai";
259
249
  import { Agent } from "@mastra/core/agent";
260
250
  import { weatherTool } from "../tools/weather-tool";
261
251
 
@@ -266,21 +256,21 @@ export const weatherAgent = new Agent({
266
256
 
267
257
  Your primary function is to help users get weather details for specific locations. When responding:
268
258
  - Always ask for a location if none is provided
269
- - If the location name isnt in English, please translate it
259
+ - If the location name isn't in English, please translate it
270
260
  - If giving a location with multiple parts (e.g. "New York, NY"), use the most relevant part (e.g. "New York")
271
261
  - Include relevant details like humidity, wind conditions, and precipitation
272
262
  - Keep responses concise but informative
273
263
 
274
264
  Use the weatherTool to fetch current weather data.
275
265
  `,
276
- model: openai('gpt-4o-mini'),
266
+ model: "google/gemini-2.5-pro",
277
267
  tools: { weatherTool }
278
268
  });
279
269
  ```
280
270
 
281
- ### Register the Agent
271
+ ### Register agent
282
272
 
283
- Create the Mastra entry point and register agent:
273
+ Create the Mastra entry point and register your agent:
284
274
 
285
275
  ```bash copy
286
276
  touch src/mastra/index.ts
@@ -296,28 +286,31 @@ export const mastra = new Mastra({
296
286
  agents: { weatherAgent }
297
287
  });
298
288
  ```
289
+ ### Test your agent
290
+ You can now launch the [Playground](/docs/server-db/local-dev-playground) and test your agent.
299
291
 
300
- You can now launch the [Mastra Development Server](/docs/server-db/local-dev-playground) and test your agent using the Mastra Playground.
301
-
302
- </Steps>
303
-
304
- ## Add to an existing project
305
-
306
- Mastra can be installed and integrated into a wide range of projects. Below are links to integration guides to help you get started:
307
-
308
- - [Next.js](/docs/frameworks/web-frameworks/next-js)
309
- - [Vite + React](/docs/frameworks/web-frameworks/vite-react)
310
- - [Astro](/docs/frameworks/web-frameworks/astro)
311
- - [Express](/docs/frameworks/servers/express)
312
-
313
-
314
- ### `mastra init`
315
-
316
- To install Mastra in an existing project, use the `mastra init` command.
317
-
318
- > See [mastra init](/reference/cli/init) for more information.
292
+ <Tabs items={["npm", "pnpm", "yarn", "bun"]}>
293
+ <Tab>
294
+ ```bash copy
295
+ npm run dev
296
+ ```
297
+ </Tab>
298
+ <Tab>
299
+ ```bash copy
300
+ pnpm run dev
301
+ ```
302
+ </Tab>
303
+ <Tab>
304
+ ```bash copy
305
+ yarn run dev
306
+ ```
307
+ </Tab>
308
+ <Tab>
309
+ ```bash copy
310
+ bun run dev
311
+ ```
312
+ </Tab>
313
+ </Tabs>
319
314
 
320
- ### Next steps
321
315
 
322
- - [Local Development](/docs/server-db/local-dev-playground)
323
- - [Deploy to Mastra Cloud](/docs/deployment/overview)
316
+ </Steps>
@@ -16,8 +16,8 @@ export const testAgent = new Agent({
16
16
  // ...
17
17
  memory: new Memory({
18
18
  options: {
19
- lastMessages: 100
19
+ lastMessages: 20
20
20
  },
21
21
  })
22
22
  });
23
- ```
23
+ ```
@@ -98,40 +98,66 @@ const agent = new Agent({
98
98
 
99
99
  ### Embedder configuration
100
100
 
101
- Semantic recall relies on an [embedding model](/reference/memory/Memory#embedder) to convert messages into embeddings. You can specify any [embedding model](https://sdk.vercel.ai/docs/ai-sdk-core/embeddings) compatible with the AI SDK.
101
+ Semantic recall relies on an [embedding model](/reference/memory/Memory#embedder) to convert messages into embeddings. Mastra supports embedding models through the model router using `provider/model` strings, or you can use any [embedding model](https://sdk.vercel.ai/docs/ai-sdk-core/embeddings) compatible with the AI SDK.
102
102
 
103
- To use FastEmbed (a local embedding model), install `@mastra/fastembed`:
103
+ #### Using the Model Router (Recommended)
104
104
 
105
- ```bash npm2yarn copy
106
- npm install @mastra/fastembed
105
+ The simplest way is to use a `provider/model` string with autocomplete support:
106
+
107
+ ```ts {7}
108
+ import { Memory } from "@mastra/memory";
109
+ import { Agent } from "@mastra/core/agent";
110
+
111
+ const agent = new Agent({
112
+ memory: new Memory({
113
+ // ... other memory options
114
+ embedder: "openai/text-embedding-3-small", // TypeScript autocomplete supported
115
+ }),
116
+ });
107
117
  ```
108
118
 
109
- Then configure it in your memory:
119
+ Supported embedding models:
120
+ - **OpenAI**: `text-embedding-3-small`, `text-embedding-3-large`, `text-embedding-ada-002`
121
+ - **Google**: `gemini-embedding-001`, `text-embedding-004`
122
+
123
+ The model router automatically handles API key detection from environment variables (`OPENAI_API_KEY`, `GOOGLE_GENERATIVE_AI_API_KEY`).
124
+
125
+ #### Using AI SDK Packages
126
+
127
+ You can also use AI SDK embedding models directly:
110
128
 
111
129
  ```ts {3,8}
112
130
  import { Memory } from "@mastra/memory";
113
131
  import { Agent } from "@mastra/core/agent";
114
- import { fastembed } from "@mastra/fastembed";
132
+ import { openai } from "@ai-sdk/openai";
115
133
 
116
134
  const agent = new Agent({
117
135
  memory: new Memory({
118
136
  // ... other memory options
119
- embedder: fastembed,
137
+ embedder: openai.embedding("text-embedding-3-small"),
120
138
  }),
121
139
  });
122
140
  ```
123
141
 
124
- Alternatively, use a different provider like OpenAI:
142
+ #### Using FastEmbed (Local)
143
+
144
+ To use FastEmbed (a local embedding model), install `@mastra/fastembed`:
145
+
146
+ ```bash npm2yarn copy
147
+ npm install @mastra/fastembed
148
+ ```
149
+
150
+ Then configure it in your memory:
125
151
 
126
152
  ```ts {3,8}
127
153
  import { Memory } from "@mastra/memory";
128
154
  import { Agent } from "@mastra/core/agent";
129
- import { openai } from "@ai-sdk/openai";
155
+ import { fastembed } from "@mastra/fastembed";
130
156
 
131
157
  const agent = new Agent({
132
158
  memory: new Memory({
133
159
  // ... other memory options
134
- embedder: openai.embedding("text-embedding-3-small"),
160
+ embedder: fastembed,
135
161
  }),
136
162
  });
137
163
  ```
@@ -73,28 +73,40 @@ We go deeper into chunking strategies in our [chunk documentation](/reference/ra
73
73
 
74
74
  ## Step 2: Embedding Generation
75
75
 
76
- Transform chunks into embeddings using your preferred provider. Mastra supports many embedding providers, including OpenAI and Cohere:
76
+ Transform chunks into embeddings using your preferred provider. Mastra supports embedding models through the model router or AI SDK packages.
77
77
 
78
- ### Using OpenAI
78
+ ### Using the Model Router (Recommended)
79
+
80
+ The simplest way is to use Mastra's model router with `provider/model` strings:
79
81
 
80
82
  ```ts showLineNumbers copy
81
- import { openai } from "@ai-sdk/openai";
83
+ import { ModelRouterEmbeddingModel } from "@mastra/core";
82
84
  import { embedMany } from "ai";
83
85
 
86
+ const embeddingModel = new ModelRouterEmbeddingModel("openai/text-embedding-3-small");
87
+
84
88
  const { embeddings } = await embedMany({
85
- model: openai.embedding("text-embedding-3-small"),
89
+ model: embeddingModel,
86
90
  values: chunks.map((chunk) => chunk.text),
87
91
  });
88
92
  ```
89
93
 
90
- ### Using Cohere
94
+ Supported embedding models:
95
+ - **OpenAI**: `text-embedding-3-small`, `text-embedding-3-large`, `text-embedding-ada-002`
96
+ - **Google**: `gemini-embedding-001`, `text-embedding-004`
97
+
98
+ The model router automatically handles API key detection from environment variables.
99
+
100
+ ### Using AI SDK Packages
101
+
102
+ You can also use AI SDK embedding models directly:
91
103
 
92
104
  ```ts showLineNumbers copy
93
- import { cohere } from "@ai-sdk/cohere";
105
+ import { openai } from "@ai-sdk/openai";
94
106
  import { embedMany } from "ai";
95
107
 
96
108
  const { embeddings } = await embedMany({
97
- model: cohere.embedding("embed-english-v3.0"),
109
+ model: openai.embedding("text-embedding-3-small"),
98
110
  values: chunks.map((chunk) => chunk.text),
99
111
  });
100
112
  ```