@mastra/mcp-docs-server 1.0.0-beta.5 → 1.0.0-beta.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (132)
  1. package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +9 -9
  2. package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +67 -67
  3. package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +26 -26
  4. package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +53 -53
  5. package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +26 -26
  6. package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +27 -27
  7. package/.docs/organized/changelogs/%40mastra%2Fconvex.md +29 -0
  8. package/.docs/organized/changelogs/%40mastra%2Fcore.md +274 -274
  9. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +15 -15
  10. package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +12 -12
  11. package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +65 -65
  12. package/.docs/organized/changelogs/%40mastra%2Fduckdb.md +42 -0
  13. package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +26 -26
  14. package/.docs/organized/changelogs/%40mastra%2Felasticsearch.md +52 -0
  15. package/.docs/organized/changelogs/%40mastra%2Fevals.md +12 -12
  16. package/.docs/organized/changelogs/%40mastra%2Flance.md +26 -26
  17. package/.docs/organized/changelogs/%40mastra%2Flibsql.md +24 -24
  18. package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +9 -9
  19. package/.docs/organized/changelogs/%40mastra%2Fmcp.md +84 -84
  20. package/.docs/organized/changelogs/%40mastra%2Fmemory.md +36 -36
  21. package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +26 -26
  22. package/.docs/organized/changelogs/%40mastra%2Fmssql.md +27 -27
  23. package/.docs/organized/changelogs/%40mastra%2Fpg.md +28 -28
  24. package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +47 -47
  25. package/.docs/organized/changelogs/%40mastra%2Frag.md +43 -43
  26. package/.docs/organized/changelogs/%40mastra%2Freact.md +9 -0
  27. package/.docs/organized/changelogs/%40mastra%2Fschema-compat.md +6 -0
  28. package/.docs/organized/changelogs/%40mastra%2Fserver.md +56 -56
  29. package/.docs/organized/changelogs/%40mastra%2Fupstash.md +26 -26
  30. package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +19 -19
  31. package/.docs/organized/changelogs/create-mastra.md +9 -9
  32. package/.docs/organized/changelogs/mastra.md +17 -17
  33. package/.docs/organized/code-examples/agui.md +1 -0
  34. package/.docs/organized/code-examples/ai-sdk-v5.md +1 -0
  35. package/.docs/organized/code-examples/mcp-server-adapters.md +721 -0
  36. package/.docs/organized/code-examples/server-app-access.md +342 -0
  37. package/.docs/raw/agents/agent-approval.mdx +189 -0
  38. package/.docs/raw/agents/guardrails.mdx +13 -9
  39. package/.docs/raw/agents/networks.mdx +1 -0
  40. package/.docs/raw/agents/overview.mdx +23 -58
  41. package/.docs/raw/agents/processors.mdx +279 -0
  42. package/.docs/raw/deployment/cloud-providers/index.mdx +19 -26
  43. package/.docs/raw/deployment/cloud-providers/netlify-deployer.mdx +44 -13
  44. package/.docs/raw/evals/running-in-ci.mdx +0 -2
  45. package/.docs/raw/{guides/getting-started → getting-started}/manual-install.mdx +2 -2
  46. package/.docs/raw/getting-started/start.mdx +1 -1
  47. package/.docs/raw/guides/build-your-ui/ai-sdk-ui.mdx +8 -0
  48. package/.docs/raw/guides/getting-started/quickstart.mdx +1 -1
  49. package/.docs/raw/guides/guide/whatsapp-chat-bot.mdx +421 -0
  50. package/.docs/raw/guides/index.mdx +3 -35
  51. package/.docs/raw/guides/migrations/upgrade-to-v1/agent.mdx +11 -0
  52. package/.docs/raw/guides/migrations/upgrade-to-v1/workflows.mdx +29 -0
  53. package/.docs/raw/index.mdx +1 -1
  54. package/.docs/raw/memory/memory-processors.mdx +265 -79
  55. package/.docs/raw/memory/working-memory.mdx +10 -2
  56. package/.docs/raw/observability/overview.mdx +0 -1
  57. package/.docs/raw/observability/tracing/bridges/otel.mdx +176 -0
  58. package/.docs/raw/observability/tracing/exporters/arize.mdx +17 -0
  59. package/.docs/raw/observability/tracing/exporters/braintrust.mdx +19 -0
  60. package/.docs/raw/observability/tracing/exporters/langfuse.mdx +20 -0
  61. package/.docs/raw/observability/tracing/exporters/langsmith.mdx +12 -0
  62. package/.docs/raw/observability/tracing/exporters/otel.mdx +5 -4
  63. package/.docs/raw/observability/tracing/overview.mdx +71 -6
  64. package/.docs/raw/observability/tracing/processors/sensitive-data-filter.mdx +0 -1
  65. package/.docs/raw/rag/retrieval.mdx +23 -6
  66. package/.docs/raw/rag/vector-databases.mdx +93 -2
  67. package/.docs/raw/reference/agents/generate.mdx +55 -6
  68. package/.docs/raw/reference/agents/network.mdx +44 -0
  69. package/.docs/raw/reference/client-js/memory.mdx +43 -0
  70. package/.docs/raw/reference/client-js/workflows.mdx +92 -63
  71. package/.docs/raw/reference/deployer/netlify.mdx +1 -2
  72. package/.docs/raw/reference/evals/scorer-utils.mdx +362 -0
  73. package/.docs/raw/reference/index.mdx +1 -0
  74. package/.docs/raw/reference/observability/tracing/bridges/otel.mdx +150 -0
  75. package/.docs/raw/reference/observability/tracing/configuration.mdx +0 -4
  76. package/.docs/raw/reference/observability/tracing/exporters/arize.mdx +4 -0
  77. package/.docs/raw/reference/observability/tracing/exporters/langsmith.mdx +17 -1
  78. package/.docs/raw/reference/observability/tracing/exporters/otel.mdx +6 -0
  79. package/.docs/raw/reference/observability/tracing/instances.mdx +0 -4
  80. package/.docs/raw/reference/observability/tracing/interfaces.mdx +29 -4
  81. package/.docs/raw/reference/observability/tracing/spans.mdx +0 -4
  82. package/.docs/raw/reference/processors/language-detector.mdx +9 -2
  83. package/.docs/raw/reference/processors/message-history-processor.mdx +131 -0
  84. package/.docs/raw/reference/processors/moderation-processor.mdx +10 -3
  85. package/.docs/raw/reference/processors/pii-detector.mdx +10 -3
  86. package/.docs/raw/reference/processors/processor-interface.mdx +502 -0
  87. package/.docs/raw/reference/processors/prompt-injection-detector.mdx +9 -2
  88. package/.docs/raw/reference/processors/semantic-recall-processor.mdx +197 -0
  89. package/.docs/raw/reference/processors/system-prompt-scrubber.mdx +2 -2
  90. package/.docs/raw/reference/processors/tool-call-filter.mdx +125 -0
  91. package/.docs/raw/reference/processors/working-memory-processor.mdx +221 -0
  92. package/.docs/raw/reference/storage/cloudflare-d1.mdx +37 -0
  93. package/.docs/raw/reference/storage/convex.mdx +164 -0
  94. package/.docs/raw/reference/storage/lance.mdx +33 -0
  95. package/.docs/raw/reference/storage/libsql.mdx +37 -0
  96. package/.docs/raw/reference/storage/mongodb.mdx +39 -0
  97. package/.docs/raw/reference/storage/mssql.mdx +37 -0
  98. package/.docs/raw/reference/storage/postgresql.mdx +37 -0
  99. package/.docs/raw/reference/streaming/ChunkType.mdx +1 -1
  100. package/.docs/raw/reference/streaming/agents/stream.mdx +56 -1
  101. package/.docs/raw/reference/streaming/workflows/observeStream.mdx +7 -9
  102. package/.docs/raw/reference/streaming/workflows/{resumeStreamVNext.mdx → resumeStream.mdx} +51 -11
  103. package/.docs/raw/reference/streaming/workflows/stream.mdx +83 -24
  104. package/.docs/raw/reference/tools/mcp-client.mdx +74 -17
  105. package/.docs/raw/reference/vectors/convex.mdx +429 -0
  106. package/.docs/raw/reference/vectors/duckdb.mdx +462 -0
  107. package/.docs/raw/reference/vectors/elasticsearch.mdx +310 -0
  108. package/.docs/raw/reference/voice/google.mdx +159 -20
  109. package/.docs/raw/reference/workflows/run-methods/restart.mdx +142 -0
  110. package/.docs/raw/reference/workflows/run-methods/resume.mdx +44 -0
  111. package/.docs/raw/reference/workflows/run-methods/start.mdx +44 -0
  112. package/.docs/raw/reference/workflows/run.mdx +13 -5
  113. package/.docs/raw/reference/workflows/step.mdx +13 -0
  114. package/.docs/raw/reference/workflows/workflow.mdx +19 -0
  115. package/.docs/raw/server-db/mastra-server.mdx +30 -1
  116. package/.docs/raw/server-db/request-context.mdx +0 -1
  117. package/.docs/raw/server-db/storage.mdx +11 -0
  118. package/.docs/raw/streaming/overview.mdx +6 -6
  119. package/.docs/raw/streaming/tool-streaming.mdx +2 -2
  120. package/.docs/raw/streaming/workflow-streaming.mdx +5 -11
  121. package/.docs/raw/workflows/error-handling.mdx +1 -0
  122. package/.docs/raw/workflows/human-in-the-loop.mdx +4 -4
  123. package/.docs/raw/workflows/overview.mdx +56 -44
  124. package/.docs/raw/workflows/snapshots.mdx +1 -0
  125. package/.docs/raw/workflows/suspend-and-resume.mdx +85 -16
  126. package/.docs/raw/workflows/time-travel.mdx +313 -0
  127. package/.docs/raw/workflows/workflow-state.mdx +191 -0
  128. package/CHANGELOG.md +8 -0
  129. package/package.json +4 -4
  130. package/.docs/raw/agents/human-in-the-loop-with-tools.mdx +0 -91
  131. package/.docs/raw/reference/streaming/workflows/observeStreamVNext.mdx +0 -47
  132. package/.docs/raw/reference/streaming/workflows/streamVNext.mdx +0 -153

package/.docs/raw/agents/overview.mdx

@@ -21,10 +21,9 @@ An introduction to agents, and how they compare to workflows on [YouTube (7 minu
  :::
  
  ## Setting up agents
+ ### Installation
  
- <Tabs>
- <TabItem value="mastra-model-router" label="Model router">
- <Steps>
+ <Steps>
  
  <StepItem>
  
@@ -70,56 +69,6 @@ export const testAgent = new Agent({
  </StepItem>
  
  </Steps>
- </TabItem>
- <TabItem value="vercel-ai-sdk" label="Vercel AI SDK">
- <Steps>
- 
- <StepItem>
- 
- Include the Mastra core package alongside the Vercel AI SDK provider you want to use:
- 
- ```bash
- npm install @mastra/core@beta @ai-sdk/openai
- ```
- 
- </StepItem>
- 
- <StepItem>
- 
- Set the corresponding environment variable for your provider. For OpenAI via the AI SDK:
- 
- ```bash title=".env" copy
- OPENAI_API_KEY=<your-api-key>
- ```
- 
- :::note
- 
- See the [AI SDK Providers](https://ai-sdk.dev/providers/ai-sdk-providers) in the Vercel AI SDK docs for additional configuration options.
- 
- :::
- 
- </StepItem>
- 
- <StepItem>
- 
- To create an agent in Mastra, use the `Agent` class. Every agent must include `instructions` to define its behavior, and a `model` parameter to specify the LLM provider and model. When using the Vercel AI SDK, provide the client to your agent's `model` field:
- 
- ```typescript title="src/mastra/agents/test-agent.ts" copy
- import { openai } from "@ai-sdk/openai";
- import { Agent } from "@mastra/core/agent";
- 
- export const testAgent = new Agent({
-   id: "test-agent",
-   name: "Test Agent",
-   instructions: "You are a helpful assistant.",
-   model: openai("gpt-5.1"),
- });
- ```
- 
- </StepItem>
- </Steps>
- </TabItem>
- </Tabs>
  
  ### Instruction formats
  
@@ -296,9 +245,9 @@ const response = await testAgent.generate(
  console.log(response.object);
  ```
  
- ### With Tool Calling
+ ### Structuring sub agent
  
- Use the `model` property to ensure that your agent can execute multi-step LLM calls with tool calling.
+ Use the `model` property to have a separate agent generate the structured output for you.
  
  ```typescript showLineNumbers copy
  import { z } from "zod";
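
The rest of the renamed example is elided from this hunk. As a rough sketch of the pattern the new heading describes, assuming the `structuredOutput` options shown elsewhere in this file (the agent, prompt, and schema below are illustrative):

```typescript
import { z } from "zod";
import { openai } from "@ai-sdk/openai";

// A separate structuring model converts the agent's free-form answer
// into the requested shape (names below are illustrative).
const response = await testAgent.generate("Summarize today's weather", {
  structuredOutput: {
    schema: z.object({
      summary: z.string(),
      temperature: z.number(),
    }),
    model: openai("gpt-4o-mini"), // the "structuring sub agent"
  },
});

console.log(response.object);
```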
@@ -361,7 +310,22 @@ const response = await testAgentThatDoesntSupportStructuredOutput.generate(
  console.log(response.object);
  ```
  
- ## Working with images
+ :::info[Gemini 2.5 with tools]
+ 
+ Gemini 2.5 models do not support combining `response_format` (structured output) with function calling (tools) in the same API call. If your agent has tools and you're using `structuredOutput` with a Gemini 2.5 model, you must set `jsonPromptInjection: true` to avoid the error `Function calling with a response mime type: 'application/json' is unsupported`.
+ 
+ ```typescript
+ const response = await agentWithTools.generate("Your prompt", {
+   structuredOutput: {
+     schema: yourSchema,
+     jsonPromptInjection: true, // Required for Gemini 2.5 when tools are present
+   },
+ });
+ ```
+ 
+ :::
+ 
+ ## Analyzing images
  
  Agents can analyze and describe images by processing both the visual content and any text within them. To enable image analysis, pass an object with `type: 'image'` and the image URL in the `content` array. You can combine image content with text prompts to guide the agent's analysis.
  
@@ -386,7 +350,8 @@ const response = await testAgent.generate([
  console.log(response.text);
  ```
  
- ### Using `maxSteps`
+ 
+ ## Using `maxSteps`
  
  The `maxSteps` parameter controls the maximum number of sequential LLM calls an agent can make. Each step includes generating a response, executing any tool calls, and processing the result. Limiting steps helps prevent infinite loops, reduce latency, and control token usage for agents that use tools. The default is 1, but can be increased:
  
@@ -398,7 +363,7 @@ const response = await testAgent.generate("Help me organize my day", {
  });
  console.log(response.text);
  ```
- ### Using `onStepFinish`
+ ## Using `onStepFinish`
  
  You can monitor the progress of multi-step operations using the `onStepFinish` callback. This is useful for debugging or providing progress updates to users.
  
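The `maxSteps` and `onStepFinish` examples are only partially visible in the hunks above. A minimal sketch combining the two documented options, assuming the `testAgent` defined earlier in this file and the step-result fields described in the prose (exact callback fields may differ):

```typescript
const response = await testAgent.generate("Help me organize my day", {
  maxSteps: 5, // cap the agent at five sequential LLM calls
  onStepFinish: ({ text, toolCalls, toolResults }) => {
    // Fires after each step: handy for debugging or progress updates
    console.log("step finished", { text, toolCalls, toolResults });
  },
});

console.log(response.text);
```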

package/.docs/raw/agents/processors.mdx

@@ -0,0 +1,279 @@
+ ---
+ title: "Processors | Agents | Mastra Docs"
+ description: "Learn how to use input and output processors to transform, validate, and control messages in Mastra agents."
+ ---
+ 
+ # Processors
+ 
+ Processors transform, validate, or control messages as they pass through an agent. They run at specific points in the agent's execution pipeline, allowing you to modify inputs before they reach the language model or outputs before they're returned to users.
+ 
+ Processors are configured as:
+ 
+ - **`inputProcessors`**: Run before messages reach the language model.
+ - **`outputProcessors`**: Run after the language model generates a response, but before it's returned to users.
+ 
+ Some processors implement both input and output logic and can be used in either array depending on where the transformation should occur.
+ 
+ ## When to use processors
+ 
+ Use processors to:
+ 
+ - Normalize or validate user input
+ - Add guardrails to your agent
+ - Detect and prevent prompt injection or jailbreak attempts
+ - Moderate content for safety or compliance
+ - Transform messages (e.g., translate languages, filter tool calls)
+ - Limit token usage or message history length
+ - Redact sensitive information (PII)
+ - Apply custom business logic to messages
+ 
+ Mastra includes several processors for common use cases. You can also create custom processors for application-specific requirements.
+ 
+ ## Adding processors to an agent
+ 
+ Import and instantiate the processor, then pass it to the agent's `inputProcessors` or `outputProcessors` array:
+ 
+ ```typescript {3,9-15} title="src/mastra/agents/moderated-agent.ts" showLineNumbers copy
+ import { openai } from "@ai-sdk/openai";
+ import { Agent } from "@mastra/core/agent";
+ import { ModerationProcessor } from "@mastra/core/processors";
+ 
+ export const moderatedAgent = new Agent({
+   name: "moderated-agent",
+   instructions: "You are a helpful assistant",
+   model: openai("gpt-4o-mini"),
+   inputProcessors: [
+     new ModerationProcessor({
+       model: openai("gpt-4.1-nano"),
+       categories: ["hate", "harassment", "violence"],
+       threshold: 0.7,
+       strategy: "block",
+     }),
+   ],
+ });
+ ```
+ 
+ ## Execution order
+ 
+ Processors run in the order they appear in the array:
+ 
+ ```typescript
+ inputProcessors: [
+   new UnicodeNormalizer(),
+   new PromptInjectionDetector(),
+   new ModerationProcessor(),
+ ];
+ ```
+ 
+ For output processors, the order determines the sequence of transformations applied to the model's response.
+ 
+ ### With memory enabled
+ 
+ When memory is enabled on an agent, memory processors are automatically added to the pipeline:
+ 
+ **Input processors:**
+ ```
+ [Memory Processors] → [Your inputProcessors]
+ ```
+ Memory loads conversation history first, then your processors run.
+ 
+ **Output processors:**
+ ```
+ [Your outputProcessors] → [Memory Processors]
+ ```
+ Your processors run first, then memory persists messages.
+ 
+ This ordering ensures that if your output guardrail calls `abort()`, memory processors are skipped and no messages are saved. See [Memory Processors](/docs/v1/memory/memory-processors#processor-execution-order) for details.
+ 
+ ## Creating custom processors
+ 
+ Custom processors implement the `Processor` interface:
+ 
+ ### Custom input processor
+ 
+ ```typescript title="src/mastra/processors/custom-input.ts" showLineNumbers copy
+ import type {
+   Processor,
+   CoreMessage,
+   MastraDBMessage,
+   RequestContext,
+ } from "@mastra/core";
+ 
+ export class CustomInputProcessor implements Processor {
+   id = "custom-input";
+ 
+   async processInput({
+     messages,
+     systemMessages,
+     context,
+   }: {
+     messages: MastraDBMessage[];
+     systemMessages: CoreMessage[];
+     context: RequestContext;
+   }): Promise<MastraDBMessage[]> {
+     // Transform messages before they reach the LLM
+     return messages.map((msg) => ({
+       ...msg,
+       content: {
+         ...msg.content,
+         content: msg.content.content.toLowerCase(),
+       },
+     }));
+   }
+ }
+ ```
+ 
+ The `processInput` method receives:
+ - `messages`: User and assistant messages (not system messages)
+ - `systemMessages`: All system messages (agent instructions, memory context, user-provided system prompts)
+ - `messageList`: The full MessageList instance for advanced use cases
+ - `abort`: Function to stop processing and return early
+ - `requestContext`: Execution metadata like `threadId` and `resourceId`
+ 
+ The method can return:
+ - `MastraDBMessage[]` — Transformed messages array (backward compatible)
+ - `{ messages: MastraDBMessage[]; systemMessages: CoreMessage[] }` — Both messages and modified system messages
+ 
+ The framework handles both return formats, so modifying system messages is optional and existing processors continue to work.
+ 
+ ### Modifying system messages
+ 
+ To modify system messages (e.g., trim verbose prompts for smaller models), return an object with both `messages` and `systemMessages`:
+ 
+ ```typescript title="src/mastra/processors/system-trimmer.ts" showLineNumbers copy
+ import type { Processor, CoreMessage, MastraDBMessage } from "@mastra/core";
+ 
+ export class SystemTrimmer implements Processor {
+   id = "system-trimmer";
+ 
+   async processInput({
+     messages,
+     systemMessages,
+   }): Promise<{ messages: MastraDBMessage[]; systemMessages: CoreMessage[] }> {
+     // Trim system messages for smaller models
+     const trimmedSystemMessages = systemMessages.map((msg) => ({
+       ...msg,
+       content:
+         typeof msg.content === "string"
+           ? msg.content.substring(0, 500)
+           : msg.content,
+     }));
+ 
+     return { messages, systemMessages: trimmedSystemMessages };
+   }
+ }
+ ```
+ 
+ This is useful for:
+ - Trimming verbose system prompts for models with smaller context windows
+ - Filtering or modifying semantic recall content to prevent "prompt too long" errors
+ - Dynamically adjusting system instructions based on the conversation
+ 
+ ### Custom output processor
+ 
+ ```typescript title="src/mastra/processors/custom-output.ts" showLineNumbers copy
+ import type {
+   Processor,
+   MastraDBMessage,
+   RequestContext,
+ } from "@mastra/core";
+ 
+ export class CustomOutputProcessor implements Processor {
+   id = "custom-output";
+ 
+   async processOutputResult({
+     messages,
+     context,
+   }: {
+     messages: MastraDBMessage[];
+     context: RequestContext;
+   }): Promise<MastraDBMessage[]> {
+     // Transform messages after the LLM generates them
+     return messages.filter((msg) => msg.role !== "system");
+   }
+ 
+   async processOutputStream({
+     stream,
+     context,
+   }: {
+     stream: ReadableStream;
+     context: RequestContext;
+   }): Promise<ReadableStream> {
+     // Transform streaming responses
+     return stream;
+   }
+ }
+ ```
+ 
+ ## Built-in Utility Processors
+ 
+ Mastra provides utility processors for common tasks:
+ 
+ **For security and validation processors**, see the [Guardrails](/docs/v1/agents/guardrails) page for input/output guardrails and moderation processors.
+ **For memory-specific processors**, see the [Memory Processors](/docs/v1/memory/memory-processors) page for processors that handle message history, semantic recall, and working memory.
+ 
+ ### TokenLimiter
+ 
+ Prevents context window overflow by removing older messages when the total token count exceeds a specified limit.
+ 
+ ```typescript copy showLineNumbers {9-12}
+ import { Agent } from "@mastra/core/agent";
+ import { TokenLimiter } from "@mastra/core/processors";
+ import { openai } from "@ai-sdk/openai";
+ 
+ const agent = new Agent({
+   name: "my-agent",
+   model: openai("gpt-4o"),
+   inputProcessors: [
+     // Ensure the total tokens don't exceed ~127k
+     new TokenLimiter(127000),
+   ],
+ });
+ ```
+ 
+ The `TokenLimiter` uses the `o200k_base` encoding by default (suitable for GPT-4o). You can specify other encodings for different models:
+ 
+ ```typescript copy showLineNumbers {6-9}
+ import cl100k_base from "js-tiktoken/ranks/cl100k_base";
+ 
+ const agent = new Agent({
+   name: "my-agent",
+   inputProcessors: [
+     new TokenLimiter({
+       limit: 16000, // Example limit for a 16k context model
+       encoding: cl100k_base,
+     }),
+   ],
+ });
+ ```
+ 
+ ### ToolCallFilter
+ 
+ Removes tool calls from messages sent to the LLM, saving tokens by excluding potentially verbose tool interactions.
+ 
+ ```typescript copy showLineNumbers {5-14}
+ import { Agent } from "@mastra/core/agent";
+ import { ToolCallFilter, TokenLimiter } from "@mastra/core/processors";
+ import { openai } from "@ai-sdk/openai";
+ 
+ const agent = new Agent({
+   name: "my-agent",
+   model: openai("gpt-4o"),
+   inputProcessors: [
+     // Example 1: Remove all tool calls/results
+     new ToolCallFilter(),
+ 
+     // Example 2: Remove only specific tool calls
+     new ToolCallFilter({ exclude: ["generateImageTool"] }),
+ 
+     // Always place TokenLimiter last
+     new TokenLimiter(127000),
+   ],
+ });
+ ```
+ 
+ > **Note:** The example above filters tool calls and limits tokens for the LLM, but these filtered messages will still be saved to memory. To also filter messages before they're saved to memory, manually add memory processors before utility processors. See [Memory Processors](/docs/v1/memory/memory-processors#manual-control-and-deduplication) for details.
+ 
+ ## Related documentation
+ 
+ - [Guardrails](/docs/v1/agents/guardrails) - Security and validation processors
+ - [Memory Processors](/docs/v1/memory/memory-processors) - Memory-specific processors and automatic integration
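
A side note on the new page's `processOutputStream` stub above, which returns the stream unchanged: a sketch of what an actual streaming transform could look like, using the Web Streams `TransformStream` API. The `{ type, payload: { text } }` chunk shape is an assumption for illustration; check Mastra's `ChunkType` reference for the real shape.

```typescript
import type { Processor } from "@mastra/core";

export class RedactingStreamProcessor implements Processor {
  id = "redacting-stream";

  async processOutputStream({
    stream,
  }: {
    stream: ReadableStream;
  }): Promise<ReadableStream> {
    return stream.pipeThrough(
      new TransformStream({
        transform(chunk, controller) {
          // Assumed chunk shape: { type: "text-delta", payload: { text } }
          if (chunk?.type === "text-delta" && typeof chunk?.payload?.text === "string") {
            controller.enqueue({
              ...chunk,
              payload: {
                ...chunk.payload,
                text: chunk.payload.text.replaceAll("secret", "[redacted]"),
              },
            });
          } else {
            controller.enqueue(chunk); // pass non-text chunks through untouched
          }
        },
      }),
    );
  }
}
```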

package/.docs/raw/deployment/cloud-providers/index.mdx

@@ -15,7 +15,6 @@ Standalone Mastra applications can be deployed to popular cloud providers, see o
  - [Netlify](/docs/v1/deployment/cloud-providers/netlify-deployer)
  - [Vercel](/docs/v1/deployment/cloud-providers/vercel-deployer)
  
- 
  For self-hosted Node.js server deployment, see the [Creating A Mastra Server](/docs/v1/deployment/building-mastra) guide.
  
  ## Prerequisites
@@ -30,33 +29,27 @@ Before deploying to a cloud provider, ensure you have:
  
  ## LibSQLStore
  
- `LibSQLStore` writes to the local filesystem, which is not supported in cloud environments that use ephemeral file systems. If you're deploying to platforms like **AWS Lambda**, **Azure App Services**, or **Digital Ocean App Platform**, you **must remove** all usage of `LibSQLStore`.
+ `LibSQLStore` writes to the local filesystem, which is not supported in cloud environments that use ephemeral file systems. If you're deploying to platforms like AWS Lambda, Netlify, Azure App Services, or Digital Ocean App Platform, you must remove all usage of `LibSQLStore`.
  
- Specifically, ensure you've removed it from both `src/mastra/index.ts` and `src/mastra/agents/weather-agent.ts`:
+ Specifically, ensure you've removed it from all Mastra files, including configuration on the main Mastra instance and any agents using it, e.g. for memory storage.
  
- ```typescript title="src/mastra/index.ts" showLineNumbers
- export const mastra = new Mastra({
-   // ...
-   storage: new LibSQLStore({
-     id: 'mastra-storage',
-     // [!code --]
-     // stores telemetry, scorer results, ... into memory storage, if it needs to persist, change to file:../mastra.db // [!code --]
-     url: ":memory:", // [!code --]
-   }), //[!code --]
- });
+ ```diff title="src/mastra/index.ts"
+ export const mastra = new Mastra({
+ -  storage: new LibSQLStore({
+ -    id: 'mastra-storage',
+ -    url: ":memory:",
+ -  }),
+ });
  ```
  
- ```typescript title="src/mastra/agents/weather-agent.ts" showLineNumbers
- export const weatherAgent = new Agent({
-   id: "weather-agent",
-   // ..
-   memory: new Memory({
-     // [!code --]
-     storage: new LibSQLStore({
-       id: 'mastra-storage',
-       // [!code --]
-       url: "file:../mastra.db", // path is relative to the .mastra/output directory // [!code --]
-     }), // [!code --]
-   }), // [!code --]
- });
+ ```diff
+ export const weatherAgent = new Agent({
+   id: "weather-agent",
+   memory: new Memory({
+ -    storage: new LibSQLStore({
+ -      id: 'mastra-storage',
+ -      url: "file:../mastra.db",
+ -    }),
+   }),
+ });
  ```
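
For a cloud-friendly replacement after removing `LibSQLStore`, one option is Postgres-backed storage via `@mastra/pg` (documented in this package under `reference/storage/postgresql.mdx`). A sketch only; the store options and connection string are illustrative, so verify against the storage reference:

```typescript
import { Mastra } from "@mastra/core";
import { PostgresStore } from "@mastra/pg";

// Persists to a hosted Postgres database instead of the local filesystem.
export const mastra = new Mastra({
  storage: new PostgresStore({
    id: "mastra-storage",
    connectionString: process.env.DATABASE_URL!, // e.g. a managed Postgres URL
  }),
});
```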

package/.docs/raw/deployment/cloud-providers/netlify-deployer.mdx

@@ -15,12 +15,11 @@ npm install @mastra/deployer-netlify@beta
  
  ## Usage example
  
- ```typescript title="src/mastra/index.ts" showLineNumbers copy
+ ```typescript title="src/mastra/index.ts" copy
  import { Mastra } from "@mastra/core";
  import { NetlifyDeployer } from "@mastra/deployer-netlify";
  
  export const mastra = new Mastra({
-   // ...
    deployer: new NetlifyDeployer(),
  });
  ```
@@ -43,32 +42,64 @@ Your project is now configured with automatic deployments which occur whenever y
  
  ## Manual deployment
  
- Manual deployments are also possible using the [Netlify CLI](https://docs.netlify.com/cli/get-started/). With the Netlify CLI installed run the following from your project root to deploy your application.
+ Manual deployments are also possible using the [Netlify CLI](https://docs.netlify.com/cli/get-started/). With the Netlify CLI installed run the following from your project root to deploy your application. You can also run `netlify dev` from your project root to test your Mastra application locally.
  
  ```bash copy
  netlify deploy --prod
  ```
  
- > You can also run `netlify dev` from your project root to test your Mastra application locally.
+ :::warning
  
- ## Build output
+ When using `netlify deploy` instead of continuous deployment, you need to create a `netlify.toml` file with these contents:
+ 
+ ```toml
+ [build]
+ command = "npm run build"
+ publish = ".netlify/v1/functions"
  
- The build output for Mastra applications using the `NetlifyDeployer` includes all agents, tools, and workflows in your project, along with Mastra specific files required to run your application on Netlify.
+ [functions]
+ directory = ".netlify/v1/functions"
+ node_bundler = "none"
+ included_files = [".netlify/v1/functions/**"]
  
+ [[redirects]]
+ from = "/*"
+ to = "/.netlify/functions/api/:splat"
+ status = 200
+ 
+ [build.environment]
+ NODE_VERSION = "22.13.0"
  ```
- .netlify/
- └── v1/
-     ├── functions/
-     │   └── api/
-     │       └── index.mjs
-     └── config.json
- package.json
+ 
+ Adjust the `build.command` to your project.
+ 
+ :::
+ 
+ ## Build output
+ 
+ The build output for Mastra applications using the `NetlifyDeployer` includes all agents, tools, and workflows in your project, along with Mastra-specific files required to run your application on Netlify.
+ 
+ ```bash
+ your-project/
+ └── .netlify/
+     └── v1/
+         ├── config.json
+         └── functions/
+             └── api/
+                 ├── index.js
+                 ├── package.json
+                 └── node_modules/
  ```
  
  The `NetlifyDeployer` automatically generates a `config.json` configuration file in `.netlify/v1` with the following settings:
  
  ```json
  {
+   "functions": {
+     "directory": ".netlify/v1/functions",
+     "node_bundler": "none",
+     "included_files": [".netlify/v1/functions/**"]
+   },
    "redirects": [
      {
        "force": true,

package/.docs/raw/evals/running-in-ci.mdx

@@ -49,8 +49,6 @@ describe('Weather Agent Tests', () => {
  });
  ```
  
- View the full example [here](/examples/v1/evals/running-in-ci)
- 
  ## Understanding Results
  
  The `runEvals` function returns an object with:

package/.docs/raw/getting-started/manual-install.mdx

@@ -1,5 +1,5 @@
  ---
- title: "Manual Install | Guides"
+ title: "Manual Install | Getting Started"
  description: Set up a Mastra project manually without using the create mastra CLI.
  ---
  

@@ -11,7 +11,7 @@ import StepItem from "@site/src/components/StepItem";
  # Manual Install
  
  :::info
- Use this guide to manually build a standalone Mastra server step by step. In most cases, it's quicker to follow the [Standalone Server Quickstart](/guides/v1/getting-started/quickstart), which achieves the same result using the [`mastra create`](/reference/v1/cli/create-mastra) command. For existing projects, you can also use [`mastra init`](/reference/v1/cli/mastra#mastra-init).
+ Use this guide to manually build a standalone Mastra server step by step. In most cases, it's quicker to follow a [getting-started guide](/docs/v1/getting-started/start), which achieves the same result using the [`mastra create`](/reference/v1/cli/create-mastra) command. For existing projects, you can also use [`mastra init`](/reference/v1/cli/mastra#mastra-init).
  :::
  
  If you prefer not to use our automatic CLI tool, you can set up your project yourself by following the guide below.

package/.docs/raw/getting-started/start.mdx

@@ -6,7 +6,7 @@ description: Choose how to get started with Mastra - quickstart, framework integ
  import { CardGrid, CardGridItem } from "@site/src/components/cards/card-grid";
  
  # Start
- Start a new Mastra project, or integrate Mastra with your preferred framework.
+ Create a new Mastra project, or integrate Mastra with your preferred framework to start building.
  
  ## New project
  

package/.docs/raw/guides/build-your-ui/ai-sdk-ui.mdx

@@ -142,6 +142,12 @@ const { error, status, sendMessage, messages, regenerate, stop } = useChat({
  });
  ```
  
+ :::tip Agent streaming in workflows
+ When a workflow step pipes an agent's stream to the workflow writer (e.g., `await response.fullStream.pipeTo(writer)`), the agent's text chunks and tool calls are automatically streamed to the UI in real-time. This provides a seamless streaming experience even when agents are running inside workflow steps.
+ 
+ Learn more in [Workflow Streaming](/docs/v1/streaming/workflow-streaming#streaming-agent-text-chunks-to-ui).
+ :::
+ 
  ### `networkRoute()`
  
  Use the `networkRoute()` utility to create a route handler that automatically formats the agent network stream into an AI SDK-compatible format.
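
For context on the new tip above, a sketch of a workflow step that pipes an agent's stream into the workflow writer. The step id, schemas, and agent name are illustrative, and the `writer` argument to `execute` is assumed from the workflow streaming docs the tip links to:

```typescript
import { createStep } from "@mastra/core/workflows";
import { z } from "zod";

const draftStep = createStep({
  id: "draft-answer",
  inputSchema: z.object({ prompt: z.string() }),
  outputSchema: z.object({ done: z.boolean() }),
  execute: async ({ inputData, mastra, writer }) => {
    const agent = mastra.getAgent("testAgent");
    const response = await agent.stream(inputData.prompt);

    // The agent's text chunks and tool calls flow through the workflow
    // writer, so the UI receives them in real time.
    await response.fullStream.pipeTo(writer);

    return { done: true };
  },
});
```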
@@ -292,6 +298,7 @@ export async function POST(req: Request) {
  
    // Transform stream into AI SDK format and create UI messages stream
    const uiMessageStream = createUIMessageStream({
+     originalMessages: messages,
      execute: async ({ writer }) => {
        for await (const part of toAISdkStream(stream, { from: "agent" })!) {
          writer.write(part);

@@ -384,6 +391,7 @@ export async function POST(req: Request) {
    });
  
    const uiMessageStream = createUIMessageStream({
+     initialMessages: messages,
      execute: async ({ writer }) => {
        for await (const part of toAISdkStream(stream, {
          from: "agent",

package/.docs/raw/guides/getting-started/quickstart.mdx

@@ -13,7 +13,7 @@ import { VideoPlayer } from "@site/src/components/video-player";
  
  The `create mastra` CLI command is the quickest way to get started. It walks you through setup and creates example agents, workflows, and tools for you to run locally or adapt.
  
- If you need more control over the setup, see the [manual installation guide](/guides/v1/getting-started/manual-install). You can also use [`mastra init`](/reference/v1/cli/mastra#mastra-init) for existing projects.
+ If you need more control over the setup, see the [manual installation guide](/docs/v1/getting-started/manual-install). You can also use [`mastra init`](/reference/v1/cli/mastra#mastra-init) for existing projects.
  
  ## Before you begin
  