@mastra/mcp-docs-server 1.0.0-beta.5 → 1.0.0-beta.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (163)
  1. package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +9 -9
  2. package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +67 -67
  3. package/.docs/organized/changelogs/%40mastra%2Fastra.md +10 -10
  4. package/.docs/organized/changelogs/%40mastra%2Fchroma.md +12 -12
  5. package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +57 -57
  6. package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +110 -110
  7. package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +57 -57
  8. package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +57 -57
  9. package/.docs/organized/changelogs/%40mastra%2Fcodemod.md +6 -0
  10. package/.docs/organized/changelogs/%40mastra%2Fconvex.md +60 -0
  11. package/.docs/organized/changelogs/%40mastra%2Fcore.md +358 -358
  12. package/.docs/organized/changelogs/%40mastra%2Fcouchbase.md +11 -11
  13. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +24 -24
  14. package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +12 -12
  15. package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +79 -79
  16. package/.docs/organized/changelogs/%40mastra%2Fduckdb.md +42 -0
  17. package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +57 -57
  18. package/.docs/organized/changelogs/%40mastra%2Felasticsearch.md +61 -0
  19. package/.docs/organized/changelogs/%40mastra%2Fevals.md +12 -12
  20. package/.docs/organized/changelogs/%40mastra%2Flance.md +57 -57
  21. package/.docs/organized/changelogs/%40mastra%2Flibsql.md +55 -55
  22. package/.docs/organized/changelogs/%40mastra%2Floggers.md +12 -12
  23. package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +17 -17
  24. package/.docs/organized/changelogs/%40mastra%2Fmcp.md +125 -125
  25. package/.docs/organized/changelogs/%40mastra%2Fmemory.md +36 -36
  26. package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +57 -57
  27. package/.docs/organized/changelogs/%40mastra%2Fmssql.md +57 -57
  28. package/.docs/organized/changelogs/%40mastra%2Fopensearch.md +10 -10
  29. package/.docs/organized/changelogs/%40mastra%2Fpg.md +59 -59
  30. package/.docs/organized/changelogs/%40mastra%2Fpinecone.md +10 -10
  31. package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +77 -77
  32. package/.docs/organized/changelogs/%40mastra%2Fqdrant.md +10 -10
  33. package/.docs/organized/changelogs/%40mastra%2Frag.md +43 -43
  34. package/.docs/organized/changelogs/%40mastra%2Freact.md +16 -0
  35. package/.docs/organized/changelogs/%40mastra%2Fs3vectors.md +9 -0
  36. package/.docs/organized/changelogs/%40mastra%2Fschema-compat.md +6 -0
  37. package/.docs/organized/changelogs/%40mastra%2Fserver.md +113 -113
  38. package/.docs/organized/changelogs/%40mastra%2Fturbopuffer.md +10 -10
  39. package/.docs/organized/changelogs/%40mastra%2Fupstash.md +57 -57
  40. package/.docs/organized/changelogs/%40mastra%2Fvectorize.md +10 -10
  41. package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +19 -19
  42. package/.docs/organized/changelogs/create-mastra.md +15 -15
  43. package/.docs/organized/changelogs/mastra.md +30 -30
  44. package/.docs/organized/code-examples/agui.md +1 -0
  45. package/.docs/organized/code-examples/ai-elements.md +1 -1
  46. package/.docs/organized/code-examples/ai-sdk-useChat.md +1 -1
  47. package/.docs/organized/code-examples/ai-sdk-v5.md +2 -1
  48. package/.docs/organized/code-examples/assistant-ui.md +1 -1
  49. package/.docs/organized/code-examples/bird-checker-with-nextjs-and-eval.md +1 -1
  50. package/.docs/organized/code-examples/bird-checker-with-nextjs.md +1 -1
  51. package/.docs/organized/code-examples/crypto-chatbot.md +1 -1
  52. package/.docs/organized/code-examples/mcp-server-adapters.md +721 -0
  53. package/.docs/organized/code-examples/server-app-access.md +342 -0
  54. package/.docs/organized/code-examples/server-express-adapter.md +87 -0
  55. package/.docs/organized/code-examples/server-hono-adapter.md +85 -0
  56. package/.docs/raw/agents/agent-approval.mdx +189 -0
  57. package/.docs/raw/agents/guardrails.mdx +13 -9
  58. package/.docs/raw/agents/networks.mdx +1 -0
  59. package/.docs/raw/agents/overview.mdx +8 -152
  60. package/.docs/raw/agents/processors.mdx +279 -0
  61. package/.docs/raw/agents/structured-output.mdx +224 -0
  62. package/.docs/raw/deployment/cloud-providers/index.mdx +19 -26
  63. package/.docs/raw/deployment/cloud-providers/netlify-deployer.mdx +44 -13
  64. package/.docs/raw/evals/running-in-ci.mdx +0 -2
  65. package/.docs/raw/{guides/getting-started → getting-started}/manual-install.mdx +2 -2
  66. package/.docs/raw/getting-started/start.mdx +1 -1
  67. package/.docs/raw/guides/build-your-ui/ai-sdk-ui.mdx +8 -0
  68. package/.docs/raw/guides/getting-started/quickstart.mdx +1 -1
  69. package/.docs/raw/guides/guide/whatsapp-chat-bot.mdx +421 -0
  70. package/.docs/raw/guides/index.mdx +3 -35
  71. package/.docs/raw/guides/migrations/upgrade-to-v1/agent.mdx +11 -0
  72. package/.docs/raw/guides/migrations/upgrade-to-v1/workflows.mdx +37 -0
  73. package/.docs/raw/index.mdx +1 -1
  74. package/.docs/raw/memory/memory-processors.mdx +265 -79
  75. package/.docs/raw/memory/working-memory.mdx +11 -2
  76. package/.docs/raw/observability/overview.mdx +0 -1
  77. package/.docs/raw/observability/tracing/bridges/otel.mdx +200 -0
  78. package/.docs/raw/observability/tracing/exporters/arize.mdx +36 -0
  79. package/.docs/raw/observability/tracing/exporters/braintrust.mdx +19 -0
  80. package/.docs/raw/observability/tracing/exporters/langfuse.mdx +83 -0
  81. package/.docs/raw/observability/tracing/exporters/langsmith.mdx +12 -0
  82. package/.docs/raw/observability/tracing/exporters/otel.mdx +34 -22
  83. package/.docs/raw/observability/tracing/exporters/posthog.mdx +20 -0
  84. package/.docs/raw/observability/tracing/overview.mdx +76 -6
  85. package/.docs/raw/observability/tracing/processors/sensitive-data-filter.mdx +0 -1
  86. package/.docs/raw/rag/retrieval.mdx +23 -6
  87. package/.docs/raw/rag/vector-databases.mdx +93 -2
  88. package/.docs/raw/reference/agents/generate.mdx +55 -6
  89. package/.docs/raw/reference/agents/network.mdx +44 -0
  90. package/.docs/raw/reference/client-js/memory.mdx +43 -0
  91. package/.docs/raw/reference/client-js/workflows.mdx +92 -63
  92. package/.docs/raw/reference/deployer/netlify.mdx +1 -2
  93. package/.docs/raw/reference/evals/scorer-utils.mdx +362 -0
  94. package/.docs/raw/reference/index.mdx +1 -0
  95. package/.docs/raw/reference/observability/tracing/bridges/otel.mdx +177 -0
  96. package/.docs/raw/reference/observability/tracing/configuration.mdx +0 -4
  97. package/.docs/raw/reference/observability/tracing/exporters/arize.mdx +29 -0
  98. package/.docs/raw/reference/observability/tracing/exporters/langfuse.mdx +43 -0
  99. package/.docs/raw/reference/observability/tracing/exporters/langsmith.mdx +17 -1
  100. package/.docs/raw/reference/observability/tracing/exporters/otel.mdx +33 -43
  101. package/.docs/raw/reference/observability/tracing/instances.mdx +0 -4
  102. package/.docs/raw/reference/observability/tracing/interfaces.mdx +29 -4
  103. package/.docs/raw/reference/observability/tracing/spans.mdx +0 -4
  104. package/.docs/raw/reference/processors/language-detector.mdx +9 -2
  105. package/.docs/raw/reference/processors/message-history-processor.mdx +131 -0
  106. package/.docs/raw/reference/processors/moderation-processor.mdx +10 -3
  107. package/.docs/raw/reference/processors/pii-detector.mdx +10 -3
  108. package/.docs/raw/reference/processors/processor-interface.mdx +502 -0
  109. package/.docs/raw/reference/processors/prompt-injection-detector.mdx +9 -2
  110. package/.docs/raw/reference/processors/semantic-recall-processor.mdx +197 -0
  111. package/.docs/raw/reference/processors/system-prompt-scrubber.mdx +2 -2
  112. package/.docs/raw/reference/processors/tool-call-filter.mdx +125 -0
  113. package/.docs/raw/reference/processors/working-memory-processor.mdx +221 -0
  114. package/.docs/raw/reference/server/create-route.mdx +314 -0
  115. package/.docs/raw/reference/server/express-adapter.mdx +193 -0
  116. package/.docs/raw/reference/server/hono-adapter.mdx +174 -0
  117. package/.docs/raw/reference/server/mastra-server.mdx +316 -0
  118. package/.docs/raw/reference/server/routes.mdx +250 -0
  119. package/.docs/raw/reference/storage/cloudflare-d1.mdx +37 -0
  120. package/.docs/raw/reference/storage/convex.mdx +164 -0
  121. package/.docs/raw/reference/storage/lance.mdx +33 -0
  122. package/.docs/raw/reference/storage/libsql.mdx +37 -0
  123. package/.docs/raw/reference/storage/mongodb.mdx +39 -0
  124. package/.docs/raw/reference/storage/mssql.mdx +37 -0
  125. package/.docs/raw/reference/storage/postgresql.mdx +37 -0
  126. package/.docs/raw/reference/streaming/ChunkType.mdx +1 -1
  127. package/.docs/raw/reference/streaming/agents/stream.mdx +56 -1
  128. package/.docs/raw/reference/streaming/workflows/observeStream.mdx +7 -9
  129. package/.docs/raw/reference/streaming/workflows/{resumeStreamVNext.mdx → resumeStream.mdx} +51 -11
  130. package/.docs/raw/reference/streaming/workflows/stream.mdx +83 -24
  131. package/.docs/raw/reference/streaming/workflows/timeTravelStream.mdx +170 -0
  132. package/.docs/raw/reference/tools/mcp-client.mdx +128 -18
  133. package/.docs/raw/reference/vectors/convex.mdx +429 -0
  134. package/.docs/raw/reference/vectors/duckdb.mdx +462 -0
  135. package/.docs/raw/reference/vectors/elasticsearch.mdx +310 -0
  136. package/.docs/raw/reference/voice/google.mdx +159 -20
  137. package/.docs/raw/reference/workflows/run-methods/restart.mdx +142 -0
  138. package/.docs/raw/reference/workflows/run-methods/resume.mdx +44 -0
  139. package/.docs/raw/reference/workflows/run-methods/start.mdx +44 -0
  140. package/.docs/raw/reference/workflows/run-methods/timeTravel.mdx +310 -0
  141. package/.docs/raw/reference/workflows/run.mdx +27 -5
  142. package/.docs/raw/reference/workflows/step.mdx +13 -0
  143. package/.docs/raw/reference/workflows/workflow.mdx +19 -0
  144. package/.docs/raw/server-db/custom-adapters.mdx +380 -0
  145. package/.docs/raw/server-db/mastra-server.mdx +16 -8
  146. package/.docs/raw/server-db/request-context.mdx +0 -1
  147. package/.docs/raw/server-db/server-adapters.mdx +286 -0
  148. package/.docs/raw/server-db/storage.mdx +11 -0
  149. package/.docs/raw/streaming/overview.mdx +6 -6
  150. package/.docs/raw/streaming/tool-streaming.mdx +2 -2
  151. package/.docs/raw/streaming/workflow-streaming.mdx +5 -11
  152. package/.docs/raw/workflows/error-handling.mdx +1 -0
  153. package/.docs/raw/workflows/human-in-the-loop.mdx +4 -4
  154. package/.docs/raw/workflows/overview.mdx +56 -44
  155. package/.docs/raw/workflows/snapshots.mdx +1 -0
  156. package/.docs/raw/workflows/suspend-and-resume.mdx +85 -16
  157. package/.docs/raw/workflows/time-travel.mdx +313 -0
  158. package/.docs/raw/workflows/workflow-state.mdx +191 -0
  159. package/CHANGELOG.md +16 -0
  160. package/package.json +4 -4
  161. package/.docs/raw/agents/human-in-the-loop-with-tools.mdx +0 -91
  162. package/.docs/raw/reference/streaming/workflows/observeStreamVNext.mdx +0 -47
  163. package/.docs/raw/reference/streaming/workflows/streamVNext.mdx +0 -153
package/.docs/raw/agents/guardrails.mdx
@@ -33,7 +33,7 @@ export const moderatedAgent = new Agent({
   model: "openai/gpt-5.1",
   inputProcessors: [
     new ModerationProcessor({
-      model: "openai/gpt-4.1-nano",
+      model: "openrouter/openai/gpt-oss-safeguard-20b",
       categories: ["hate", "harassment", "violence"],
       threshold: 0.7,
       strategy: "block",
@@ -82,7 +82,7 @@ export const secureAgent = new Agent({
   // ...
   inputProcessors: [
     new PromptInjectionDetector({
-      model: "openai/gpt-4.1-nano",
+      model: "openrouter/openai/gpt-oss-safeguard-20b",
      threshold: 0.8,
      strategy: "rewrite",
      detectionTypes: ["injection", "jailbreak", "system-override"],
@@ -106,7 +106,7 @@ export const multilingualAgent = new Agent({
   // ...
   inputProcessors: [
     new LanguageDetector({
-      model: "openai/gpt-4.1-nano",
+      model: "openrouter/openai/gpt-oss-safeguard-20b",
       targetLanguages: ["English", "en"],
       strategy: "translate",
       threshold: 0.8,
@@ -179,7 +179,7 @@ const scrubbedAgent = new Agent({
   name: "Scrubbed Agent",
   outputProcessors: [
     new SystemPromptScrubber({
-      model: "openai/gpt-4.1-nano",
+      model: "openrouter/openai/gpt-oss-safeguard-20b",
       strategy: "redact",
       customPatterns: ["system prompt", "internal instructions"],
       includeDetections: true,
@@ -194,6 +194,10 @@ const scrubbedAgent = new Agent({
 
 > See [SystemPromptScrubber](/reference/v1/processors/system-prompt-scrubber) for a full list of configuration options.
 
+:::note
+When streaming responses over HTTP, Mastra redacts sensitive request data (system prompts, tool definitions, API keys) from stream chunks at the server level by default. See [Stream data redaction](/docs/v1/server-db/mastra-server#stream-data-redaction) for details.
+:::
+
 ## Hybrid processors
 
 Hybrid processors can be applied either before messages are sent to the language model or before responses are returned to the user. They are useful for tasks like content moderation and PII redaction.
@@ -211,7 +215,7 @@ export const moderatedAgent = new Agent({
   // ...
   inputProcessors: [
     new ModerationProcessor({
-      model: "openai/gpt-4.1-nano",
+      model: "openrouter/openai/gpt-oss-safeguard-20b",
       threshold: 0.7,
       strategy: "block",
       categories: ["hate", "harassment", "violence"],
@@ -240,7 +244,7 @@ export const privateAgent = new Agent({
   // ...
   inputProcessors: [
     new PIIDetector({
-      model: "openai/gpt-4.1-nano",
+      model: "openrouter/openai/gpt-oss-safeguard-20b",
       threshold: 0.6,
       strategy: "redact",
       redactionMethod: "mask",
@@ -364,6 +368,6 @@ If the built-in processors don’t cover your needs, you can create your own by
 
 Available examples:
 
-- [Message Length Limiter](/examples/v1/processors/message-length-limiter)
-- [Response Length Limiter](/examples/v1/processors/response-length-limiter)
-- [Response Validator](/examples/v1/processors/response-validator)
+- [Message Length Limiter](https://github.com/mastra-ai/mastra/tree/main/examples/processors-message-length-limiter)
+- [Response Length Limiter](https://github.com/mastra-ai/mastra/tree/main/examples/processors-response-length-limiter)
+- [Response Validator](https://github.com/mastra-ai/mastra/tree/main/examples/processors-response-validator)
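
Taken together, the guardrails.mdx hunks swap every processor's moderation model. For orientation, a minimal sketch of the resulting configuration assembled from the hunks above (the agent name and instructions here are illustrative, not package content):

```typescript
import { Agent } from "@mastra/core/agent";
import { ModerationProcessor } from "@mastra/core/processors";

// Sketch: model strings follow the diff above.
export const moderatedAgent = new Agent({
  name: "moderated-agent",
  instructions: "You are a helpful assistant.",
  model: "openai/gpt-5.1",
  inputProcessors: [
    new ModerationProcessor({
      // Updated in this release: was "openai/gpt-4.1-nano"
      model: "openrouter/openai/gpt-oss-safeguard-20b",
      categories: ["hate", "harassment", "violence"],
      threshold: 0.7,
      strategy: "block",
    }),
  ],
});
```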
package/.docs/raw/agents/networks.mdx
@@ -240,3 +240,4 @@ network-execution-event-step-finish
 - [Agent Memory](./agent-memory)
 - [Workflows Overview](../workflows/overview)
 - [Request Context](/docs/v1/server-db/request-context)
+- [Supervisor example](https://github.com/mastra-ai/mastra/tree/main/examples/supervisor-agent)
package/.docs/raw/agents/overview.mdx
@@ -21,10 +21,9 @@ An introduction to agents, and how they compare to workflows on [YouTube (7 minu
 :::
 
 ## Setting up agents
+### Installation
 
-<Tabs>
-<TabItem value="mastra-model-router" label="Model router">
-<Steps>
+<Steps>
 
 <StepItem>
 
@@ -70,56 +69,6 @@ export const testAgent = new Agent({
 </StepItem>
 
 </Steps>
-</TabItem>
-<TabItem value="vercel-ai-sdk" label="Vercel AI SDK">
-<Steps>
-
-<StepItem>
-
-Include the Mastra core package alongside the Vercel AI SDK provider you want to use:
-
-```bash
-npm install @mastra/core@beta @ai-sdk/openai
-```
-
-</StepItem>
-
-<StepItem>
-
-Set the corresponding environment variable for your provider. For OpenAI via the AI SDK:
-
-```bash title=".env" copy
-OPENAI_API_KEY=<your-api-key>
-```
-
-:::note
-
-See the [AI SDK Providers](https://ai-sdk.dev/providers/ai-sdk-providers) in the Vercel AI SDK docs for additional configuration options.
-
-:::
-
-</StepItem>
-
-<StepItem>
-
-To create an agent in Mastra, use the `Agent` class. Every agent must include `instructions` to define its behavior, and a `model` parameter to specify the LLM provider and model. When using the Vercel AI SDK, provide the client to your agent's `model` field:
-
-```typescript title="src/mastra/agents/test-agent.ts" copy
-import { openai } from "@ai-sdk/openai";
-import { Agent } from "@mastra/core/agent";
-
-export const testAgent = new Agent({
-  id: "test-agent",
-  name: "Test Agent",
-  instructions: "You are a helpful assistant.",
-  model: openai("gpt-5.1"),
-});
-```
-
-</StepItem>
-</Steps>
-</TabItem>
-</Tabs>
 
 ### Instruction formats
 
@@ -263,105 +212,11 @@ for await (const chunk of stream.textStream) {
 
 ## Structured output
 
-Agents can return structured, type-safe data by defining the expected output using either [Zod](https://zod.dev/) or [JSON Schema](https://json-schema.org/). We recommend Zod for better TypeScript support and developer experience. The parsed result is available on `response.object`, allowing you to work directly with validated and typed data.
-
-### Using Zod
-
-Define the `output` shape using [Zod](https://zod.dev/):
-
-```typescript showLineNumbers copy
-import { z } from "zod";
-
-const response = await testAgent.generate(
-  [
-    {
-      role: "system",
-      content: "Provide a summary and keywords for the following text:",
-    },
-    {
-      role: "user",
-      content: "Monkey, Ice Cream, Boat",
-    },
-  ],
-  {
-    structuredOutput: {
-      schema: z.object({
-        summary: z.string(),
-        keywords: z.array(z.string()),
-      }),
-    },
-  },
-);
-
-console.log(response.object);
-```
-
-### With Tool Calling
-
-Use the `model` property to ensure that your agent can execute multi-step LLM calls with tool calling.
-
-```typescript showLineNumbers copy
-import { z } from "zod";
-
-const response = await testAgentWithTools.generate(
-  [
-    {
-      role: "system",
-      content: "Provide a summary and keywords for the following text:",
-    },
-    {
-      role: "user",
-      content: "Please use your test tool and let me know the results",
-    },
-  ],
-  {
-    structuredOutput: {
-      schema: z.object({
-        summary: z.string(),
-        keywords: z.array(z.string()),
-      }),
-      model: "openai/gpt-5.1",
-    },
-  },
-);
-
-console.log(response.object);
-console.log(response.toolResults);
-```
-
-### Response format
-
-By default `structuredOutput` will use `response_format` to pass the schema to the model provider. If the model provider does not natively support `response_format` it's possible that this will error or not give the desired results. To keep using the same model use `jsonPromptInjection` to bypass response format and inject a system prompt message to coerce the model to return structured output.
-
-```typescript showLineNumbers copy
-import { z } from "zod";
-
-const response = await testAgentThatDoesntSupportStructuredOutput.generate(
-  [
-    {
-      role: "system",
-      content: "Provide a summary and keywords for the following text:",
-    },
-    {
-      role: "user",
-      content: "Monkey, Ice Cream, Boat",
-    },
-  ],
-  {
-    structuredOutput: {
-      schema: z.object({
-        summary: z.string(),
-        keywords: z.array(z.string()),
-      }),
-      jsonPromptInjection: true,
-    },
-  },
-);
+Agents can return structured, type-safe data using Zod or JSON Schema. The parsed result is available on `response.object`.
 
-console.log(response.object);
-```
+> See [Structured Output](/docs/v1/agents/structured-output) for more information.
 
-## Working with images
+## Analyzing images
 
 Agents can analyze and describe images by processing both the visual content and any text within them. To enable image analysis, pass an object with `type: 'image'` and the image URL in the `content` array. You can combine image content with text prompts to guide the agent's analysis.
 
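
The detailed examples now live on the new structured-output.mdx page added in this release (see the file list above). The core usage, reconstructed from the removed lines, looks like this (`testAgent` is assumed to be an existing agent instance):

```typescript
import { z } from "zod";

const response = await testAgent.generate(
  [
    { role: "system", content: "Provide a summary and keywords for the following text:" },
    { role: "user", content: "Monkey, Ice Cream, Boat" },
  ],
  {
    structuredOutput: {
      schema: z.object({
        summary: z.string(),
        keywords: z.array(z.string()),
      }),
      // Per the removed docs: for providers without native
      // response_format support, set jsonPromptInjection: true.
    },
  },
);

console.log(response.object); // typed as { summary: string; keywords: string[] }
```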
@@ -386,7 +241,8 @@ const response = await testAgent.generate([
 console.log(response.text);
 ```
 
-### Using `maxSteps`
+
+## Using `maxSteps`
 
 The `maxSteps` parameter controls the maximum number of sequential LLM calls an agent can make. Each step includes generating a response, executing any tool calls, and processing the result. Limiting steps helps prevent infinite loops, reduce latency, and control token usage for agents that use tools. The default is 1, but can be increased:
 
@@ -398,7 +254,7 @@ const response = await testAgent.generate("Help me organize my day", {
 console.log(response.text);
 ```
 
-### Using `onStepFinish`
+## Using `onStepFinish`
 
 You can monitor the progress of multi-step operations using the `onStepFinish` callback. This is useful for debugging or providing progress updates to users.
 
package/.docs/raw/agents/processors.mdx
@@ -0,0 +1,279 @@
+---
+title: "Processors | Agents | Mastra Docs"
+description: "Learn how to use input and output processors to transform, validate, and control messages in Mastra agents."
+---
+
+# Processors
+
+Processors transform, validate, or control messages as they pass through an agent. They run at specific points in the agent's execution pipeline, allowing you to modify inputs before they reach the language model or outputs before they're returned to users.
+
+Processors are configured as:
+
+- **`inputProcessors`**: Run before messages reach the language model.
+- **`outputProcessors`**: Run after the language model generates a response, but before it's returned to users.
+
+Some processors implement both input and output logic and can be used in either array depending on where the transformation should occur.
+
+## When to use processors
+
+Use processors to:
+
+- Normalize or validate user input
+- Add guardrails to your agent
+- Detect and prevent prompt injection or jailbreak attempts
+- Moderate content for safety or compliance
+- Transform messages (e.g., translate languages, filter tool calls)
+- Limit token usage or message history length
+- Redact sensitive information (PII)
+- Apply custom business logic to messages
+
+Mastra includes several processors for common use cases. You can also create custom processors for application-specific requirements.
+
+## Adding processors to an agent
+
+Import and instantiate the processor, then pass it to the agent's `inputProcessors` or `outputProcessors` array:
+
+```typescript {3,9-15} title="src/mastra/agents/moderated-agent.ts" showLineNumbers copy
+import { openai } from "@ai-sdk/openai";
+import { Agent } from "@mastra/core/agent";
+import { ModerationProcessor } from "@mastra/core/processors";
+
+export const moderatedAgent = new Agent({
+  name: "moderated-agent",
+  instructions: "You are a helpful assistant",
+  model: openai("gpt-4o-mini"),
+  inputProcessors: [
+    new ModerationProcessor({
+      model: openai("gpt-4.1-nano"),
+      categories: ["hate", "harassment", "violence"],
+      threshold: 0.7,
+      strategy: "block",
+    }),
+  ],
+});
+```
+
+## Execution order
+
+Processors run in the order they appear in the array:
+
+```typescript
+inputProcessors: [
+  new UnicodeNormalizer(),
+  new PromptInjectionDetector(),
+  new ModerationProcessor(),
+];
+```
+
+For output processors, the order determines the sequence of transformations applied to the model's response.
+
+### With memory enabled
+
+When memory is enabled on an agent, memory processors are automatically added to the pipeline:
+
+**Input processors:**
+```
+[Memory Processors] → [Your inputProcessors]
+```
+Memory loads conversation history first, then your processors run.
+
+**Output processors:**
+```
+[Your outputProcessors] → [Memory Processors]
+```
+Your processors run first, then memory persists messages.
+
+This ordering ensures that if your output guardrail calls `abort()`, memory processors are skipped and no messages are saved. See [Memory Processors](/docs/v1/memory/memory-processors#processor-execution-order) for details.
+
+## Creating custom processors
+
+Custom processors implement the `Processor` interface:
+
+### Custom input processor
+
+```typescript title="src/mastra/processors/custom-input.ts" showLineNumbers copy
+import type {
+  Processor,
+  MastraDBMessage,
+  RequestContext,
+} from "@mastra/core";
+
+export class CustomInputProcessor implements Processor {
+  id = "custom-input";
+
+  async processInput({
+    messages,
+    systemMessages,
+    context,
+  }: {
+    messages: MastraDBMessage[];
+    systemMessages: CoreMessage[];
+    context: RequestContext;
+  }): Promise<MastraDBMessage[]> {
+    // Transform messages before they reach the LLM
+    return messages.map((msg) => ({
+      ...msg,
+      content: {
+        ...msg.content,
+        content: msg.content.content.toLowerCase(),
+      },
+    }));
+  }
+}
+```
+
+The `processInput` method receives:
+- `messages`: User and assistant messages (not system messages)
+- `systemMessages`: All system messages (agent instructions, memory context, user-provided system prompts)
+- `messageList`: The full MessageList instance for advanced use cases
+- `abort`: Function to stop processing and return early
+- `requestContext`: Execution metadata like `threadId` and `resourceId`
+
+The method can return:
+- `MastraDBMessage[]` — Transformed messages array (backward compatible)
+- `{ messages: MastraDBMessage[]; systemMessages: CoreMessage[] }` — Both messages and modified system messages
+
+The framework handles both return formats, so modifying system messages is optional and existing processors continue to work.
+
+### Modifying system messages
+
+To modify system messages (e.g., trim verbose prompts for smaller models), return an object with both `messages` and `systemMessages`:
+
+```typescript title="src/mastra/processors/system-trimmer.ts" showLineNumbers copy
+import type { Processor, CoreMessage, MastraDBMessage } from "@mastra/core";
+
+export class SystemTrimmer implements Processor {
+  id = "system-trimmer";
+
+  async processInput({
+    messages,
+    systemMessages,
+  }): Promise<{ messages: MastraDBMessage[]; systemMessages: CoreMessage[] }> {
+    // Trim system messages for smaller models
+    const trimmedSystemMessages = systemMessages.map((msg) => ({
+      ...msg,
+      content:
+        typeof msg.content === "string"
+          ? msg.content.substring(0, 500)
+          : msg.content,
+    }));
+
+    return { messages, systemMessages: trimmedSystemMessages };
+  }
+}
+```
+
+This is useful for:
+- Trimming verbose system prompts for models with smaller context windows
+- Filtering or modifying semantic recall content to prevent "prompt too long" errors
+- Dynamically adjusting system instructions based on the conversation
+
+### Custom output processor
+
+```typescript title="src/mastra/processors/custom-output.ts" showLineNumbers copy
+import type {
+  Processor,
+  MastraDBMessage,
+  RequestContext,
+} from "@mastra/core";
+
+export class CustomOutputProcessor implements Processor {
+  id = "custom-output";
+
+  async processOutputResult({
+    messages,
+    context,
+  }: {
+    messages: MastraDBMessage[];
+    context: RequestContext;
+  }): Promise<MastraDBMessage[]> {
+    // Transform messages after the LLM generates them
+    return messages.filter((msg) => msg.role !== "system");
+  }
+
+  async processOutputStream({
+    stream,
+    context,
+  }: {
+    stream: ReadableStream;
+    context: RequestContext;
+  }): Promise<ReadableStream> {
+    // Transform streaming responses
+    return stream;
+  }
+}
+```
+
+## Built-in Utility Processors
+
+Mastra provides utility processors for common tasks:
+
+**For security and validation processors**, see the [Guardrails](/docs/v1/agents/guardrails) page for input/output guardrails and moderation processors.
+**For memory-specific processors**, see the [Memory Processors](/docs/v1/memory/memory-processors) page for processors that handle message history, semantic recall, and working memory.
+
+### TokenLimiter
+
+Prevents context window overflow by removing older messages when the total token count exceeds a specified limit.
+
+```typescript copy showLineNumbers {9-12}
+import { Agent } from "@mastra/core/agent";
+import { TokenLimiter } from "@mastra/core/processors";
+import { openai } from "@ai-sdk/openai";
+
+const agent = new Agent({
+  name: "my-agent",
+  model: openai("gpt-4o"),
+  inputProcessors: [
+    // Ensure the total tokens don't exceed ~127k
+    new TokenLimiter(127000),
+  ],
+});
+```
+
+The `TokenLimiter` uses the `o200k_base` encoding by default (suitable for GPT-4o). You can specify other encodings for different models:
+
+```typescript copy showLineNumbers {6-9}
+import cl100k_base from "js-tiktoken/ranks/cl100k_base";
+
+const agent = new Agent({
+  name: "my-agent",
+  inputProcessors: [
+    new TokenLimiter({
+      limit: 16000, // Example limit for a 16k context model
+      encoding: cl100k_base,
+    }),
+  ],
+});
+```
+
+### ToolCallFilter
+
+Removes tool calls from messages sent to the LLM, saving tokens by excluding potentially verbose tool interactions.
+
+```typescript copy showLineNumbers {5-14}
+import { Agent } from "@mastra/core/agent";
+import { ToolCallFilter, TokenLimiter } from "@mastra/core/processors";
+import { openai } from "@ai-sdk/openai";
+
+const agent = new Agent({
+  name: "my-agent",
+  model: openai("gpt-4o"),
+  inputProcessors: [
+    // Example 1: Remove all tool calls/results
+    new ToolCallFilter(),
+
+    // Example 2: Remove only specific tool calls
+    new ToolCallFilter({ exclude: ["generateImageTool"] }),
+
+    // Always place TokenLimiter last
+    new TokenLimiter(127000),
+  ],
+});
+```
+
+> **Note:** The example above filters tool calls and limits tokens for the LLM, but these filtered messages will still be saved to memory. To also filter messages before they're saved to memory, manually add memory processors before utility processors. See [Memory Processors](/docs/v1/memory/memory-processors#manual-control-and-deduplication) for details.
+
+## Related documentation
+
+- [Guardrails](/docs/v1/agents/guardrails) - Security and validation processors
+- [Memory Processors](/docs/v1/memory/memory-processors) - Memory-specific processors and automatic integration
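
The new page documents an `abort` argument to `processInput` ("Function to stop processing and return early") and notes that memory persistence is skipped when an output guardrail aborts. A minimal sketch of such a guardrail, assuming `abort` is passed to `processOutputResult` analogously to the documented `processInput` signature:

```typescript
import type { Processor, MastraDBMessage } from "@mastra/core";

// Hedged sketch: the abort parameter on processOutputResult is an
// assumption by analogy with the documented processInput signature.
export class BannedWordGuardrail implements Processor {
  id = "banned-word-guardrail";

  async processOutputResult({
    messages,
    abort,
  }: {
    messages: MastraDBMessage[];
    abort: (reason?: string) => never;
  }): Promise<MastraDBMessage[]> {
    const text = JSON.stringify(messages);
    if (text.includes("forbidden")) {
      // Stops the pipeline; per the docs above, memory processors
      // are then skipped and no messages are saved.
      abort("Response contained a banned word");
    }
    return messages;
  }
}
```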