@mastra/mcp-docs-server 1.0.0-beta.4 → 1.0.0-beta.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (286) hide show
  1. package/.docs/organized/changelogs/%40internal%2Fstorage-test-utils.md +201 -1
  2. package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +201 -1
  3. package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +201 -1
  4. package/.docs/organized/changelogs/%40mastra%2Fastra.md +201 -1
  5. package/.docs/organized/changelogs/%40mastra%2Fchroma.md +201 -1
  6. package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +201 -1
  7. package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +201 -1
  8. package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +201 -1
  9. package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +201 -1
  10. package/.docs/organized/changelogs/%40mastra%2Fconvex.md +29 -0
  11. package/.docs/organized/changelogs/%40mastra%2Fcore.md +411 -211
  12. package/.docs/organized/changelogs/%40mastra%2Fcouchbase.md +201 -1
  13. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +201 -1
  14. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +201 -1
  15. package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +201 -1
  16. package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +201 -1
  17. package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +201 -1
  18. package/.docs/organized/changelogs/%40mastra%2Fduckdb.md +42 -0
  19. package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +201 -1
  20. package/.docs/organized/changelogs/%40mastra%2Felasticsearch.md +52 -0
  21. package/.docs/organized/changelogs/%40mastra%2Fevals.md +201 -1
  22. package/.docs/organized/changelogs/%40mastra%2Flance.md +201 -1
  23. package/.docs/organized/changelogs/%40mastra%2Flibsql.md +201 -1
  24. package/.docs/organized/changelogs/%40mastra%2Floggers.md +201 -1
  25. package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +201 -1
  26. package/.docs/organized/changelogs/%40mastra%2Fmcp-registry-registry.md +201 -1
  27. package/.docs/organized/changelogs/%40mastra%2Fmcp.md +201 -1
  28. package/.docs/organized/changelogs/%40mastra%2Fmemory.md +201 -1
  29. package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +201 -1
  30. package/.docs/organized/changelogs/%40mastra%2Fmssql.md +201 -1
  31. package/.docs/organized/changelogs/%40mastra%2Fopensearch.md +201 -1
  32. package/.docs/organized/changelogs/%40mastra%2Fpg.md +201 -1
  33. package/.docs/organized/changelogs/%40mastra%2Fpinecone.md +201 -1
  34. package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +201 -1
  35. package/.docs/organized/changelogs/%40mastra%2Fqdrant.md +201 -1
  36. package/.docs/organized/changelogs/%40mastra%2Frag.md +201 -1
  37. package/.docs/organized/changelogs/%40mastra%2Freact.md +89 -1
  38. package/.docs/organized/changelogs/%40mastra%2Fs3vectors.md +9 -0
  39. package/.docs/organized/changelogs/%40mastra%2Fschema-compat.md +42 -0
  40. package/.docs/organized/changelogs/%40mastra%2Fserver.md +201 -1
  41. package/.docs/organized/changelogs/%40mastra%2Fturbopuffer.md +201 -1
  42. package/.docs/organized/changelogs/%40mastra%2Fupstash.md +201 -1
  43. package/.docs/organized/changelogs/%40mastra%2Fvectorize.md +201 -1
  44. package/.docs/organized/changelogs/%40mastra%2Fvoice-azure.md +201 -1
  45. package/.docs/organized/changelogs/%40mastra%2Fvoice-cloudflare.md +201 -1
  46. package/.docs/organized/changelogs/%40mastra%2Fvoice-deepgram.md +201 -1
  47. package/.docs/organized/changelogs/%40mastra%2Fvoice-elevenlabs.md +201 -1
  48. package/.docs/organized/changelogs/%40mastra%2Fvoice-gladia.md +92 -1
  49. package/.docs/organized/changelogs/%40mastra%2Fvoice-google-gemini-live.md +67 -1
  50. package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +201 -1
  51. package/.docs/organized/changelogs/%40mastra%2Fvoice-murf.md +201 -1
  52. package/.docs/organized/changelogs/%40mastra%2Fvoice-openai-realtime.md +201 -1
  53. package/.docs/organized/changelogs/%40mastra%2Fvoice-openai.md +201 -1
  54. package/.docs/organized/changelogs/%40mastra%2Fvoice-playai.md +201 -1
  55. package/.docs/organized/changelogs/%40mastra%2Fvoice-sarvam.md +201 -1
  56. package/.docs/organized/changelogs/%40mastra%2Fvoice-speechify.md +201 -1
  57. package/.docs/organized/changelogs/create-mastra.md +201 -1
  58. package/.docs/organized/changelogs/mastra.md +201 -1
  59. package/.docs/organized/code-examples/agui.md +1 -0
  60. package/.docs/organized/code-examples/ai-sdk-v5.md +1 -0
  61. package/.docs/organized/code-examples/mcp-server-adapters.md +721 -0
  62. package/.docs/organized/code-examples/memory-with-processors.md +1 -1
  63. package/.docs/organized/code-examples/quick-start.md +1 -1
  64. package/.docs/organized/code-examples/server-app-access.md +342 -0
  65. package/.docs/raw/agents/adding-voice.mdx +7 -10
  66. package/.docs/raw/agents/agent-approval.mdx +189 -0
  67. package/.docs/raw/agents/guardrails.mdx +26 -23
  68. package/.docs/raw/agents/networks.mdx +2 -2
  69. package/.docs/raw/agents/overview.mdx +27 -62
  70. package/.docs/raw/agents/processors.mdx +279 -0
  71. package/.docs/raw/agents/using-tools.mdx +4 -5
  72. package/.docs/raw/course/01-first-agent/05-running-playground.md +5 -5
  73. package/.docs/raw/course/01-first-agent/09-testing-your-agent.md +3 -3
  74. package/.docs/raw/course/01-first-agent/13-testing-your-tool.md +3 -3
  75. package/.docs/raw/course/01-first-agent/17-testing-memory.md +2 -2
  76. package/.docs/raw/course/04-workflows/07-using-playground.md +1 -1
  77. package/.docs/raw/deployment/building-mastra.mdx +1 -1
  78. package/.docs/raw/deployment/cloud-providers/amazon-ec2.mdx +1 -1
  79. package/.docs/raw/deployment/cloud-providers/aws-lambda.mdx +1 -1
  80. package/.docs/raw/deployment/cloud-providers/azure-app-services.mdx +1 -1
  81. package/.docs/raw/deployment/cloud-providers/digital-ocean.mdx +1 -1
  82. package/.docs/raw/deployment/cloud-providers/index.mdx +20 -27
  83. package/.docs/raw/deployment/cloud-providers/netlify-deployer.mdx +44 -13
  84. package/.docs/raw/deployment/mastra-cloud/observability.mdx +19 -17
  85. package/.docs/raw/deployment/mastra-cloud/setting-up.mdx +1 -1
  86. package/.docs/raw/deployment/overview.mdx +2 -2
  87. package/.docs/raw/deployment/web-framework.mdx +5 -5
  88. package/.docs/raw/evals/custom-scorers.mdx +3 -5
  89. package/.docs/raw/evals/overview.mdx +2 -3
  90. package/.docs/raw/evals/running-in-ci.mdx +0 -2
  91. package/.docs/raw/{guides/guide → getting-started}/manual-install.mdx +2 -2
  92. package/.docs/raw/getting-started/project-structure.mdx +1 -1
  93. package/.docs/raw/getting-started/start.mdx +72 -0
  94. package/.docs/raw/getting-started/studio.mdx +1 -1
  95. package/.docs/raw/{frameworks/agentic-uis/ai-sdk.mdx → guides/build-your-ui/ai-sdk-ui.mdx} +113 -11
  96. package/.docs/raw/{frameworks/web-frameworks → guides/getting-started}/astro.mdx +23 -25
  97. package/.docs/raw/{frameworks/servers → guides/getting-started}/express.mdx +3 -4
  98. package/.docs/raw/guides/{quickstarts/nextjs.mdx → getting-started/next-js.mdx} +11 -11
  99. package/.docs/raw/guides/{quickstarts/standalone-server.mdx → getting-started/quickstart.mdx} +7 -7
  100. package/.docs/raw/{frameworks/web-frameworks → guides/getting-started}/sveltekit.mdx +23 -25
  101. package/.docs/raw/{frameworks/web-frameworks → guides/getting-started}/vite-react.mdx +7 -7
  102. package/.docs/raw/guides/guide/ai-recruiter.mdx +2 -3
  103. package/.docs/raw/guides/guide/chef-michel.mdx +2 -3
  104. package/.docs/raw/guides/guide/notes-mcp-server.mdx +2 -2
  105. package/.docs/raw/guides/guide/research-assistant.mdx +7 -8
  106. package/.docs/raw/guides/guide/stock-agent.mdx +4 -6
  107. package/.docs/raw/guides/guide/web-search.mdx +12 -10
  108. package/.docs/raw/guides/guide/whatsapp-chat-bot.mdx +421 -0
  109. package/.docs/raw/guides/index.mdx +3 -35
  110. package/.docs/raw/guides/migrations/agentnetwork.mdx +4 -4
  111. package/.docs/raw/guides/migrations/ai-sdk-v4-to-v5.mdx +1 -1
  112. package/.docs/raw/guides/migrations/upgrade-to-v1/agent.mdx +40 -0
  113. package/.docs/raw/guides/migrations/upgrade-to-v1/tools.mdx +5 -0
  114. package/.docs/raw/guides/migrations/upgrade-to-v1/workflows.mdx +51 -0
  115. package/.docs/raw/guides/migrations/vnext-to-standard-apis.mdx +2 -2
  116. package/.docs/raw/index.mdx +2 -2
  117. package/.docs/raw/mcp/overview.mdx +3 -5
  118. package/.docs/raw/memory/memory-processors.mdx +264 -79
  119. package/.docs/raw/memory/semantic-recall.mdx +7 -7
  120. package/.docs/raw/memory/storage/memory-with-libsql.mdx +2 -4
  121. package/.docs/raw/memory/storage/memory-with-mongodb.mdx +2 -4
  122. package/.docs/raw/memory/storage/memory-with-pg.mdx +2 -4
  123. package/.docs/raw/memory/storage/memory-with-upstash.mdx +2 -4
  124. package/.docs/raw/memory/threads-and-resources.mdx +3 -3
  125. package/.docs/raw/memory/working-memory.mdx +14 -7
  126. package/.docs/raw/{logging.mdx → observability/logging.mdx} +1 -1
  127. package/.docs/raw/observability/overview.mdx +2 -3
  128. package/.docs/raw/observability/tracing/bridges/otel.mdx +176 -0
  129. package/.docs/raw/observability/tracing/exporters/arize.mdx +17 -0
  130. package/.docs/raw/observability/tracing/exporters/braintrust.mdx +19 -0
  131. package/.docs/raw/observability/tracing/exporters/langfuse.mdx +20 -0
  132. package/.docs/raw/observability/tracing/exporters/langsmith.mdx +12 -0
  133. package/.docs/raw/observability/tracing/exporters/otel.mdx +25 -5
  134. package/.docs/raw/observability/tracing/exporters/posthog.mdx +107 -0
  135. package/.docs/raw/observability/tracing/overview.mdx +74 -8
  136. package/.docs/raw/observability/tracing/processors/sensitive-data-filter.mdx +0 -1
  137. package/.docs/raw/rag/chunking-and-embedding.mdx +16 -17
  138. package/.docs/raw/rag/overview.mdx +3 -2
  139. package/.docs/raw/rag/retrieval.mdx +43 -38
  140. package/.docs/raw/rag/vector-databases.mdx +93 -2
  141. package/.docs/raw/reference/agents/agent.mdx +7 -10
  142. package/.docs/raw/reference/agents/generate.mdx +55 -6
  143. package/.docs/raw/reference/agents/generateLegacy.mdx +2 -2
  144. package/.docs/raw/reference/agents/getLLM.mdx +1 -1
  145. package/.docs/raw/reference/agents/network.mdx +46 -3
  146. package/.docs/raw/reference/cli/mastra.mdx +2 -1
  147. package/.docs/raw/reference/client-js/agents.mdx +3 -3
  148. package/.docs/raw/reference/client-js/memory.mdx +43 -0
  149. package/.docs/raw/reference/client-js/workflows.mdx +92 -63
  150. package/.docs/raw/reference/core/getLogger.mdx +1 -1
  151. package/.docs/raw/reference/core/listLogs.mdx +1 -1
  152. package/.docs/raw/reference/core/listLogsByRunId.mdx +1 -1
  153. package/.docs/raw/reference/core/mastra-model-gateway.mdx +5 -19
  154. package/.docs/raw/reference/core/setLogger.mdx +1 -1
  155. package/.docs/raw/reference/core/setTelemetry.mdx +1 -1
  156. package/.docs/raw/reference/deployer/netlify.mdx +1 -2
  157. package/.docs/raw/reference/evals/answer-relevancy.mdx +28 -98
  158. package/.docs/raw/reference/evals/answer-similarity.mdx +12 -258
  159. package/.docs/raw/reference/evals/bias.mdx +29 -87
  160. package/.docs/raw/reference/evals/completeness.mdx +31 -90
  161. package/.docs/raw/reference/evals/content-similarity.mdx +28 -88
  162. package/.docs/raw/reference/evals/context-precision.mdx +28 -130
  163. package/.docs/raw/reference/evals/context-relevance.mdx +11 -11
  164. package/.docs/raw/reference/evals/faithfulness.mdx +28 -101
  165. package/.docs/raw/reference/evals/hallucination.mdx +28 -103
  166. package/.docs/raw/reference/evals/keyword-coverage.mdx +28 -107
  167. package/.docs/raw/reference/evals/noise-sensitivity.mdx +11 -11
  168. package/.docs/raw/reference/evals/prompt-alignment.mdx +15 -15
  169. package/.docs/raw/reference/evals/scorer-utils.mdx +362 -0
  170. package/.docs/raw/reference/evals/textual-difference.mdx +27 -100
  171. package/.docs/raw/reference/evals/tone-consistency.mdx +25 -98
  172. package/.docs/raw/reference/evals/tool-call-accuracy.mdx +7 -7
  173. package/.docs/raw/reference/evals/toxicity.mdx +29 -92
  174. package/.docs/raw/reference/index.mdx +1 -0
  175. package/.docs/raw/reference/memory/memory-class.mdx +5 -7
  176. package/.docs/raw/reference/observability/tracing/bridges/otel.mdx +150 -0
  177. package/.docs/raw/reference/observability/tracing/configuration.mdx +0 -4
  178. package/.docs/raw/reference/observability/tracing/exporters/arize.mdx +4 -0
  179. package/.docs/raw/reference/observability/tracing/exporters/langsmith.mdx +17 -1
  180. package/.docs/raw/reference/observability/tracing/exporters/otel.mdx +6 -0
  181. package/.docs/raw/reference/observability/tracing/exporters/posthog.mdx +132 -0
  182. package/.docs/raw/reference/observability/tracing/instances.mdx +0 -4
  183. package/.docs/raw/reference/observability/tracing/interfaces.mdx +29 -4
  184. package/.docs/raw/reference/observability/tracing/spans.mdx +0 -4
  185. package/.docs/raw/reference/processors/batch-parts-processor.mdx +1 -1
  186. package/.docs/raw/reference/processors/language-detector.mdx +10 -3
  187. package/.docs/raw/reference/processors/message-history-processor.mdx +131 -0
  188. package/.docs/raw/reference/processors/moderation-processor.mdx +12 -5
  189. package/.docs/raw/reference/processors/pii-detector.mdx +12 -5
  190. package/.docs/raw/reference/processors/processor-interface.mdx +502 -0
  191. package/.docs/raw/reference/processors/prompt-injection-detector.mdx +10 -3
  192. package/.docs/raw/reference/processors/semantic-recall-processor.mdx +197 -0
  193. package/.docs/raw/reference/processors/system-prompt-scrubber.mdx +3 -4
  194. package/.docs/raw/reference/processors/token-limiter-processor.mdx +2 -2
  195. package/.docs/raw/reference/processors/tool-call-filter.mdx +125 -0
  196. package/.docs/raw/reference/processors/unicode-normalizer.mdx +1 -1
  197. package/.docs/raw/reference/processors/working-memory-processor.mdx +221 -0
  198. package/.docs/raw/reference/rag/embeddings.mdx +5 -5
  199. package/.docs/raw/reference/rag/rerank.mdx +1 -2
  200. package/.docs/raw/reference/rag/rerankWithScorer.mdx +0 -1
  201. package/.docs/raw/reference/storage/cloudflare-d1.mdx +37 -0
  202. package/.docs/raw/reference/storage/convex.mdx +164 -0
  203. package/.docs/raw/reference/storage/lance.mdx +33 -0
  204. package/.docs/raw/reference/storage/libsql.mdx +37 -0
  205. package/.docs/raw/reference/storage/mongodb.mdx +39 -0
  206. package/.docs/raw/reference/storage/mssql.mdx +37 -0
  207. package/.docs/raw/reference/storage/postgresql.mdx +37 -0
  208. package/.docs/raw/reference/streaming/ChunkType.mdx +1 -1
  209. package/.docs/raw/reference/streaming/agents/stream.mdx +64 -2
  210. package/.docs/raw/reference/streaming/workflows/observeStream.mdx +7 -9
  211. package/.docs/raw/reference/streaming/workflows/{resumeStreamVNext.mdx → resumeStream.mdx} +51 -11
  212. package/.docs/raw/reference/streaming/workflows/stream.mdx +83 -24
  213. package/.docs/raw/reference/templates/overview.mdx +1 -4
  214. package/.docs/raw/reference/tools/client.mdx +1 -2
  215. package/.docs/raw/reference/tools/create-tool.mdx +132 -0
  216. package/.docs/raw/reference/tools/graph-rag-tool.mdx +5 -5
  217. package/.docs/raw/reference/tools/mcp-client.mdx +76 -21
  218. package/.docs/raw/reference/tools/mcp-server.mdx +1 -2
  219. package/.docs/raw/reference/tools/vector-query-tool.mdx +14 -15
  220. package/.docs/raw/reference/vectors/chroma.mdx +81 -1
  221. package/.docs/raw/reference/vectors/convex.mdx +429 -0
  222. package/.docs/raw/reference/vectors/couchbase.mdx +24 -17
  223. package/.docs/raw/reference/vectors/duckdb.mdx +462 -0
  224. package/.docs/raw/reference/vectors/elasticsearch.mdx +310 -0
  225. package/.docs/raw/reference/vectors/lance.mdx +38 -22
  226. package/.docs/raw/reference/vectors/libsql.mdx +35 -2
  227. package/.docs/raw/reference/vectors/mongodb.mdx +35 -2
  228. package/.docs/raw/reference/vectors/opensearch.mdx +37 -16
  229. package/.docs/raw/reference/vectors/pg.mdx +43 -36
  230. package/.docs/raw/reference/vectors/pinecone.mdx +48 -1
  231. package/.docs/raw/reference/vectors/qdrant.mdx +36 -1
  232. package/.docs/raw/reference/vectors/turbopuffer.mdx +74 -0
  233. package/.docs/raw/reference/voice/google.mdx +159 -20
  234. package/.docs/raw/reference/voice/openai-realtime.mdx +2 -2
  235. package/.docs/raw/reference/voice/voice.addInstructions.mdx +2 -3
  236. package/.docs/raw/reference/voice/voice.addTools.mdx +1 -1
  237. package/.docs/raw/reference/voice/voice.answer.mdx +1 -1
  238. package/.docs/raw/reference/voice/voice.close.mdx +1 -1
  239. package/.docs/raw/reference/voice/voice.connect.mdx +1 -1
  240. package/.docs/raw/reference/voice/voice.off.mdx +1 -1
  241. package/.docs/raw/reference/voice/voice.on.mdx +1 -1
  242. package/.docs/raw/reference/voice/voice.send.mdx +1 -1
  243. package/.docs/raw/reference/voice/voice.updateConfig.mdx +1 -1
  244. package/.docs/raw/reference/workflows/run-methods/restart.mdx +142 -0
  245. package/.docs/raw/reference/workflows/run-methods/resume.mdx +44 -0
  246. package/.docs/raw/reference/workflows/run-methods/start.mdx +44 -0
  247. package/.docs/raw/reference/workflows/run.mdx +13 -5
  248. package/.docs/raw/reference/workflows/step.mdx +13 -0
  249. package/.docs/raw/reference/workflows/workflow.mdx +19 -0
  250. package/.docs/raw/server-db/mastra-client.mdx +1 -2
  251. package/.docs/raw/server-db/mastra-server.mdx +30 -1
  252. package/.docs/raw/server-db/request-context.mdx +0 -1
  253. package/.docs/raw/server-db/storage.mdx +11 -0
  254. package/.docs/raw/streaming/overview.mdx +26 -15
  255. package/.docs/raw/streaming/tool-streaming.mdx +48 -5
  256. package/.docs/raw/streaming/workflow-streaming.mdx +5 -11
  257. package/.docs/raw/tools-mcp/advanced-usage.mdx +1 -2
  258. package/.docs/raw/tools-mcp/mcp-overview.mdx +3 -5
  259. package/.docs/raw/voice/overview.mdx +21 -41
  260. package/.docs/raw/voice/speech-to-speech.mdx +4 -4
  261. package/.docs/raw/voice/speech-to-text.mdx +1 -2
  262. package/.docs/raw/voice/text-to-speech.mdx +1 -2
  263. package/.docs/raw/workflows/control-flow.mdx +180 -0
  264. package/.docs/raw/workflows/error-handling.mdx +1 -0
  265. package/.docs/raw/workflows/human-in-the-loop.mdx +4 -4
  266. package/.docs/raw/workflows/overview.mdx +56 -44
  267. package/.docs/raw/workflows/snapshots.mdx +1 -0
  268. package/.docs/raw/workflows/suspend-and-resume.mdx +85 -16
  269. package/.docs/raw/workflows/time-travel.mdx +313 -0
  270. package/.docs/raw/workflows/workflow-state.mdx +191 -0
  271. package/CHANGELOG.md +18 -0
  272. package/dist/{chunk-5NJC7NRO.js → chunk-4CM2BQNP.js} +24 -4
  273. package/dist/prepare-docs/package-changes.d.ts.map +1 -1
  274. package/dist/prepare-docs/prepare.js +1 -1
  275. package/dist/stdio.js +1 -1
  276. package/package.json +7 -7
  277. package/.docs/raw/agents/human-in-the-loop-with-tools.mdx +0 -90
  278. package/.docs/raw/frameworks/agentic-uis/cedar-os.mdx +0 -102
  279. package/.docs/raw/frameworks/agentic-uis/openrouter.mdx +0 -179
  280. package/.docs/raw/frameworks/web-frameworks/next-js.mdx +0 -379
  281. package/.docs/raw/getting-started/quickstart.mdx +0 -27
  282. package/.docs/raw/getting-started/templates.mdx +0 -73
  283. package/.docs/raw/reference/streaming/workflows/observeStreamVNext.mdx +0 -47
  284. package/.docs/raw/reference/streaming/workflows/streamVNext.mdx +0 -153
  285. /package/.docs/raw/{frameworks/agentic-uis → guides/build-your-ui}/assistant-ui.mdx +0 -0
  286. /package/.docs/raw/{frameworks/agentic-uis → guides/build-your-ui}/copilotkit.mdx +0 -0
@@ -22,8 +22,7 @@ Use processors for content moderation, prompt injection prevention, response san
22
22
 
23
23
  Import and instantiate the relevant processor class, and pass it to your agent’s configuration using either the `inputProcessors` or `outputProcessors` option:
24
24
 
25
- ```typescript {3,9-17} title="src/mastra/agents/moderated-agent.ts" showLineNumbers copy
26
- import { openai } from "@ai-sdk/openai";
25
+ ```typescript {2,8-16} title="src/mastra/agents/moderated-agent.ts" showLineNumbers copy
27
26
  import { Agent } from "@mastra/core/agent";
28
27
  import { ModerationProcessor } from "@mastra/core/processors";
29
28
 
@@ -31,10 +30,10 @@ export const moderatedAgent = new Agent({
31
30
  id: "moderated-agent",
32
31
  name: "Moderated Agent",
33
32
  instructions: "You are a helpful assistant",
34
- model: openai("gpt-4o-mini"),
33
+ model: "openai/gpt-5.1",
35
34
  inputProcessors: [
36
35
  new ModerationProcessor({
37
- model: openai("gpt-4.1-nano"),
36
+ model: "openrouter/openai/gpt-oss-safeguard-20b",
38
37
  categories: ["hate", "harassment", "violence"],
39
38
  threshold: 0.7,
40
39
  strategy: "block",
@@ -52,7 +51,7 @@ Input processors are applied before user messages reach the language model. They
52
51
 
53
52
  The `UnicodeNormalizer` is an input processor that cleans and normalizes user input by unifying Unicode characters, standardizing whitespace, and removing problematic symbols, allowing the LLM to better understand user messages.
54
53
 
55
- ```typescript {6-9} title="src/mastra/agents/normalized-agent.ts" showLineNumbers copy
54
+ ```typescript {8-11} title="src/mastra/agents/normalized-agent.ts" showLineNumbers copy
56
55
  import { UnicodeNormalizer } from "@mastra/core/processors";
57
56
 
58
57
  export const normalizedAgent = new Agent({
@@ -74,7 +73,7 @@ export const normalizedAgent = new Agent({
74
73
 
75
74
  The `PromptInjectionDetector` is an input processor that scans user messages for prompt injection, jailbreak attempts, and system override patterns. It uses an LLM to classify risky input and can block or rewrite it before it reaches the model.
76
75
 
77
- ```typescript {6-11} title="src/mastra/agents/secure-agent.ts" showLineNumbers copy
76
+ ```typescript {8-13} title="src/mastra/agents/secure-agent.ts" showLineNumbers copy
78
77
  import { PromptInjectionDetector } from "@mastra/core/processors";
79
78
 
80
79
  export const secureAgent = new Agent({
@@ -83,7 +82,7 @@ export const secureAgent = new Agent({
83
82
  // ...
84
83
  inputProcessors: [
85
84
  new PromptInjectionDetector({
86
- model: openai("gpt-4.1-nano"),
85
+ model: "openrouter/openai/gpt-oss-safeguard-20b",
87
86
  threshold: 0.8,
88
87
  strategy: "rewrite",
89
88
  detectionTypes: ["injection", "jailbreak", "system-override"],
@@ -98,7 +97,7 @@ export const secureAgent = new Agent({
98
97
 
99
98
  The `LanguageDetector` is an input processor that detects and translates user messages into a target language, enabling multilingual support while maintaining consistent interaction. It uses an LLM to identify the language and perform the translation.
100
99
 
101
- ```typescript {6-11} title="src/mastra/agents/multilingual-agent.ts" showLineNumbers copy
100
+ ```typescript {8-13} title="src/mastra/agents/multilingual-agent.ts" showLineNumbers copy
102
101
  import { LanguageDetector } from "@mastra/core/processors";
103
102
 
104
103
  export const multilingualAgent = new Agent({
@@ -107,7 +106,7 @@ export const multilingualAgent = new Agent({
107
106
  // ...
108
107
  inputProcessors: [
109
108
  new LanguageDetector({
110
- model: openai("gpt-4.1-nano"),
109
+ model: "openrouter/openai/gpt-oss-safeguard-20b",
111
110
  targetLanguages: ["English", "en"],
112
111
  strategy: "translate",
113
112
  threshold: 0.8,
@@ -126,7 +125,7 @@ Output processors are applied after the language model generates a response, but
126
125
 
127
126
  The `BatchPartsProcessor` is an output processor that combines multiple stream parts before emitting them to the client. This reduces network overhead and improves the user experience by consolidating small chunks into larger batches.
128
127
 
129
- ```typescript {6-10} title="src/mastra/agents/batched-agent.ts" showLineNumbers copy
128
+ ```typescript {8-12} title="src/mastra/agents/batched-agent.ts" showLineNumbers copy
130
129
  import { BatchPartsProcessor } from "@mastra/core/processors";
131
130
 
132
131
  export const batchedAgent = new Agent({
@@ -149,7 +148,7 @@ export const batchedAgent = new Agent({
149
148
 
150
149
  The `TokenLimiterProcessor` is an output processor that limits the number of tokens in model responses. It helps manage cost and performance by truncating or blocking messages when the limit is exceeded.
151
150
 
152
- ```typescript {6-10, 13-15} title="src/mastra/agents/limited-agent.ts" showLineNumbers copy
151
+ ```typescript {8-12} title="src/mastra/agents/limited-agent.ts" showLineNumbers copy
153
152
  import { TokenLimiterProcessor } from "@mastra/core/processors";
154
153
 
155
154
  export const limitedAgent = new Agent({
@@ -172,7 +171,7 @@ export const limitedAgent = new Agent({
172
171
 
173
172
  The `SystemPromptScrubber` is an output processor that detects and redacts system prompts or other internal instructions from model responses. It helps prevent unintended disclosure of prompt content or configuration details that could introduce security risks. It uses an LLM to identify and redact sensitive content based on configured detection types.
174
173
 
175
- ```typescript {5-13} title="src/mastra/agents/scrubbed-agent.ts" copy showLineNumbers
174
+ ```typescript {7-16} title="src/mastra/agents/scrubbed-agent.ts" copy showLineNumbers
176
175
  import { SystemPromptScrubber } from "@mastra/core/processors";
177
176
 
178
177
  const scrubbedAgent = new Agent({
@@ -180,7 +179,7 @@ const scrubbedAgent = new Agent({
180
179
  name: "Scrubbed Agent",
181
180
  outputProcessors: [
182
181
  new SystemPromptScrubber({
183
- model: openai("gpt-4.1-nano"),
182
+ model: "openrouter/openai/gpt-oss-safeguard-20b",
184
183
  strategy: "redact",
185
184
  customPatterns: ["system prompt", "internal instructions"],
186
185
  includeDetections: true,
@@ -195,6 +194,10 @@ const scrubbedAgent = new Agent({
195
194
 
196
195
  > See [SystemPromptScrubber](/reference/v1/processors/system-prompt-scrubber) for a full list of configuration options.
197
196
 
197
+ :::note
198
+ When streaming responses over HTTP, Mastra redacts sensitive request data (system prompts, tool definitions, API keys) from stream chunks at the server level by default. See [Stream data redaction](/docs/v1/server-db/mastra-server#stream-data-redaction) for details.
199
+ :::
200
+
198
201
  ## Hybrid processors
199
202
 
200
203
  Hybrid processors can be applied either before messages are sent to the language model or before responses are returned to the user. They are useful for tasks like content moderation and PII redaction.
@@ -203,7 +206,7 @@ Hybrid processors can be applied either before messages are sent to the language
203
206
 
204
207
  The `ModerationProcessor` is a hybrid processor that detects inappropriate or harmful content across categories like hate, harassment, and violence. It can be used to moderate either user input or model output, depending on where it's applied. It uses an LLM to classify the message and can block or rewrite it based on your configuration.
205
208
 
206
- ```typescript {6-11, 14-16} title="src/mastra/agents/moderated-agent.ts" showLineNumbers copy
209
+ ```typescript {8-13,16-18} title="src/mastra/agents/moderated-agent.ts" showLineNumbers copy
207
210
  import { ModerationProcessor } from "@mastra/core/processors";
208
211
 
209
212
  export const moderatedAgent = new Agent({
@@ -212,7 +215,7 @@ export const moderatedAgent = new Agent({
212
215
  // ...
213
216
  inputProcessors: [
214
217
  new ModerationProcessor({
215
- model: openai("gpt-4.1-nano"),
218
+ model: "openrouter/openai/gpt-oss-safeguard-20b",
216
219
  threshold: 0.7,
217
220
  strategy: "block",
218
221
  categories: ["hate", "harassment", "violence"],
@@ -232,7 +235,7 @@ export const moderatedAgent = new Agent({
232
235
 
233
236
  The `PIIDetector` is a hybrid processor that detects and removes personally identifiable information such as emails, phone numbers, and credit cards. It can redact either user input or model output, depending on where it's applied. It uses an LLM to identify sensitive content based on configured detection types.
234
237
 
235
- ```typescript {6-13, 16-18} title="src/mastra/agents/private-agent.ts" showLineNumbers copy
238
+ ```typescript {8-15,18-20} title="src/mastra/agents/private-agent.ts" showLineNumbers copy
236
239
  import { PIIDetector } from "@mastra/core/processors";
237
240
 
238
241
  export const privateAgent = new Agent({
@@ -241,7 +244,7 @@ export const privateAgent = new Agent({
241
244
  // ...
242
245
  inputProcessors: [
243
246
  new PIIDetector({
244
- model: openai("gpt-4.1-nano"),
247
+ model: "openrouter/openai/gpt-oss-safeguard-20b",
245
248
  threshold: 0.6,
246
249
  strategy: "redact",
247
250
  redactionMethod: "mask",
@@ -306,7 +309,7 @@ Many of the built-in processors support a `strategy` parameter that controls how
306
309
 
307
310
  Most strategies allow the request to continue without interruption. When `block` is used, the processor calls its internal `abort()` function, which immediately stops the request and prevents any subsequent processors from running.
308
311
 
309
- ```typescript {8} title="src/mastra/agents/private-agent.ts" showLineNumbers copy
312
+ ```typescript {10} title="src/mastra/agents/private-agent.ts" showLineNumbers copy
310
313
  import { PIIDetector } from "@mastra/core/processors";
311
314
 
312
315
  export const privateAgent = new Agent({
@@ -330,7 +333,7 @@ For example, if an agent uses the `PIIDetector` with `strategy: "block"` and the
330
333
 
331
334
  #### `.generate()` example
332
335
 
333
- ```typescript {3-4, } showLineNumbers
336
+ ```typescript showLineNumbers
334
337
  const result = await agent.generate(
335
338
  "Is this credit card number valid?: 4543 1374 5089 4332",
336
339
  );
@@ -341,7 +344,7 @@ console.error(result.tripwireReason);
341
344
 
342
345
  #### `.stream()` example
343
346
 
344
- ```typescript {4-5} showLineNumbers
347
+ ```typescript showLineNumbers
345
348
  const stream = await agent.stream(
346
349
  "Is this credit card number valid?: 4543 1374 5089 4332",
347
350
  );
@@ -365,6 +368,6 @@ If the built-in processors don’t cover your needs, you can create your own by
365
368
 
366
369
  Available examples:
367
370
 
368
- - [Message Length Limiter](/examples/v1/processors/message-length-limiter)
369
- - [Response Length Limiter](/examples/v1/processors/response-length-limiter)
370
- - [Response Validator](/examples/v1/processors/response-validator)
371
+ - [Message Length Limiter](https://github.com/mastra-ai/mastra/tree/main/examples/processors-message-length-limiter)
372
+ - [Response Length Limiter](https://github.com/mastra-ai/mastra/tree/main/examples/processors-response-length-limiter)
373
+ - [Response Validator](https://github.com/mastra-ai/mastra/tree/main/examples/processors-response-validator)
@@ -24,7 +24,6 @@ Mastra agent networks operate using these principles:
24
24
  An agent network is built around a top-level routing agent that delegates tasks to agents, workflows, and tools defined in its configuration. Memory is configured on the routing agent using the `memory` option, and `instructions` define the agent's routing behavior.
25
25
 
26
26
  ```typescript {22-23,26,29} title="src/mastra/agents/routing-agent.ts" showLineNumbers copy
27
- import { openai } from "@ai-sdk/openai";
28
27
  import { Agent } from "@mastra/core/agent";
29
28
  import { Memory } from "@mastra/memory";
30
29
  import { LibSQLStore } from "@mastra/libsql";
@@ -44,7 +43,7 @@ export const routingAgent = new Agent({
44
43
  Always respond with a complete report—no bullet points.
45
44
  Write in full paragraphs, like a blog post.
46
45
  Do not answer with incomplete or uncertain information.`,
47
- model: openai("gpt-4o-mini"),
46
+ model: "openai/gpt-5.1",
48
47
  agents: {
49
48
  researchAgent,
50
49
  writingAgent,
@@ -241,3 +240,4 @@ network-execution-event-step-finish
241
240
  - [Agent Memory](./agent-memory)
242
241
  - [Workflows Overview](../workflows/overview)
243
242
  - [Request Context](/docs/v1/server-db/request-context)
243
+ - [Supervisor example](https://github.com/mastra-ai/mastra/tree/main/examples/supervisor-agent)
@@ -21,10 +21,9 @@ An introduction to agents, and how they compare to workflows on [YouTube (7 minu
21
21
  :::
22
22
 
23
23
  ## Setting up agents
24
+ ### Installation
24
25
 
25
- <Tabs>
26
- <TabItem value="mastra-model-router" label="Model router">
27
- <Steps>
26
+ <Steps>
28
27
 
29
28
  <StepItem>
30
29
 
@@ -63,63 +62,13 @@ export const testAgent = new Agent({
63
62
  id: "test-agent",
64
63
  name: "Test Agent",
65
64
  instructions: "You are a helpful assistant.",
66
- model: "openai/gpt-4o-mini",
65
+ model: "openai/gpt-5.1",
67
66
  });
68
67
  ```
69
68
 
70
69
  </StepItem>
71
70
 
72
71
  </Steps>
73
- </TabItem>
74
- <TabItem value="vercel-ai-sdk" label="Vercel AI SDK">
75
- <Steps>
76
-
77
- <StepItem>
78
-
79
- Include the Mastra core package alongside the Vercel AI SDK provider you want to use:
80
-
81
- ```bash
82
- npm install @mastra/core@beta @ai-sdk/openai
83
- ```
84
-
85
- </StepItem>
86
-
87
- <StepItem>
88
-
89
- Set the corresponding environment variable for your provider. For OpenAI via the AI SDK:
90
-
91
- ```bash title=".env" copy
92
- OPENAI_API_KEY=<your-api-key>
93
- ```
94
-
95
- :::note
96
-
97
- See the [AI SDK Providers](https://ai-sdk.dev/providers/ai-sdk-providers) in the Vercel AI SDK docs for additional configuration options.
98
-
99
- :::
100
-
101
- </StepItem>
102
-
103
- <StepItem>
104
-
105
- To create an agent in Mastra, use the `Agent` class. Every agent must include `instructions` to define its behavior, and a `model` parameter to specify the LLM provider and model. When using the Vercel AI SDK, provide the client to your agent's `model` field:
106
-
107
- ```typescript title="src/mastra/agents/test-agent.ts" copy
108
- import { openai } from "@ai-sdk/openai";
109
- import { Agent } from "@mastra/core/agent";
110
-
111
- export const testAgent = new Agent({
112
- id: "test-agent",
113
- name: "Test Agent",
114
- instructions: "You are a helpful assistant.",
115
- model: openai("gpt-4o-mini"),
116
- });
117
- ```
118
-
119
- </StepItem>
120
- </Steps>
121
- </TabItem>
122
- </Tabs>
123
72
 
124
73
  ### Instruction formats
125
74
 
@@ -296,9 +245,9 @@ const response = await testAgent.generate(
296
245
  console.log(response.object);
297
246
  ```
298
247
 
299
- ### With Tool Calling
248
+ ### Structuring sub agent
300
249
 
301
- Use the `model` property to ensure that your agent can execute multi-step LLM calls with tool calling.
250
+ Use the `model` property to have a separate agent generate the structured output for you.
302
251
 
303
252
  ```typescript showLineNumbers copy
304
253
  import { z } from "zod";
@@ -320,7 +269,7 @@ const response = await testAgentWithTools.generate(
320
269
  summary: z.string(),
321
270
  keywords: z.array(z.string()),
322
271
  }),
323
- model: "openai/gpt-4o",
272
+ model: "openai/gpt-5.1",
324
273
  },
325
274
  },
326
275
  );
@@ -361,7 +310,22 @@ const response = await testAgentThatDoesntSupportStructuredOutput.generate(
361
310
  console.log(response.object);
362
311
  ```
363
312
 
364
- ## Working with images
313
+ :::info[Gemini 2.5 with tools]
314
+
315
+ Gemini 2.5 models do not support combining `response_format` (structured output) with function calling (tools) in the same API call. If your agent has tools and you're using `structuredOutput` with a Gemini 2.5 model, you must set `jsonPromptInjection: true` to avoid the error `Function calling with a response mime type: 'application/json' is unsupported`.
316
+
317
+ ```typescript
318
+ const response = await agentWithTools.generate("Your prompt", {
319
+ structuredOutput: {
320
+ schema: yourSchema,
321
+ jsonPromptInjection: true, // Required for Gemini 2.5 when tools are present
322
+ },
323
+ });
324
+ ```
325
+
326
+ :::
327
+
328
+ ## Analyzing images
365
329
 
366
330
  Agents can analyze and describe images by processing both the visual content and any text within them. To enable image analysis, pass an object with `type: 'image'` and the image URL in the `content` array. You can combine image content with text prompts to guide the agent's analysis.
367
331
 
@@ -386,7 +350,8 @@ const response = await testAgent.generate([
386
350
  console.log(response.text);
387
351
  ```
388
352
 
389
- ### Using `maxSteps`
353
+
354
+ ## Using `maxSteps`
390
355
 
391
356
  The `maxSteps` parameter controls the maximum number of sequential LLM calls an agent can make. Each step includes generating a response, executing any tool calls, and processing the result. Limiting steps helps prevent infinite loops, reduce latency, and control token usage for agents that use tools. The default is 1, but can be increased:
392
357
 
@@ -398,7 +363,7 @@ const response = await testAgent.generate("Help me organize my day", {
398
363
  console.log(response.text);
399
364
  ```
400
365
 
401
- ### Using `onStepFinish`
366
+ ## Using `onStepFinish`
402
367
 
403
368
  You can monitor the progress of multi-step operations using the `onStepFinish` callback. This is useful for debugging or providing progress updates to users.
404
369
 
@@ -444,8 +409,8 @@ export const testAgent = new Agent({
444
409
  const userTier = requestContext.get("user-tier") as UserTier["user-tier"];
445
410
 
446
411
  return userTier === "enterprise"
447
- ? openai("gpt-4o-mini")
448
- : openai("gpt-4.1-nano");
412
+ ? "openai/gpt-5"
413
+ : "openai/gpt-4.1-nano";
449
414
  },
450
415
  });
451
416
  ```
@@ -0,0 +1,279 @@
1
+ ---
2
+ title: "Processors | Agents | Mastra Docs"
3
+ description: "Learn how to use input and output processors to transform, validate, and control messages in Mastra agents."
4
+ ---
5
+
6
+ # Processors
7
+
8
+ Processors transform, validate, or control messages as they pass through an agent. They run at specific points in the agent's execution pipeline, allowing you to modify inputs before they reach the language model or outputs before they're returned to users.
9
+
10
+ Processors are configured as:
11
+
12
+ - **`inputProcessors`**: Run before messages reach the language model.
13
+ - **`outputProcessors`**: Run after the language model generates a response, but before it's returned to users.
14
+
15
+ Some processors implement both input and output logic and can be used in either array depending on where the transformation should occur.
16
+
17
+ ## When to use processors
18
+
19
+ Use processors to:
20
+
21
+ - Normalize or validate user input
22
+ - Add guardrails to your agent
23
+ - Detect and prevent prompt injection or jailbreak attempts
24
+ - Moderate content for safety or compliance
25
+ - Transform messages (e.g., translate languages, filter tool calls)
26
+ - Limit token usage or message history length
27
+ - Redact sensitive information (PII)
28
+ - Apply custom business logic to messages
29
+
30
+ Mastra includes several processors for common use cases. You can also create custom processors for application-specific requirements.
31
+
32
+ ## Adding processors to an agent
33
+
34
+ Import and instantiate the processor, then pass it to the agent's `inputProcessors` or `outputProcessors` array:
35
+
36
+ ```typescript {3,9-15} title="src/mastra/agents/moderated-agent.ts" showLineNumbers copy
37
+ import { openai } from "@ai-sdk/openai";
38
+ import { Agent } from "@mastra/core/agent";
39
+ import { ModerationProcessor } from "@mastra/core/processors";
40
+
41
+ export const moderatedAgent = new Agent({
42
+ name: "moderated-agent",
43
+ instructions: "You are a helpful assistant",
44
+ model: openai("gpt-4o-mini"),
45
+ inputProcessors: [
46
+ new ModerationProcessor({
47
+ model: openai("gpt-4.1-nano"),
48
+ categories: ["hate", "harassment", "violence"],
49
+ threshold: 0.7,
50
+ strategy: "block",
51
+ }),
52
+ ],
53
+ });
54
+ ```
55
+
56
+ ## Execution order
57
+
58
+ Processors run in the order they appear in the array:
59
+
60
+ ```typescript
61
+ inputProcessors: [
62
+ new UnicodeNormalizer(),
63
+ new PromptInjectionDetector(),
64
+ new ModerationProcessor(),
65
+ ];
66
+ ```
67
+
68
+ For output processors, the order determines the sequence of transformations applied to the model's response.
69
+
70
+ ### With memory enabled
71
+
72
+ When memory is enabled on an agent, memory processors are automatically added to the pipeline:
73
+
74
+ **Input processors:**
75
+ ```
76
+ [Memory Processors] → [Your inputProcessors]
77
+ ```
78
+ Memory loads conversation history first, then your processors run.
79
+
80
+ **Output processors:**
81
+ ```
82
+ [Your outputProcessors] → [Memory Processors]
83
+ ```
84
+ Your processors run first, then memory persists messages.
85
+
86
+ This ordering ensures that if your output guardrail calls `abort()`, memory processors are skipped and no messages are saved. See [Memory Processors](/docs/v1/memory/memory-processors#processor-execution-order) for details.
87
+
88
+ ## Creating custom processors
89
+
90
+ Custom processors implement the `Processor` interface:
91
+
92
+ ### Custom input processor
93
+
94
+ ```typescript title="src/mastra/processors/custom-input.ts" showLineNumbers copy
95
+ import type {
96
+ Processor,
97
+ MastraDBMessage,
98
+ CoreMessage,
+ RequestContext,
99
+ } from "@mastra/core";
100
+
101
+ export class CustomInputProcessor implements Processor {
102
+ id = "custom-input";
103
+
104
+ async processInput({
105
+ messages,
106
+ systemMessages,
107
+ context,
108
+ }: {
109
+ messages: MastraDBMessage[];
110
+ systemMessages: CoreMessage[];
111
+ context: RequestContext;
112
+ }): Promise<MastraDBMessage[]> {
113
+ // Transform messages before they reach the LLM
114
+ return messages.map((msg) => ({
115
+ ...msg,
116
+ content: {
117
+ ...msg.content,
118
+ content: msg.content.content.toLowerCase(),
119
+ },
120
+ }));
121
+ }
122
+ }
123
+ ```
124
+
125
+ The `processInput` method receives:
126
+ - `messages`: User and assistant messages (not system messages)
127
+ - `systemMessages`: All system messages (agent instructions, memory context, user-provided system prompts)
128
+ - `messageList`: The full MessageList instance for advanced use cases
129
+ - `abort`: Function to stop processing and return early
130
+ - `requestContext`: Execution metadata like `threadId` and `resourceId`
131
+
132
+ The method can return:
133
+ - `MastraDBMessage[]` — Transformed messages array (backward compatible)
134
+ - `{ messages: MastraDBMessage[]; systemMessages: CoreMessage[] }` — Both messages and modified system messages
135
+
136
+ The framework handles both return formats, so modifying system messages is optional and existing processors continue to work.
137
+
138
+ ### Modifying system messages
139
+
140
+ To modify system messages (e.g., trim verbose prompts for smaller models), return an object with both `messages` and `systemMessages`:
141
+
142
+ ```typescript title="src/mastra/processors/system-trimmer.ts" showLineNumbers copy
143
+ import type { Processor, CoreMessage, MastraDBMessage } from "@mastra/core";
144
+
145
+ export class SystemTrimmer implements Processor {
146
+ id = "system-trimmer";
147
+
148
+ async processInput({
149
+ messages,
150
+ systemMessages,
151
+ }): Promise<{ messages: MastraDBMessage[]; systemMessages: CoreMessage[] }> {
152
+ // Trim system messages for smaller models
153
+ const trimmedSystemMessages = systemMessages.map((msg) => ({
154
+ ...msg,
155
+ content:
156
+ typeof msg.content === "string"
157
+ ? msg.content.substring(0, 500)
158
+ : msg.content,
159
+ }));
160
+
161
+ return { messages, systemMessages: trimmedSystemMessages };
162
+ }
163
+ }
164
+ ```
165
+
166
+ This is useful for:
167
+ - Trimming verbose system prompts for models with smaller context windows
168
+ - Filtering or modifying semantic recall content to prevent "prompt too long" errors
169
+ - Dynamically adjusting system instructions based on the conversation
170
+
171
+ ### Custom output processor
172
+
173
+ ```typescript title="src/mastra/processors/custom-output.ts" showLineNumbers copy
174
+ import type {
175
+ Processor,
176
+ MastraDBMessage,
177
+ RequestContext,
178
+ } from "@mastra/core";
179
+
180
+ export class CustomOutputProcessor implements Processor {
181
+ id = "custom-output";
182
+
183
+ async processOutputResult({
184
+ messages,
185
+ context,
186
+ }: {
187
+ messages: MastraDBMessage[];
188
+ context: RequestContext;
189
+ }): Promise<MastraDBMessage[]> {
190
+ // Transform messages after the LLM generates them
191
+ return messages.filter((msg) => msg.role !== "system");
192
+ }
193
+
194
+ async processOutputStream({
195
+ stream,
196
+ context,
197
+ }: {
198
+ stream: ReadableStream;
199
+ context: RequestContext;
200
+ }): Promise<ReadableStream> {
201
+ // Transform streaming responses
202
+ return stream;
203
+ }
204
+ }
205
+ ```
206
+
207
+ ## Built-in Utility Processors
208
+
209
+ Mastra provides utility processors for common tasks:
210
+
211
+ - **For security and validation processors**, see the [Guardrails](/docs/v1/agents/guardrails) page for input/output guardrails and moderation processors.
212
+ - **For memory-specific processors**, see the [Memory Processors](/docs/v1/memory/memory-processors) page for processors that handle message history, semantic recall, and working memory.
213
+
214
+ ### TokenLimiter
215
+
216
+ Prevents context window overflow by removing older messages when the total token count exceeds a specified limit.
217
+
218
+ ```typescript copy showLineNumbers {9-12}
219
+ import { Agent } from "@mastra/core/agent";
220
+ import { TokenLimiter } from "@mastra/core/processors";
221
+ import { openai } from "@ai-sdk/openai";
222
+
223
+ const agent = new Agent({
224
+ name: "my-agent",
225
+ model: openai("gpt-4o"),
226
+ inputProcessors: [
227
+ // Ensure the total tokens don't exceed ~127k
228
+ new TokenLimiter(127000),
229
+ ],
230
+ });
231
+ ```
232
+
233
+ The `TokenLimiter` uses the `o200k_base` encoding by default (suitable for GPT-4o). You can specify other encodings for different models:
234
+
235
+ ```typescript copy showLineNumbers {6-9}
236
+ import cl100k_base from "js-tiktoken/ranks/cl100k_base";
237
+
238
+ const agent = new Agent({
239
+ name: "my-agent",
240
+ inputProcessors: [
241
+ new TokenLimiter({
242
+ limit: 16000, // Example limit for a 16k context model
243
+ encoding: cl100k_base,
244
+ }),
245
+ ],
246
+ });
247
+ ```
248
+
249
+ ### ToolCallFilter
250
+
251
+ Removes tool calls from messages sent to the LLM, saving tokens by excluding potentially verbose tool interactions.
252
+
253
+ ```typescript copy showLineNumbers {5-14}
254
+ import { Agent } from "@mastra/core/agent";
255
+ import { ToolCallFilter, TokenLimiter } from "@mastra/core/processors";
256
+ import { openai } from "@ai-sdk/openai";
257
+
258
+ const agent = new Agent({
259
+ name: "my-agent",
260
+ model: openai("gpt-4o"),
261
+ inputProcessors: [
262
+ // Example 1: Remove all tool calls/results
263
+ new ToolCallFilter(),
264
+
265
+ // Example 2: Remove only specific tool calls
266
+ new ToolCallFilter({ exclude: ["generateImageTool"] }),
267
+
268
+ // Always place TokenLimiter last
269
+ new TokenLimiter(127000),
270
+ ],
271
+ });
272
+ ```
273
+
274
+ > **Note:** The example above filters tool calls and limits tokens for the LLM, but these filtered messages will still be saved to memory. To also filter messages before they're saved to memory, manually add memory processors before utility processors. See [Memory Processors](/docs/v1/memory/memory-processors#manual-control-and-deduplication) for details.
275
+
276
+ ## Related documentation
277
+
278
+ - [Guardrails](/docs/v1/agents/guardrails) - Security and validation processors
279
+ - [Memory Processors](/docs/v1/memory/memory-processors) - Memory-specific processors and automatic integration
@@ -47,8 +47,7 @@ To make a tool available to an agent, add it to `tools`. Mentioning available to
47
47
 
48
48
  An agent can use multiple tools to handle more complex tasks by delegating specific parts to individual tools. The agent decides which tools to use based on the user's message, the agent's instructions, and the tool descriptions and schemas.
49
49
 
50
- ```typescript {9,11} title="src/mastra/agents/weather-agent.ts" showLineNumbers copy
51
- import { openai } from "@ai-sdk/openai";
50
+ ```typescript {8,10} title="src/mastra/agents/weather-agent.ts" showLineNumbers copy
52
51
  import { Agent } from "@mastra/core/agent";
53
52
  import { weatherTool } from "../tools/weather-tool";
54
53
 
@@ -58,7 +57,7 @@ export const weatherAgent = new Agent({
58
57
  instructions: `
59
58
  You are a helpful weather assistant.
60
59
  Use the weatherTool to fetch current weather data.`,
61
- model: openai("gpt-4o-mini"),
60
+ model: "openai/gpt-5.1",
62
61
  tools: { weatherTool },
63
62
  });
64
63
  ```
@@ -67,7 +66,7 @@ export const weatherAgent = new Agent({
67
66
 
68
67
  The agent uses the tool's `inputSchema` to infer what data the tool expects. In this case, it extracts `London` as the `location` from the message and passes it to the tool's inputData parameter.
69
68
 
70
- ```typescript {5} title="src/test-tool.ts" showLineNumbers copy
69
+ ```typescript title="src/test-tool.ts" showLineNumbers copy
71
70
  import { mastra } from "./mastra";
72
71
 
73
72
  const agent = mastra.getAgent("weatherAgent");
@@ -79,7 +78,7 @@ const result = await agent.generate("What's the weather in London?");
79
78
 
80
79
  When multiple tools are available, the agent may choose to use one, several, or none, depending on what's needed to answer the query.
81
80
 
82
- ```typescript {6} title="src/mastra/agents/weather-agent.ts" showLineNumbers copy
81
+ ```typescript {8} title="src/mastra/agents/weather-agent.ts" showLineNumbers copy
83
82
  import { weatherTool } from "../tools/weather-tool";
84
83
  import { activitiesTool } from "../tools/activities-tool";
85
84