@mastra/mcp-docs-server 1.0.0-beta.4 → 1.0.0-beta.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (286)
  1. package/.docs/organized/changelogs/%40internal%2Fstorage-test-utils.md +201 -1
  2. package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +201 -1
  3. package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +201 -1
  4. package/.docs/organized/changelogs/%40mastra%2Fastra.md +201 -1
  5. package/.docs/organized/changelogs/%40mastra%2Fchroma.md +201 -1
  6. package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +201 -1
  7. package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +201 -1
  8. package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +201 -1
  9. package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +201 -1
  10. package/.docs/organized/changelogs/%40mastra%2Fconvex.md +29 -0
  11. package/.docs/organized/changelogs/%40mastra%2Fcore.md +411 -211
  12. package/.docs/organized/changelogs/%40mastra%2Fcouchbase.md +201 -1
  13. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +201 -1
  14. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +201 -1
  15. package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +201 -1
  16. package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +201 -1
  17. package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +201 -1
  18. package/.docs/organized/changelogs/%40mastra%2Fduckdb.md +42 -0
  19. package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +201 -1
  20. package/.docs/organized/changelogs/%40mastra%2Felasticsearch.md +52 -0
  21. package/.docs/organized/changelogs/%40mastra%2Fevals.md +201 -1
  22. package/.docs/organized/changelogs/%40mastra%2Flance.md +201 -1
  23. package/.docs/organized/changelogs/%40mastra%2Flibsql.md +201 -1
  24. package/.docs/organized/changelogs/%40mastra%2Floggers.md +201 -1
  25. package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +201 -1
  26. package/.docs/organized/changelogs/%40mastra%2Fmcp-registry-registry.md +201 -1
  27. package/.docs/organized/changelogs/%40mastra%2Fmcp.md +201 -1
  28. package/.docs/organized/changelogs/%40mastra%2Fmemory.md +201 -1
  29. package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +201 -1
  30. package/.docs/organized/changelogs/%40mastra%2Fmssql.md +201 -1
  31. package/.docs/organized/changelogs/%40mastra%2Fopensearch.md +201 -1
  32. package/.docs/organized/changelogs/%40mastra%2Fpg.md +201 -1
  33. package/.docs/organized/changelogs/%40mastra%2Fpinecone.md +201 -1
  34. package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +201 -1
  35. package/.docs/organized/changelogs/%40mastra%2Fqdrant.md +201 -1
  36. package/.docs/organized/changelogs/%40mastra%2Frag.md +201 -1
  37. package/.docs/organized/changelogs/%40mastra%2Freact.md +89 -1
  38. package/.docs/organized/changelogs/%40mastra%2Fs3vectors.md +9 -0
  39. package/.docs/organized/changelogs/%40mastra%2Fschema-compat.md +42 -0
  40. package/.docs/organized/changelogs/%40mastra%2Fserver.md +201 -1
  41. package/.docs/organized/changelogs/%40mastra%2Fturbopuffer.md +201 -1
  42. package/.docs/organized/changelogs/%40mastra%2Fupstash.md +201 -1
  43. package/.docs/organized/changelogs/%40mastra%2Fvectorize.md +201 -1
  44. package/.docs/organized/changelogs/%40mastra%2Fvoice-azure.md +201 -1
  45. package/.docs/organized/changelogs/%40mastra%2Fvoice-cloudflare.md +201 -1
  46. package/.docs/organized/changelogs/%40mastra%2Fvoice-deepgram.md +201 -1
  47. package/.docs/organized/changelogs/%40mastra%2Fvoice-elevenlabs.md +201 -1
  48. package/.docs/organized/changelogs/%40mastra%2Fvoice-gladia.md +92 -1
  49. package/.docs/organized/changelogs/%40mastra%2Fvoice-google-gemini-live.md +67 -1
  50. package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +201 -1
  51. package/.docs/organized/changelogs/%40mastra%2Fvoice-murf.md +201 -1
  52. package/.docs/organized/changelogs/%40mastra%2Fvoice-openai-realtime.md +201 -1
  53. package/.docs/organized/changelogs/%40mastra%2Fvoice-openai.md +201 -1
  54. package/.docs/organized/changelogs/%40mastra%2Fvoice-playai.md +201 -1
  55. package/.docs/organized/changelogs/%40mastra%2Fvoice-sarvam.md +201 -1
  56. package/.docs/organized/changelogs/%40mastra%2Fvoice-speechify.md +201 -1
  57. package/.docs/organized/changelogs/create-mastra.md +201 -1
  58. package/.docs/organized/changelogs/mastra.md +201 -1
  59. package/.docs/organized/code-examples/agui.md +1 -0
  60. package/.docs/organized/code-examples/ai-sdk-v5.md +1 -0
  61. package/.docs/organized/code-examples/mcp-server-adapters.md +721 -0
  62. package/.docs/organized/code-examples/memory-with-processors.md +1 -1
  63. package/.docs/organized/code-examples/quick-start.md +1 -1
  64. package/.docs/organized/code-examples/server-app-access.md +342 -0
  65. package/.docs/raw/agents/adding-voice.mdx +7 -10
  66. package/.docs/raw/agents/agent-approval.mdx +189 -0
  67. package/.docs/raw/agents/guardrails.mdx +26 -23
  68. package/.docs/raw/agents/networks.mdx +2 -2
  69. package/.docs/raw/agents/overview.mdx +27 -62
  70. package/.docs/raw/agents/processors.mdx +279 -0
  71. package/.docs/raw/agents/using-tools.mdx +4 -5
  72. package/.docs/raw/course/01-first-agent/05-running-playground.md +5 -5
  73. package/.docs/raw/course/01-first-agent/09-testing-your-agent.md +3 -3
  74. package/.docs/raw/course/01-first-agent/13-testing-your-tool.md +3 -3
  75. package/.docs/raw/course/01-first-agent/17-testing-memory.md +2 -2
  76. package/.docs/raw/course/04-workflows/07-using-playground.md +1 -1
  77. package/.docs/raw/deployment/building-mastra.mdx +1 -1
  78. package/.docs/raw/deployment/cloud-providers/amazon-ec2.mdx +1 -1
  79. package/.docs/raw/deployment/cloud-providers/aws-lambda.mdx +1 -1
  80. package/.docs/raw/deployment/cloud-providers/azure-app-services.mdx +1 -1
  81. package/.docs/raw/deployment/cloud-providers/digital-ocean.mdx +1 -1
  82. package/.docs/raw/deployment/cloud-providers/index.mdx +20 -27
  83. package/.docs/raw/deployment/cloud-providers/netlify-deployer.mdx +44 -13
  84. package/.docs/raw/deployment/mastra-cloud/observability.mdx +19 -17
  85. package/.docs/raw/deployment/mastra-cloud/setting-up.mdx +1 -1
  86. package/.docs/raw/deployment/overview.mdx +2 -2
  87. package/.docs/raw/deployment/web-framework.mdx +5 -5
  88. package/.docs/raw/evals/custom-scorers.mdx +3 -5
  89. package/.docs/raw/evals/overview.mdx +2 -3
  90. package/.docs/raw/evals/running-in-ci.mdx +0 -2
  91. package/.docs/raw/{guides/guide → getting-started}/manual-install.mdx +2 -2
  92. package/.docs/raw/getting-started/project-structure.mdx +1 -1
  93. package/.docs/raw/getting-started/start.mdx +72 -0
  94. package/.docs/raw/getting-started/studio.mdx +1 -1
  95. package/.docs/raw/{frameworks/agentic-uis/ai-sdk.mdx → guides/build-your-ui/ai-sdk-ui.mdx} +113 -11
  96. package/.docs/raw/{frameworks/web-frameworks → guides/getting-started}/astro.mdx +23 -25
  97. package/.docs/raw/{frameworks/servers → guides/getting-started}/express.mdx +3 -4
  98. package/.docs/raw/guides/{quickstarts/nextjs.mdx → getting-started/next-js.mdx} +11 -11
  99. package/.docs/raw/guides/{quickstarts/standalone-server.mdx → getting-started/quickstart.mdx} +7 -7
  100. package/.docs/raw/{frameworks/web-frameworks → guides/getting-started}/sveltekit.mdx +23 -25
  101. package/.docs/raw/{frameworks/web-frameworks → guides/getting-started}/vite-react.mdx +7 -7
  102. package/.docs/raw/guides/guide/ai-recruiter.mdx +2 -3
  103. package/.docs/raw/guides/guide/chef-michel.mdx +2 -3
  104. package/.docs/raw/guides/guide/notes-mcp-server.mdx +2 -2
  105. package/.docs/raw/guides/guide/research-assistant.mdx +7 -8
  106. package/.docs/raw/guides/guide/stock-agent.mdx +4 -6
  107. package/.docs/raw/guides/guide/web-search.mdx +12 -10
  108. package/.docs/raw/guides/guide/whatsapp-chat-bot.mdx +421 -0
  109. package/.docs/raw/guides/index.mdx +3 -35
  110. package/.docs/raw/guides/migrations/agentnetwork.mdx +4 -4
  111. package/.docs/raw/guides/migrations/ai-sdk-v4-to-v5.mdx +1 -1
  112. package/.docs/raw/guides/migrations/upgrade-to-v1/agent.mdx +40 -0
  113. package/.docs/raw/guides/migrations/upgrade-to-v1/tools.mdx +5 -0
  114. package/.docs/raw/guides/migrations/upgrade-to-v1/workflows.mdx +51 -0
  115. package/.docs/raw/guides/migrations/vnext-to-standard-apis.mdx +2 -2
  116. package/.docs/raw/index.mdx +2 -2
  117. package/.docs/raw/mcp/overview.mdx +3 -5
  118. package/.docs/raw/memory/memory-processors.mdx +264 -79
  119. package/.docs/raw/memory/semantic-recall.mdx +7 -7
  120. package/.docs/raw/memory/storage/memory-with-libsql.mdx +2 -4
  121. package/.docs/raw/memory/storage/memory-with-mongodb.mdx +2 -4
  122. package/.docs/raw/memory/storage/memory-with-pg.mdx +2 -4
  123. package/.docs/raw/memory/storage/memory-with-upstash.mdx +2 -4
  124. package/.docs/raw/memory/threads-and-resources.mdx +3 -3
  125. package/.docs/raw/memory/working-memory.mdx +14 -7
  126. package/.docs/raw/{logging.mdx → observability/logging.mdx} +1 -1
  127. package/.docs/raw/observability/overview.mdx +2 -3
  128. package/.docs/raw/observability/tracing/bridges/otel.mdx +176 -0
  129. package/.docs/raw/observability/tracing/exporters/arize.mdx +17 -0
  130. package/.docs/raw/observability/tracing/exporters/braintrust.mdx +19 -0
  131. package/.docs/raw/observability/tracing/exporters/langfuse.mdx +20 -0
  132. package/.docs/raw/observability/tracing/exporters/langsmith.mdx +12 -0
  133. package/.docs/raw/observability/tracing/exporters/otel.mdx +25 -5
  134. package/.docs/raw/observability/tracing/exporters/posthog.mdx +107 -0
  135. package/.docs/raw/observability/tracing/overview.mdx +74 -8
  136. package/.docs/raw/observability/tracing/processors/sensitive-data-filter.mdx +0 -1
  137. package/.docs/raw/rag/chunking-and-embedding.mdx +16 -17
  138. package/.docs/raw/rag/overview.mdx +3 -2
  139. package/.docs/raw/rag/retrieval.mdx +43 -38
  140. package/.docs/raw/rag/vector-databases.mdx +93 -2
  141. package/.docs/raw/reference/agents/agent.mdx +7 -10
  142. package/.docs/raw/reference/agents/generate.mdx +55 -6
  143. package/.docs/raw/reference/agents/generateLegacy.mdx +2 -2
  144. package/.docs/raw/reference/agents/getLLM.mdx +1 -1
  145. package/.docs/raw/reference/agents/network.mdx +46 -3
  146. package/.docs/raw/reference/cli/mastra.mdx +2 -1
  147. package/.docs/raw/reference/client-js/agents.mdx +3 -3
  148. package/.docs/raw/reference/client-js/memory.mdx +43 -0
  149. package/.docs/raw/reference/client-js/workflows.mdx +92 -63
  150. package/.docs/raw/reference/core/getLogger.mdx +1 -1
  151. package/.docs/raw/reference/core/listLogs.mdx +1 -1
  152. package/.docs/raw/reference/core/listLogsByRunId.mdx +1 -1
  153. package/.docs/raw/reference/core/mastra-model-gateway.mdx +5 -19
  154. package/.docs/raw/reference/core/setLogger.mdx +1 -1
  155. package/.docs/raw/reference/core/setTelemetry.mdx +1 -1
  156. package/.docs/raw/reference/deployer/netlify.mdx +1 -2
  157. package/.docs/raw/reference/evals/answer-relevancy.mdx +28 -98
  158. package/.docs/raw/reference/evals/answer-similarity.mdx +12 -258
  159. package/.docs/raw/reference/evals/bias.mdx +29 -87
  160. package/.docs/raw/reference/evals/completeness.mdx +31 -90
  161. package/.docs/raw/reference/evals/content-similarity.mdx +28 -88
  162. package/.docs/raw/reference/evals/context-precision.mdx +28 -130
  163. package/.docs/raw/reference/evals/context-relevance.mdx +11 -11
  164. package/.docs/raw/reference/evals/faithfulness.mdx +28 -101
  165. package/.docs/raw/reference/evals/hallucination.mdx +28 -103
  166. package/.docs/raw/reference/evals/keyword-coverage.mdx +28 -107
  167. package/.docs/raw/reference/evals/noise-sensitivity.mdx +11 -11
  168. package/.docs/raw/reference/evals/prompt-alignment.mdx +15 -15
  169. package/.docs/raw/reference/evals/scorer-utils.mdx +362 -0
  170. package/.docs/raw/reference/evals/textual-difference.mdx +27 -100
  171. package/.docs/raw/reference/evals/tone-consistency.mdx +25 -98
  172. package/.docs/raw/reference/evals/tool-call-accuracy.mdx +7 -7
  173. package/.docs/raw/reference/evals/toxicity.mdx +29 -92
  174. package/.docs/raw/reference/index.mdx +1 -0
  175. package/.docs/raw/reference/memory/memory-class.mdx +5 -7
  176. package/.docs/raw/reference/observability/tracing/bridges/otel.mdx +150 -0
  177. package/.docs/raw/reference/observability/tracing/configuration.mdx +0 -4
  178. package/.docs/raw/reference/observability/tracing/exporters/arize.mdx +4 -0
  179. package/.docs/raw/reference/observability/tracing/exporters/langsmith.mdx +17 -1
  180. package/.docs/raw/reference/observability/tracing/exporters/otel.mdx +6 -0
  181. package/.docs/raw/reference/observability/tracing/exporters/posthog.mdx +132 -0
  182. package/.docs/raw/reference/observability/tracing/instances.mdx +0 -4
  183. package/.docs/raw/reference/observability/tracing/interfaces.mdx +29 -4
  184. package/.docs/raw/reference/observability/tracing/spans.mdx +0 -4
  185. package/.docs/raw/reference/processors/batch-parts-processor.mdx +1 -1
  186. package/.docs/raw/reference/processors/language-detector.mdx +10 -3
  187. package/.docs/raw/reference/processors/message-history-processor.mdx +131 -0
  188. package/.docs/raw/reference/processors/moderation-processor.mdx +12 -5
  189. package/.docs/raw/reference/processors/pii-detector.mdx +12 -5
  190. package/.docs/raw/reference/processors/processor-interface.mdx +502 -0
  191. package/.docs/raw/reference/processors/prompt-injection-detector.mdx +10 -3
  192. package/.docs/raw/reference/processors/semantic-recall-processor.mdx +197 -0
  193. package/.docs/raw/reference/processors/system-prompt-scrubber.mdx +3 -4
  194. package/.docs/raw/reference/processors/token-limiter-processor.mdx +2 -2
  195. package/.docs/raw/reference/processors/tool-call-filter.mdx +125 -0
  196. package/.docs/raw/reference/processors/unicode-normalizer.mdx +1 -1
  197. package/.docs/raw/reference/processors/working-memory-processor.mdx +221 -0
  198. package/.docs/raw/reference/rag/embeddings.mdx +5 -5
  199. package/.docs/raw/reference/rag/rerank.mdx +1 -2
  200. package/.docs/raw/reference/rag/rerankWithScorer.mdx +0 -1
  201. package/.docs/raw/reference/storage/cloudflare-d1.mdx +37 -0
  202. package/.docs/raw/reference/storage/convex.mdx +164 -0
  203. package/.docs/raw/reference/storage/lance.mdx +33 -0
  204. package/.docs/raw/reference/storage/libsql.mdx +37 -0
  205. package/.docs/raw/reference/storage/mongodb.mdx +39 -0
  206. package/.docs/raw/reference/storage/mssql.mdx +37 -0
  207. package/.docs/raw/reference/storage/postgresql.mdx +37 -0
  208. package/.docs/raw/reference/streaming/ChunkType.mdx +1 -1
  209. package/.docs/raw/reference/streaming/agents/stream.mdx +64 -2
  210. package/.docs/raw/reference/streaming/workflows/observeStream.mdx +7 -9
  211. package/.docs/raw/reference/streaming/workflows/{resumeStreamVNext.mdx → resumeStream.mdx} +51 -11
  212. package/.docs/raw/reference/streaming/workflows/stream.mdx +83 -24
  213. package/.docs/raw/reference/templates/overview.mdx +1 -4
  214. package/.docs/raw/reference/tools/client.mdx +1 -2
  215. package/.docs/raw/reference/tools/create-tool.mdx +132 -0
  216. package/.docs/raw/reference/tools/graph-rag-tool.mdx +5 -5
  217. package/.docs/raw/reference/tools/mcp-client.mdx +76 -21
  218. package/.docs/raw/reference/tools/mcp-server.mdx +1 -2
  219. package/.docs/raw/reference/tools/vector-query-tool.mdx +14 -15
  220. package/.docs/raw/reference/vectors/chroma.mdx +81 -1
  221. package/.docs/raw/reference/vectors/convex.mdx +429 -0
  222. package/.docs/raw/reference/vectors/couchbase.mdx +24 -17
  223. package/.docs/raw/reference/vectors/duckdb.mdx +462 -0
  224. package/.docs/raw/reference/vectors/elasticsearch.mdx +310 -0
  225. package/.docs/raw/reference/vectors/lance.mdx +38 -22
  226. package/.docs/raw/reference/vectors/libsql.mdx +35 -2
  227. package/.docs/raw/reference/vectors/mongodb.mdx +35 -2
  228. package/.docs/raw/reference/vectors/opensearch.mdx +37 -16
  229. package/.docs/raw/reference/vectors/pg.mdx +43 -36
  230. package/.docs/raw/reference/vectors/pinecone.mdx +48 -1
  231. package/.docs/raw/reference/vectors/qdrant.mdx +36 -1
  232. package/.docs/raw/reference/vectors/turbopuffer.mdx +74 -0
  233. package/.docs/raw/reference/voice/google.mdx +159 -20
  234. package/.docs/raw/reference/voice/openai-realtime.mdx +2 -2
  235. package/.docs/raw/reference/voice/voice.addInstructions.mdx +2 -3
  236. package/.docs/raw/reference/voice/voice.addTools.mdx +1 -1
  237. package/.docs/raw/reference/voice/voice.answer.mdx +1 -1
  238. package/.docs/raw/reference/voice/voice.close.mdx +1 -1
  239. package/.docs/raw/reference/voice/voice.connect.mdx +1 -1
  240. package/.docs/raw/reference/voice/voice.off.mdx +1 -1
  241. package/.docs/raw/reference/voice/voice.on.mdx +1 -1
  242. package/.docs/raw/reference/voice/voice.send.mdx +1 -1
  243. package/.docs/raw/reference/voice/voice.updateConfig.mdx +1 -1
  244. package/.docs/raw/reference/workflows/run-methods/restart.mdx +142 -0
  245. package/.docs/raw/reference/workflows/run-methods/resume.mdx +44 -0
  246. package/.docs/raw/reference/workflows/run-methods/start.mdx +44 -0
  247. package/.docs/raw/reference/workflows/run.mdx +13 -5
  248. package/.docs/raw/reference/workflows/step.mdx +13 -0
  249. package/.docs/raw/reference/workflows/workflow.mdx +19 -0
  250. package/.docs/raw/server-db/mastra-client.mdx +1 -2
  251. package/.docs/raw/server-db/mastra-server.mdx +30 -1
  252. package/.docs/raw/server-db/request-context.mdx +0 -1
  253. package/.docs/raw/server-db/storage.mdx +11 -0
  254. package/.docs/raw/streaming/overview.mdx +26 -15
  255. package/.docs/raw/streaming/tool-streaming.mdx +48 -5
  256. package/.docs/raw/streaming/workflow-streaming.mdx +5 -11
  257. package/.docs/raw/tools-mcp/advanced-usage.mdx +1 -2
  258. package/.docs/raw/tools-mcp/mcp-overview.mdx +3 -5
  259. package/.docs/raw/voice/overview.mdx +21 -41
  260. package/.docs/raw/voice/speech-to-speech.mdx +4 -4
  261. package/.docs/raw/voice/speech-to-text.mdx +1 -2
  262. package/.docs/raw/voice/text-to-speech.mdx +1 -2
  263. package/.docs/raw/workflows/control-flow.mdx +180 -0
  264. package/.docs/raw/workflows/error-handling.mdx +1 -0
  265. package/.docs/raw/workflows/human-in-the-loop.mdx +4 -4
  266. package/.docs/raw/workflows/overview.mdx +56 -44
  267. package/.docs/raw/workflows/snapshots.mdx +1 -0
  268. package/.docs/raw/workflows/suspend-and-resume.mdx +85 -16
  269. package/.docs/raw/workflows/time-travel.mdx +313 -0
  270. package/.docs/raw/workflows/workflow-state.mdx +191 -0
  271. package/CHANGELOG.md +18 -0
  272. package/dist/{chunk-5NJC7NRO.js → chunk-4CM2BQNP.js} +24 -4
  273. package/dist/prepare-docs/package-changes.d.ts.map +1 -1
  274. package/dist/prepare-docs/prepare.js +1 -1
  275. package/dist/stdio.js +1 -1
  276. package/package.json +7 -7
  277. package/.docs/raw/agents/human-in-the-loop-with-tools.mdx +0 -90
  278. package/.docs/raw/frameworks/agentic-uis/cedar-os.mdx +0 -102
  279. package/.docs/raw/frameworks/agentic-uis/openrouter.mdx +0 -179
  280. package/.docs/raw/frameworks/web-frameworks/next-js.mdx +0 -379
  281. package/.docs/raw/getting-started/quickstart.mdx +0 -27
  282. package/.docs/raw/getting-started/templates.mdx +0 -73
  283. package/.docs/raw/reference/streaming/workflows/observeStreamVNext.mdx +0 -47
  284. package/.docs/raw/reference/streaming/workflows/streamVNext.mdx +0 -153
  285. /package/.docs/raw/{frameworks/agentic-uis → guides/build-your-ui}/assistant-ui.mdx +0 -0
  286. /package/.docs/raw/{frameworks/agentic-uis → guides/build-your-ui}/copilotkit.mdx +0 -0
package/.docs/raw/memory/memory-processors.mdx
@@ -5,133 +5,318 @@ description: "Learn how to use memory processors in Mastra to filter, trim, and

  # Memory Processors

- Memory Processors allow you to modify the list of messages retrieved from memory _before_ they are added to the agent's context window and sent to the LLM. This is useful for managing context size, filtering content, and optimizing performance.
+ Memory processors transform and filter messages as they pass through an agent with memory enabled. They manage context window limits, remove unnecessary content, and optimize the information sent to the language model.

- Processors operate on the messages retrieved based on your memory configuration (e.g., `lastMessages`, `semanticRecall`). They do **not** affect the new incoming user message.
+ When memory is enabled on an agent, Mastra adds memory processors to the agent's processor pipeline. These processors retrieve conversation history, working memory, and semantically relevant messages, then persist new messages after the model responds.

- ## Built-in Processors
+ Memory processors are [processors](/docs/v1/agents/processors) that operate specifically on memory-related messages and state.

- Mastra provides built-in processors:
+ ## Built-in Memory Processors

- ### `TokenLimiter`
+ Mastra automatically adds these processors when memory is enabled:

- This processor is used to prevent errors caused by exceeding the LLM's context window limit. It counts the tokens in the retrieved memory messages and removes the oldest messages until the total count is below the specified `limit`.
+ ### MessageHistory

- ```typescript copy showLineNumbers {9-12}
- import { Memory } from "@mastra/memory";
- import { TokenLimiter } from "@mastra/memory/processors";
+ Retrieves conversation history and persists new messages.
+
+ **When you configure:**
+
+ ```typescript
+ memory: new Memory({
+   lastMessages: 10,
+ });
+ ```
+
+ **Mastra internally:**
+
+ 1. Creates a `MessageHistory` processor with `limit: 10`
+ 2. Adds it to the agent's input processors (runs before the LLM)
+ 3. Adds it to the agent's output processors (runs after the LLM)
+
+ **What it does:**
+
+ - **Input**: Fetches the last 10 messages from storage and prepends them to the conversation
+ - **Output**: Persists new messages to storage after the model responds
+
+ **Example:**
+
+ ```typescript copy showLineNumbers
  import { Agent } from "@mastra/core/agent";
+ import { Memory } from "@mastra/memory";
+ import { LibSQLStore } from "@mastra/libsql";
  import { openai } from "@ai-sdk/openai";

  const agent = new Agent({
    id: "test-agent",
    name: "Test Agent",
-   model: openai("gpt-4o"),
+   instructions: "You are a helpful assistant",
+   model: 'openai/gpt-4o',
    memory: new Memory({
-     processors: [
-       // Ensure the total tokens from memory don't exceed ~127k
-       new TokenLimiter(127000),
-     ],
+     storage: new LibSQLStore({
+       id: "memory-store",
+       url: "file:memory.db",
+     }),
+     lastMessages: 10, // MessageHistory processor automatically added
    }),
  });
  ```

- The `TokenLimiter` uses the `o200k_base` encoding by default (suitable for GPT-4o). You can specify other encodings if needed for different models:
+ ### SemanticRecall

- ```typescript copy showLineNumbers {6-9}
- // Import the encoding you need (e.g., for older OpenAI models)
- import cl100k_base from "js-tiktoken/ranks/cl100k_base";
+ Retrieves semantically relevant messages based on the current input and creates embeddings for new messages.

- const memoryForOlderModel = new Memory({
-   processors: [
-     new TokenLimiter({
-       limit: 16000, // Example limit for a 16k context model
-       encoding: cl100k_base,
-     }),
-   ],
+ **When you configure:**
+
+ ```typescript
+ memory: new Memory({
+   semanticRecall: { enabled: true },
+   vector: myVectorStore,
+   embedder: myEmbedder,
  });
  ```

- See the [OpenAI cookbook](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken#encodings) or [`js-tiktoken` repo](https://github.com/dqbd/tiktoken) for more on encodings.
+ **Mastra internally:**
+
+ 1. Creates a `SemanticRecall` processor
+ 2. Adds it to the agent's input processors (runs before the LLM)
+ 3. Adds it to the agent's output processors (runs after the LLM)
+ 4. Requires both a vector store and embedder to be configured
+
+ **What it does:**

- ### `ToolCallFilter`
+ - **Input**: Performs vector similarity search to find relevant past messages and prepends them to the conversation
+ - **Output**: Creates embeddings for new messages and stores them in the vector store for future retrieval

- This processor removes tool calls from the memory messages sent to the LLM. It saves tokens by excluding potentially verbose tool interactions from the context, which is useful if the details aren't needed for future interactions. It's also useful if you always want your agent to call a specific tool again and not rely on previous tool results in memory.
+ **Example:**

- ```typescript copy showLineNumbers {5-14}
+ ```typescript copy showLineNumbers
+ import { Agent } from "@mastra/core/agent";
  import { Memory } from "@mastra/memory";
- import { ToolCallFilter, TokenLimiter } from "@mastra/memory/processors";
+ import { LibSQLStore } from "@mastra/libsql";
+ import { PineconeVector } from "@mastra/pinecone";
+ import { OpenAIEmbedder } from "@mastra/openai";
+ import { openai } from "@ai-sdk/openai";

- const memoryFilteringTools = new Memory({
-   processors: [
-     // Example 1: Remove all tool calls/results
-     new ToolCallFilter(),
+ const agent = new Agent({
+   name: "semantic-agent",
+   instructions: "You are a helpful assistant with semantic memory",
+   model: 'openai/gpt-4o',
+   memory: new Memory({
+     storage: new LibSQLStore({
+       id: "memory-store",
+       url: "file:memory.db",
+     }),
+     vector: new PineconeVector({
+       id: "memory-vector",
+       apiKey: process.env.PINECONE_API_KEY!,
+       environment: "us-east-1",
+     }),
+     embedder: new OpenAIEmbedder({
+       model: "text-embedding-3-small",
+       apiKey: process.env.OPENAI_API_KEY!,
+     }),
+     semanticRecall: { enabled: true }, // SemanticRecall processor automatically added
+   }),
+ });
+ ```

-     // Example 2: Remove only noisy image generation tool calls/results
-     new ToolCallFilter({ exclude: ["generateImageTool"] }),
+ ### WorkingMemory

-     // Always place TokenLimiter last
-     new TokenLimiter(127000),
-   ],
+ Manages working memory state across conversations.
+
+ **When you configure:**
+
+ ```typescript
+ memory: new Memory({
+   workingMemory: { enabled: true },
  });
  ```

- ## Applying Multiple Processors
+ **Mastra internally:**
+
+ 1. Creates a `WorkingMemory` processor
+ 2. Adds it to the agent's input processors (runs before the LLM)
+ 3. Requires a storage adapter to be configured

- You can chain multiple processors. They execute in the order they appear in the `processors` array. The output of one processor becomes the input for the next.
+ **What it does:**

- **Order matters!** It's generally best practice to place `TokenLimiter` **last** in the chain. This ensures it operates on the final set of messages after other filtering has occurred, providing the most accurate token limit enforcement.
+ - **Input**: Retrieves working memory state for the current thread and prepends it to the conversation
+ - **Output**: No output processing

- ```typescript copy showLineNumbers {7-14}
+ **Example:**
+
+ ```typescript copy showLineNumbers
+ import { Agent } from "@mastra/core/agent";
  import { Memory } from "@mastra/memory";
- import { ToolCallFilter, TokenLimiter } from "@mastra/memory/processors";
- // Assume a hypothetical 'PIIFilter' custom processor exists
- // import { PIIFilter } from './custom-processors';
-
- const memoryWithMultipleProcessors = new Memory({
-   processors: [
-     // 1. Filter specific tool calls first
-     new ToolCallFilter({ exclude: ["verboseDebugTool"] }),
-     // 2. Apply custom filtering (e.g., remove hypothetical PII - use with caution)
-     // new PIIFilter(),
-     // 3. Apply token limiting as the final step
-     new TokenLimiter(127000),
+ import { LibSQLStore } from "@mastra/libsql";
+ import { openai } from "@ai-sdk/openai";
+
+ const agent = new Agent({
+   name: "working-memory-agent",
+   instructions: "You are an assistant with working memory",
+   model: 'openai/gpt-4o',
+   memory: new Memory({
+     storage: new LibSQLStore({
+       id: "memory-store",
+       url: "file:memory.db",
+     }),
+     workingMemory: { enabled: true }, // WorkingMemory processor automatically added
+   }),
+ });
+ ```
+
+ ## Manual Control and Deduplication
+
+ If you manually add a memory processor to `inputProcessors` or `outputProcessors`, Mastra will **not** automatically add it. This gives you full control over processor ordering:
+
+ ```typescript copy showLineNumbers
+ import { Agent } from "@mastra/core/agent";
+ import { Memory } from "@mastra/memory";
+ import { MessageHistory } from "@mastra/memory/processors";
+ import { TokenLimiter } from "@mastra/core/processors";
+ import { LibSQLStore } from "@mastra/libsql";
+ import { openai } from "@ai-sdk/openai";
+
+ // Custom MessageHistory with different configuration
+ const customMessageHistory = new MessageHistory({
+   storage: new LibSQLStore({ id: "memory-store", url: "file:memory.db" }),
+   lastMessages: 20,
+ });
+
+ const agent = new Agent({
+   name: "custom-memory-agent",
+   instructions: "You are a helpful assistant",
+   model: 'openai/gpt-4o',
+   memory: new Memory({
+     storage: new LibSQLStore({ id: "memory-store", url: "file:memory.db" }),
+     lastMessages: 10, // This would normally add MessageHistory(10)
+   }),
+   inputProcessors: [
+     customMessageHistory, // Your custom one is used instead
+     new TokenLimiter({ limit: 4000 }), // Runs after your custom MessageHistory
    ],
  });
  ```

- ## Creating Custom Processors
+ ## Processor Execution Order
+
+ Understanding the execution order is important when combining guardrails with memory:
+
+ ### Input Processors
+
+ ```
+ [Memory Processors] → [Your inputProcessors]
+ ```
+
+ 1. **Memory processors run FIRST**: `WorkingMemory`, `MessageHistory`, `SemanticRecall`
+ 2. **Your input processors run AFTER**: guardrails, filters, validators
+
+ This means memory loads conversation history before your processors can validate or filter the input.
+
+ ### Output Processors
+
+ ```
+ [Your outputProcessors] → [Memory Processors]
+ ```
+
+ 1. **Your output processors run FIRST**: guardrails, filters, validators
+ 2. **Memory processors run AFTER**: `SemanticRecall` (embeddings), `MessageHistory` (persistence)
+
+ This ordering is designed to be **safe by default**: if your output guardrail calls `abort()`, the memory processors never run and **no messages are saved**.

- You can create custom logic by extending the base `MemoryProcessor` class.
+ ## Guardrails and Memory

- ```typescript copy showLineNumbers {5-20,24-27}
- import { Memory, MemoryProcessorOpts, MemoryProcessor } from "@mastra/memory";
- import { CoreMessage } from "@mastra/core/llm";
+ The default execution order provides safe guardrail behavior:

- class ConversationOnlyFilter extends MemoryProcessor {
-   constructor() {
-     // Provide a name for easier debugging if needed
-     super({ name: "ConversationOnlyFilter" });
-   }
+ ### Output guardrails (recommended)

-   process(
-     messages: CoreMessage[],
-     _opts: MemoryProcessorOpts = {}, // Options passed during memory retrieval, rarely needed here
-   ): CoreMessage[] {
-     // Filter messages based on role
-     return messages.filter(
-       (msg) => msg.role === "user" || msg.role === "assistant",
+ Output guardrails run **before** memory processors save messages. If a guardrail aborts:
+
+ - The tripwire is triggered
+ - Memory processors are skipped
+ - **No messages are persisted to storage**
+
+ ```typescript copy showLineNumbers
+ import { Agent } from "@mastra/core/agent";
+ import { Memory } from "@mastra/memory";
+ import { openai } from "@ai-sdk/openai";
+
+ // Output guardrail that blocks inappropriate content
+ const contentBlocker = {
+   id: "content-blocker",
+   processOutputResult: async ({ messages, abort }) => {
+     const hasInappropriateContent = messages.some((msg) =>
+       containsBadContent(msg)
      );
-   }
+     if (hasInappropriateContent) {
+       abort("Content blocked by guardrail");
+     }
+     return messages;
+   },
+ };
+
+ const agent = new Agent({
+   name: "safe-agent",
+   instructions: "You are a helpful assistant",
+   model: 'openai/gpt-4o',
+   memory: new Memory({ lastMessages: 10 }),
+   // Your guardrail runs BEFORE memory saves
+   outputProcessors: [contentBlocker],
+ });
+
+ // If the guardrail aborts, nothing is saved to memory
+ const result = await agent.generate("Hello");
+ if (result.tripwire) {
+   console.log("Blocked:", result.tripwireReason);
+   // Memory is empty - no messages were persisted
  }
+ ```

- // Use the custom processor
- const memoryWithCustomFilter = new Memory({
-   processors: [
-     new ConversationOnlyFilter(),
-     new TokenLimiter(127000), // Still apply token limiting
-   ],
+ ### Input guardrails
+
+ Input guardrails run **after** memory processors load history. If a guardrail aborts:
+
+ - The tripwire is triggered
+ - The LLM is never called
+ - Output processors (including memory persistence) are skipped
+ - **No messages are persisted to storage**
+
+ ```typescript copy showLineNumbers
+ // Input guardrail that validates user input
+ const inputValidator = {
+   id: "input-validator",
+   processInput: async ({ messages, abort }) => {
+     const lastUserMessage = messages.findLast((m) => m.role === "user");
+     if (isInvalidInput(lastUserMessage)) {
+       abort("Invalid input detected");
+     }
+     return messages;
+   },
+ };
+
+ const agent = new Agent({
+   name: "validated-agent",
+   instructions: "You are a helpful assistant",
+   model: 'openai/gpt-4o',
+   memory: new Memory({ lastMessages: 10 }),
+   // Your guardrail runs AFTER memory loads history
+   inputProcessors: [inputValidator],
  });
  ```

+ ### Summary
+
+ | Guardrail Type | When it runs | If it aborts |
+ | -------------- | ------------ | ------------ |
+ | Input | After memory loads history | LLM not called, nothing saved |
+ | Output | Before memory saves | Nothing saved to storage |
+
+ Both scenarios are safe - guardrails prevent inappropriate content from being persisted to memory
+
+ ## Related documentation
+
+ - [Processors](/docs/v1/agents/processors) - General processor concepts and custom processor creation
+ - [Guardrails](/docs/v1/agents/guardrails) - Security and validation processors
+ - [Memory Overview](/docs/v1/memory/overview) - Memory types and configuration
+
  When creating custom processors avoid mutating the input `messages` array or its objects directly.
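The guardrail examples added in this file share a simple processor shape: a plain object with an `id` and an async `processInput` or `processOutputResult` function that receives `{ messages, abort }`. The sketch below shows how the removed `ToolCallFilter` use case could be recreated on top of that shape; the `toolMessageFilter` name and the `msg.role !== "tool"` check are illustrative assumptions, not part of the package.

```typescript
import { Agent } from "@mastra/core/agent";
import { Memory } from "@mastra/memory";

// Illustrative custom input processor (object shape mirrors the examples above).
const toolMessageFilter = {
  id: "tool-message-filter",
  processInput: async ({ messages }) => {
    // Assumption: memory-loaded messages expose a `role`; drop tool turns so the
    // model only sees user/assistant history.
    return messages.filter((msg) => msg.role !== "tool");
  },
};

const agent = new Agent({
  name: "filtered-agent",
  instructions: "You are a helpful assistant",
  model: "openai/gpt-4o",
  memory: new Memory({ lastMessages: 10 }),
  // Runs after the built-in memory processors have loaded history
  inputProcessors: [toolMessageFilter],
});
```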
package/.docs/raw/memory/semantic-recall.mdx
@@ -30,16 +30,15 @@ After getting a response from the LLM, all new messages (user, assistant, and to

  Semantic recall is enabled by default, so if you give your agent memory it will be included:

- ```typescript {9}
+ ```typescript {8}
  import { Agent } from "@mastra/core/agent";
  import { Memory } from "@mastra/memory";
- import { openai } from "@ai-sdk/openai";

  const agent = new Agent({
    id: "support-agent",
    name: "SupportAgent",
    instructions: "You are a helpful support agent.",
-   model: openai("gpt-4o"),
+   model: "openai/gpt-5.1",
    memory: new Memory(),
  });
  ```
@@ -109,11 +108,12 @@ The simplest way is to use a `provider/model` string with autocomplete support:
  ```ts {7}
  import { Memory } from "@mastra/memory";
  import { Agent } from "@mastra/core/agent";
+ import { ModelRouterEmbeddingModel } from "@mastra/core/llm";

  const agent = new Agent({
    memory: new Memory({
      // ... other memory options
-     embedder: "openai/text-embedding-3-small", // TypeScript autocomplete supported
+     embedder: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
    }),
  });
  ```
@@ -129,15 +129,15 @@ The model router automatically handles API key detection from environment variab

  You can also use AI SDK embedding models directly:

- ```ts {3,8}
+ ```ts {2,7}
  import { Memory } from "@mastra/memory";
  import { Agent } from "@mastra/core/agent";
- import { openai } from "@ai-sdk/openai";
+ import { ModelRouterEmbeddingModel } from "@mastra/core/llm";

  const agent = new Agent({
    memory: new Memory({
      // ... other memory options
-     embedder: openai.embedding("text-embedding-3-small"),
+     embedder: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
    }),
  });
  ```
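Taken together, these hunks move the embedder from an `@ai-sdk/openai` import to the `ModelRouterEmbeddingModel` wrapper. A minimal sketch of a full semantic-recall setup under that pattern follows; the LibSQL vector adapter and its `id`/`url` options are assumptions borrowed from the storage examples elsewhere in this diff, not confirmed API.

```typescript
import { Agent } from "@mastra/core/agent";
import { ModelRouterEmbeddingModel } from "@mastra/core/llm";
import { Memory } from "@mastra/memory";
import { LibSQLStore, LibSQLVector } from "@mastra/libsql";

// Sketch only: exact LibSQLVector constructor options may differ.
export const recallAgent = new Agent({
  id: "recall-agent",
  name: "RecallAgent",
  instructions: "You are a helpful support agent.",
  model: "openai/gpt-5.1",
  memory: new Memory({
    storage: new LibSQLStore({ id: "recall-storage", url: "file:memory.db" }),
    vector: new LibSQLVector({ id: "recall-vector", url: "file:memory.db" }),
    embedder: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
    semanticRecall: { enabled: true },
  }),
});
```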
package/.docs/raw/memory/storage/memory-with-libsql.mdx
@@ -28,7 +28,6 @@ To add LibSQL memory to an agent use the `Memory` class and create a new `storag
  ```typescript title="src/mastra/agents/example-libsql-agent.ts" showLineNumbers copy
  import { Memory } from "@mastra/memory";
  import { Agent } from "@mastra/core/agent";
- import { openai } from "@ai-sdk/openai";
  import { LibSQLStore } from "@mastra/libsql";

  export const libsqlAgent = new Agent({
@@ -36,7 +35,7 @@ export const libsqlAgent = new Agent({
    name: "LibSQL Agent",
    instructions:
      "You are an AI agent with the ability to automatically recall memories from previous interactions.",
-   model: openai("gpt-4o"),
+   model: "openai/gpt-5.1",
    memory: new Memory({
      storage: new LibSQLStore({
        id: 'libsql-agent-storage',
@@ -64,7 +63,6 @@ Add the following to your agent:
  ```typescript title="src/mastra/agents/example-libsql-agent.ts" showLineNumbers copy
  import { Memory } from "@mastra/memory";
  import { Agent } from "@mastra/core/agent";
- import { openai } from "@ai-sdk/openai";
  import { LibSQLStore, LibSQLVector } from "@mastra/libsql";
  import { fastembed } from "@mastra/fastembed";

@@ -73,7 +71,7 @@ export const libsqlAgent = new Agent({
    name: "LibSQL Agent",
    instructions:
      "You are an AI agent with the ability to automatically recall memories from previous interactions.",
-   model: openai("gpt-4o"),
+   model: "openai/gpt-5.1",
    memory: new Memory({
      storage: new LibSQLStore({
        id: 'libsql-agent-storage',
package/.docs/raw/memory/storage/memory-with-mongodb.mdx
@@ -30,7 +30,6 @@ To add MongoDB memory to an agent use the `Memory` class and create a new `stora
  ```typescript title="src/mastra/agents/example-mongodb-agent.ts" showLineNumbers copy
  import { Memory } from "@mastra/memory";
  import { Agent } from "@mastra/core/agent";
- import { openai } from "@ai-sdk/openai";
  import { MongoDBStore } from "@mastra/mongodb";

  export const mongodbAgent = new Agent({
@@ -38,7 +37,7 @@ export const mongodbAgent = new Agent({
    name: "mongodb-agent",
    instructions:
      "You are an AI agent with the ability to automatically recall memories from previous interactions.",
-   model: openai("gpt-4o"),
+   model: "openai/gpt-5.1",
    memory: new Memory({
      storage: new MongoDBStore({
        url: process.env.MONGODB_URI!,
@@ -71,7 +70,6 @@ Add the following to your agent:
  ```typescript title="src/mastra/agents/example-mongodb-agent.ts" showLineNumbers copy
  import { Memory } from "@mastra/memory";
  import { Agent } from "@mastra/core/agent";
- import { openai } from "@ai-sdk/openai";
  import { MongoDBStore, MongoDBVector } from "@mastra/mongodb";
  import { fastembed } from "@mastra/fastembed";

@@ -80,7 +78,7 @@ export const mongodbAgent = new Agent({
    name: "mongodb-agent",
    instructions:
      "You are an AI agent with the ability to automatically recall memories from previous interactions.",
-   model: openai("gpt-4o"),
+   model: "openai/gpt-5.1",
    memory: new Memory({
      storage: new MongoDBStore({
        url: process.env.MONGODB_URI!,
package/.docs/raw/memory/storage/memory-with-pg.mdx
@@ -29,7 +29,6 @@ To add PostgreSQL memory to an agent use the `Memory` class and create a new `st
  ```typescript title="src/mastra/agents/example-pg-agent.ts" showLineNumbers copy
  import { Memory } from "@mastra/memory";
  import { Agent } from "@mastra/core/agent";
- import { openai } from "@ai-sdk/openai";
  import { PostgresStore } from "@mastra/pg";

  export const pgAgent = new Agent({
@@ -37,7 +36,7 @@ export const pgAgent = new Agent({
    name: "PG Agent",
    instructions:
      "You are an AI agent with the ability to automatically recall memories from previous interactions.",
-   model: openai("gpt-4o"),
+   model: "openai/gpt-5.1",
    memory: new Memory({
      storage: new PostgresStore({
        id: 'pg-agent-storage',
@@ -65,7 +64,6 @@ Add the following to your agent:
  ```typescript title="src/mastra/agents/example-pg-agent.ts" showLineNumbers copy
  import { Memory } from "@mastra/memory";
  import { Agent } from "@mastra/core/agent";
- import { openai } from "@ai-sdk/openai";
  import { PostgresStore, PgVector } from "@mastra/pg";
  import { fastembed } from "@mastra/fastembed";

@@ -74,7 +72,7 @@ export const pgAgent = new Agent({
    name: "PG Agent",
    instructions:
      "You are an AI agent with the ability to automatically recall memories from previous interactions.",
-   model: openai("gpt-4o"),
+   model: "openai/gpt-5.1",
    memory: new Memory({
      storage: new PostgresStore({
        id: 'pg-agent-storage',
package/.docs/raw/memory/storage/memory-with-upstash.mdx
@@ -34,7 +34,6 @@ To add Upstash memory to an agent use the `Memory` class and create a new `stora
  ```typescript title="src/mastra/agents/example-upstash-agent.ts" showLineNumbers copy
  import { Memory } from "@mastra/memory";
  import { Agent } from "@mastra/core/agent";
- import { openai } from "@ai-sdk/openai";
  import { UpstashStore } from "@mastra/upstash";

  export const upstashAgent = new Agent({
@@ -42,7 +41,7 @@ export const upstashAgent = new Agent({
    name: "Upstash Agent",
    instructions:
      "You are an AI agent with the ability to automatically recall memories from previous interactions.",
-   model: openai("gpt-4o"),
+   model: "openai/gpt-5.1",
    memory: new Memory({
      storage: new UpstashStore({
        id: 'upstash-agent-storage',
@@ -71,7 +70,6 @@ Add the following to your agent:
  ```typescript title="src/mastra/agents/example-upstash-agent.ts" showLineNumbers copy
  import { Memory } from "@mastra/memory";
  import { Agent } from "@mastra/core/agent";
- import { openai } from "@ai-sdk/openai";
  import { UpstashStore, UpstashVector } from "@mastra/upstash";
  import { fastembed } from "@mastra/fastembed";

@@ -80,7 +78,7 @@ export const upstashAgent = new Agent({
    name: "Upstash Agent",
    instructions:
      "You are an AI agent with the ability to automatically recall memories from previous interactions.",
-   model: openai("gpt-4o"),
+   model: "openai/gpt-5.1",
    memory: new Memory({
      storage: new UpstashStore({
        id: 'upstash-agent-storage',
package/.docs/raw/memory/threads-and-resources.mdx
@@ -56,7 +56,7 @@ export const testAgent = new Agent({
    options: {
      threads: {
        generateTitle: {
-         model: openai("gpt-4.1-nano"),
+         model: "openai/gpt-4.1-nano",
          instructions:
            "Generate a concise title based on the user's first message",
        },
@@ -80,8 +80,8 @@ export const testAgent = new Agent({
    model: ({ requestContext }) => {
      const userTier = requestContext.get("userTier");
      return userTier === "premium"
-       ? openai("gpt-4.1")
-       : openai("gpt-4.1-nano");
+       ? "openai/gpt-5.1"
+       : "openai/gpt-4.1-nano";
    },
    instructions: ({ requestContext }) => {
      const language = requestContext.get("userLanguage") || "English";
package/.docs/raw/memory/working-memory.mdx
@@ -24,17 +24,16 @@ Working memory can persist at two different scopes:

  Here's a minimal example of setting up an agent with working memory:

- ```typescript {12-15}
+ ```typescript {11-14}
  import { Agent } from "@mastra/core/agent";
  import { Memory } from "@mastra/memory";
- import { openai } from "@ai-sdk/openai";

  // Create agent with working memory enabled
  const agent = new Agent({
    id: "personal-assistant",
    name: "PersonalAssistant",
    instructions: "You are a helpful personal assistant.",
-   model: openai("gpt-4o"),
+   model: "openai/gpt-5.1",
    memory: new Memory({
      options: {
        workingMemory: {
@@ -268,10 +267,18 @@ When a schema is provided, the agent receives the working memory as a JSON objec
  }
  ```

+ ### Merge Semantics for Schema-Based Memory
+
+ Schema-based working memory uses **merge semantics**, meaning the agent only needs to include fields it wants to add or update. Existing fields are preserved automatically.
+
+ - **Object fields are deep merged:** Only provided fields are updated; others remain unchanged
+ - **Set a field to `null` to delete it:** This explicitly removes the field from memory
+ - **Arrays are replaced entirely:** When an array field is provided, it replaces the existing array (arrays are not merged element-by-element)
+

  ## Choosing Between Template and Schema

- - Use a **template** (Markdown) if you want the agent to maintain memory as a free-form text block, such as a user profile or scratchpad.
- - Use a **schema** if you need structured, type-safe data that can be validated and programmatically accessed as JSON.
+ - Use a **template** (Markdown) if you want the agent to maintain memory as a free-form text block, such as a user profile or scratchpad. Templates use **replace semantics** — the agent must provide the complete memory content on each update.
+ - Use a **schema** if you need structured, type-safe data that can be validated and programmatically accessed as JSON. Schemas use **merge semantics** — the agent only provides fields to update, and existing fields are preserved.
  - Only one mode can be active at a time: setting both `template` and `schema` is not supported.
  ## Example: Multi-step Retention
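The merge-semantics notes above apply to schema-based working memory. A minimal sketch of that configuration follows, assuming `workingMemory.schema` accepts a Zod schema; the field names and the update sequence in the trailing comment are illustrative, not taken from the package.

```typescript
import { Agent } from "@mastra/core/agent";
import { Memory } from "@mastra/memory";
import { z } from "zod";

// Schema-based working memory (merge semantics); option nesting follows the
// `options.workingMemory` shape shown in the hunk above.
const agent = new Agent({
  id: "profile-assistant",
  name: "ProfileAssistant",
  instructions: "Track the user's profile in working memory.",
  model: "openai/gpt-5.1",
  memory: new Memory({
    options: {
      workingMemory: {
        enabled: true,
        schema: z.object({
          name: z.string().optional(),
          location: z.string().optional(),
          interests: z.array(z.string()).optional(),
        }),
      },
    },
  }),
});

// Merge behavior across successive agent updates (conceptually):
//   update 1: { name: "Ada", interests: ["chess"] }
//   update 2: { location: "Berlin" }          -> name and interests preserved
//   update 3: { interests: ["go", "chess"] }  -> array replaced, not merged
//   update 4: { location: null }              -> location removed
```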
@@ -378,6 +385,6 @@ await memory.updateWorkingMemory({

  ## Examples

- - [Working memory with template](/examples/v1/memory/working-memory-template)
- - [Working memory with schema](/examples/v1/memory/working-memory-schema)
+ - [Working memory with template](https://github.com/mastra-ai/mastra/tree/main/examples/memory-with-template)
+ - [Working memory with schema](https://github.com/mastra-ai/mastra/tree/main/examples/memory-with-schema)
  - [Per-resource working memory](https://github.com/mastra-ai/mastra/tree/main/examples/memory-per-resource-example) - Complete example showing resource-scoped memory persistence