@mastra/mcp-docs-server 1.0.0-beta.4 → 1.0.0-beta.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (286)
  1. package/.docs/organized/changelogs/%40internal%2Fstorage-test-utils.md +201 -1
  2. package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +201 -1
  3. package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +201 -1
  4. package/.docs/organized/changelogs/%40mastra%2Fastra.md +201 -1
  5. package/.docs/organized/changelogs/%40mastra%2Fchroma.md +201 -1
  6. package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +201 -1
  7. package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +201 -1
  8. package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +201 -1
  9. package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +201 -1
  10. package/.docs/organized/changelogs/%40mastra%2Fconvex.md +29 -0
  11. package/.docs/organized/changelogs/%40mastra%2Fcore.md +411 -211
  12. package/.docs/organized/changelogs/%40mastra%2Fcouchbase.md +201 -1
  13. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +201 -1
  14. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +201 -1
  15. package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +201 -1
  16. package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +201 -1
  17. package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +201 -1
  18. package/.docs/organized/changelogs/%40mastra%2Fduckdb.md +42 -0
  19. package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +201 -1
  20. package/.docs/organized/changelogs/%40mastra%2Felasticsearch.md +52 -0
  21. package/.docs/organized/changelogs/%40mastra%2Fevals.md +201 -1
  22. package/.docs/organized/changelogs/%40mastra%2Flance.md +201 -1
  23. package/.docs/organized/changelogs/%40mastra%2Flibsql.md +201 -1
  24. package/.docs/organized/changelogs/%40mastra%2Floggers.md +201 -1
  25. package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +201 -1
  26. package/.docs/organized/changelogs/%40mastra%2Fmcp-registry-registry.md +201 -1
  27. package/.docs/organized/changelogs/%40mastra%2Fmcp.md +201 -1
  28. package/.docs/organized/changelogs/%40mastra%2Fmemory.md +201 -1
  29. package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +201 -1
  30. package/.docs/organized/changelogs/%40mastra%2Fmssql.md +201 -1
  31. package/.docs/organized/changelogs/%40mastra%2Fopensearch.md +201 -1
  32. package/.docs/organized/changelogs/%40mastra%2Fpg.md +201 -1
  33. package/.docs/organized/changelogs/%40mastra%2Fpinecone.md +201 -1
  34. package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +201 -1
  35. package/.docs/organized/changelogs/%40mastra%2Fqdrant.md +201 -1
  36. package/.docs/organized/changelogs/%40mastra%2Frag.md +201 -1
  37. package/.docs/organized/changelogs/%40mastra%2Freact.md +89 -1
  38. package/.docs/organized/changelogs/%40mastra%2Fs3vectors.md +9 -0
  39. package/.docs/organized/changelogs/%40mastra%2Fschema-compat.md +42 -0
  40. package/.docs/organized/changelogs/%40mastra%2Fserver.md +201 -1
  41. package/.docs/organized/changelogs/%40mastra%2Fturbopuffer.md +201 -1
  42. package/.docs/organized/changelogs/%40mastra%2Fupstash.md +201 -1
  43. package/.docs/organized/changelogs/%40mastra%2Fvectorize.md +201 -1
  44. package/.docs/organized/changelogs/%40mastra%2Fvoice-azure.md +201 -1
  45. package/.docs/organized/changelogs/%40mastra%2Fvoice-cloudflare.md +201 -1
  46. package/.docs/organized/changelogs/%40mastra%2Fvoice-deepgram.md +201 -1
  47. package/.docs/organized/changelogs/%40mastra%2Fvoice-elevenlabs.md +201 -1
  48. package/.docs/organized/changelogs/%40mastra%2Fvoice-gladia.md +92 -1
  49. package/.docs/organized/changelogs/%40mastra%2Fvoice-google-gemini-live.md +67 -1
  50. package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +201 -1
  51. package/.docs/organized/changelogs/%40mastra%2Fvoice-murf.md +201 -1
  52. package/.docs/organized/changelogs/%40mastra%2Fvoice-openai-realtime.md +201 -1
  53. package/.docs/organized/changelogs/%40mastra%2Fvoice-openai.md +201 -1
  54. package/.docs/organized/changelogs/%40mastra%2Fvoice-playai.md +201 -1
  55. package/.docs/organized/changelogs/%40mastra%2Fvoice-sarvam.md +201 -1
  56. package/.docs/organized/changelogs/%40mastra%2Fvoice-speechify.md +201 -1
  57. package/.docs/organized/changelogs/create-mastra.md +201 -1
  58. package/.docs/organized/changelogs/mastra.md +201 -1
  59. package/.docs/organized/code-examples/agui.md +1 -0
  60. package/.docs/organized/code-examples/ai-sdk-v5.md +1 -0
  61. package/.docs/organized/code-examples/mcp-server-adapters.md +721 -0
  62. package/.docs/organized/code-examples/memory-with-processors.md +1 -1
  63. package/.docs/organized/code-examples/quick-start.md +1 -1
  64. package/.docs/organized/code-examples/server-app-access.md +342 -0
  65. package/.docs/raw/agents/adding-voice.mdx +7 -10
  66. package/.docs/raw/agents/agent-approval.mdx +189 -0
  67. package/.docs/raw/agents/guardrails.mdx +26 -23
  68. package/.docs/raw/agents/networks.mdx +2 -2
  69. package/.docs/raw/agents/overview.mdx +27 -62
  70. package/.docs/raw/agents/processors.mdx +279 -0
  71. package/.docs/raw/agents/using-tools.mdx +4 -5
  72. package/.docs/raw/course/01-first-agent/05-running-playground.md +5 -5
  73. package/.docs/raw/course/01-first-agent/09-testing-your-agent.md +3 -3
  74. package/.docs/raw/course/01-first-agent/13-testing-your-tool.md +3 -3
  75. package/.docs/raw/course/01-first-agent/17-testing-memory.md +2 -2
  76. package/.docs/raw/course/04-workflows/07-using-playground.md +1 -1
  77. package/.docs/raw/deployment/building-mastra.mdx +1 -1
  78. package/.docs/raw/deployment/cloud-providers/amazon-ec2.mdx +1 -1
  79. package/.docs/raw/deployment/cloud-providers/aws-lambda.mdx +1 -1
  80. package/.docs/raw/deployment/cloud-providers/azure-app-services.mdx +1 -1
  81. package/.docs/raw/deployment/cloud-providers/digital-ocean.mdx +1 -1
  82. package/.docs/raw/deployment/cloud-providers/index.mdx +20 -27
  83. package/.docs/raw/deployment/cloud-providers/netlify-deployer.mdx +44 -13
  84. package/.docs/raw/deployment/mastra-cloud/observability.mdx +19 -17
  85. package/.docs/raw/deployment/mastra-cloud/setting-up.mdx +1 -1
  86. package/.docs/raw/deployment/overview.mdx +2 -2
  87. package/.docs/raw/deployment/web-framework.mdx +5 -5
  88. package/.docs/raw/evals/custom-scorers.mdx +3 -5
  89. package/.docs/raw/evals/overview.mdx +2 -3
  90. package/.docs/raw/evals/running-in-ci.mdx +0 -2
  91. package/.docs/raw/{guides/guide → getting-started}/manual-install.mdx +2 -2
  92. package/.docs/raw/getting-started/project-structure.mdx +1 -1
  93. package/.docs/raw/getting-started/start.mdx +72 -0
  94. package/.docs/raw/getting-started/studio.mdx +1 -1
  95. package/.docs/raw/{frameworks/agentic-uis/ai-sdk.mdx → guides/build-your-ui/ai-sdk-ui.mdx} +113 -11
  96. package/.docs/raw/{frameworks/web-frameworks → guides/getting-started}/astro.mdx +23 -25
  97. package/.docs/raw/{frameworks/servers → guides/getting-started}/express.mdx +3 -4
  98. package/.docs/raw/guides/{quickstarts/nextjs.mdx → getting-started/next-js.mdx} +11 -11
  99. package/.docs/raw/guides/{quickstarts/standalone-server.mdx → getting-started/quickstart.mdx} +7 -7
  100. package/.docs/raw/{frameworks/web-frameworks → guides/getting-started}/sveltekit.mdx +23 -25
  101. package/.docs/raw/{frameworks/web-frameworks → guides/getting-started}/vite-react.mdx +7 -7
  102. package/.docs/raw/guides/guide/ai-recruiter.mdx +2 -3
  103. package/.docs/raw/guides/guide/chef-michel.mdx +2 -3
  104. package/.docs/raw/guides/guide/notes-mcp-server.mdx +2 -2
  105. package/.docs/raw/guides/guide/research-assistant.mdx +7 -8
  106. package/.docs/raw/guides/guide/stock-agent.mdx +4 -6
  107. package/.docs/raw/guides/guide/web-search.mdx +12 -10
  108. package/.docs/raw/guides/guide/whatsapp-chat-bot.mdx +421 -0
  109. package/.docs/raw/guides/index.mdx +3 -35
  110. package/.docs/raw/guides/migrations/agentnetwork.mdx +4 -4
  111. package/.docs/raw/guides/migrations/ai-sdk-v4-to-v5.mdx +1 -1
  112. package/.docs/raw/guides/migrations/upgrade-to-v1/agent.mdx +40 -0
  113. package/.docs/raw/guides/migrations/upgrade-to-v1/tools.mdx +5 -0
  114. package/.docs/raw/guides/migrations/upgrade-to-v1/workflows.mdx +51 -0
  115. package/.docs/raw/guides/migrations/vnext-to-standard-apis.mdx +2 -2
  116. package/.docs/raw/index.mdx +2 -2
  117. package/.docs/raw/mcp/overview.mdx +3 -5
  118. package/.docs/raw/memory/memory-processors.mdx +264 -79
  119. package/.docs/raw/memory/semantic-recall.mdx +7 -7
  120. package/.docs/raw/memory/storage/memory-with-libsql.mdx +2 -4
  121. package/.docs/raw/memory/storage/memory-with-mongodb.mdx +2 -4
  122. package/.docs/raw/memory/storage/memory-with-pg.mdx +2 -4
  123. package/.docs/raw/memory/storage/memory-with-upstash.mdx +2 -4
  124. package/.docs/raw/memory/threads-and-resources.mdx +3 -3
  125. package/.docs/raw/memory/working-memory.mdx +14 -7
  126. package/.docs/raw/{logging.mdx → observability/logging.mdx} +1 -1
  127. package/.docs/raw/observability/overview.mdx +2 -3
  128. package/.docs/raw/observability/tracing/bridges/otel.mdx +176 -0
  129. package/.docs/raw/observability/tracing/exporters/arize.mdx +17 -0
  130. package/.docs/raw/observability/tracing/exporters/braintrust.mdx +19 -0
  131. package/.docs/raw/observability/tracing/exporters/langfuse.mdx +20 -0
  132. package/.docs/raw/observability/tracing/exporters/langsmith.mdx +12 -0
  133. package/.docs/raw/observability/tracing/exporters/otel.mdx +25 -5
  134. package/.docs/raw/observability/tracing/exporters/posthog.mdx +107 -0
  135. package/.docs/raw/observability/tracing/overview.mdx +74 -8
  136. package/.docs/raw/observability/tracing/processors/sensitive-data-filter.mdx +0 -1
  137. package/.docs/raw/rag/chunking-and-embedding.mdx +16 -17
  138. package/.docs/raw/rag/overview.mdx +3 -2
  139. package/.docs/raw/rag/retrieval.mdx +43 -38
  140. package/.docs/raw/rag/vector-databases.mdx +93 -2
  141. package/.docs/raw/reference/agents/agent.mdx +7 -10
  142. package/.docs/raw/reference/agents/generate.mdx +55 -6
  143. package/.docs/raw/reference/agents/generateLegacy.mdx +2 -2
  144. package/.docs/raw/reference/agents/getLLM.mdx +1 -1
  145. package/.docs/raw/reference/agents/network.mdx +46 -3
  146. package/.docs/raw/reference/cli/mastra.mdx +2 -1
  147. package/.docs/raw/reference/client-js/agents.mdx +3 -3
  148. package/.docs/raw/reference/client-js/memory.mdx +43 -0
  149. package/.docs/raw/reference/client-js/workflows.mdx +92 -63
  150. package/.docs/raw/reference/core/getLogger.mdx +1 -1
  151. package/.docs/raw/reference/core/listLogs.mdx +1 -1
  152. package/.docs/raw/reference/core/listLogsByRunId.mdx +1 -1
  153. package/.docs/raw/reference/core/mastra-model-gateway.mdx +5 -19
  154. package/.docs/raw/reference/core/setLogger.mdx +1 -1
  155. package/.docs/raw/reference/core/setTelemetry.mdx +1 -1
  156. package/.docs/raw/reference/deployer/netlify.mdx +1 -2
  157. package/.docs/raw/reference/evals/answer-relevancy.mdx +28 -98
  158. package/.docs/raw/reference/evals/answer-similarity.mdx +12 -258
  159. package/.docs/raw/reference/evals/bias.mdx +29 -87
  160. package/.docs/raw/reference/evals/completeness.mdx +31 -90
  161. package/.docs/raw/reference/evals/content-similarity.mdx +28 -88
  162. package/.docs/raw/reference/evals/context-precision.mdx +28 -130
  163. package/.docs/raw/reference/evals/context-relevance.mdx +11 -11
  164. package/.docs/raw/reference/evals/faithfulness.mdx +28 -101
  165. package/.docs/raw/reference/evals/hallucination.mdx +28 -103
  166. package/.docs/raw/reference/evals/keyword-coverage.mdx +28 -107
  167. package/.docs/raw/reference/evals/noise-sensitivity.mdx +11 -11
  168. package/.docs/raw/reference/evals/prompt-alignment.mdx +15 -15
  169. package/.docs/raw/reference/evals/scorer-utils.mdx +362 -0
  170. package/.docs/raw/reference/evals/textual-difference.mdx +27 -100
  171. package/.docs/raw/reference/evals/tone-consistency.mdx +25 -98
  172. package/.docs/raw/reference/evals/tool-call-accuracy.mdx +7 -7
  173. package/.docs/raw/reference/evals/toxicity.mdx +29 -92
  174. package/.docs/raw/reference/index.mdx +1 -0
  175. package/.docs/raw/reference/memory/memory-class.mdx +5 -7
  176. package/.docs/raw/reference/observability/tracing/bridges/otel.mdx +150 -0
  177. package/.docs/raw/reference/observability/tracing/configuration.mdx +0 -4
  178. package/.docs/raw/reference/observability/tracing/exporters/arize.mdx +4 -0
  179. package/.docs/raw/reference/observability/tracing/exporters/langsmith.mdx +17 -1
  180. package/.docs/raw/reference/observability/tracing/exporters/otel.mdx +6 -0
  181. package/.docs/raw/reference/observability/tracing/exporters/posthog.mdx +132 -0
  182. package/.docs/raw/reference/observability/tracing/instances.mdx +0 -4
  183. package/.docs/raw/reference/observability/tracing/interfaces.mdx +29 -4
  184. package/.docs/raw/reference/observability/tracing/spans.mdx +0 -4
  185. package/.docs/raw/reference/processors/batch-parts-processor.mdx +1 -1
  186. package/.docs/raw/reference/processors/language-detector.mdx +10 -3
  187. package/.docs/raw/reference/processors/message-history-processor.mdx +131 -0
  188. package/.docs/raw/reference/processors/moderation-processor.mdx +12 -5
  189. package/.docs/raw/reference/processors/pii-detector.mdx +12 -5
  190. package/.docs/raw/reference/processors/processor-interface.mdx +502 -0
  191. package/.docs/raw/reference/processors/prompt-injection-detector.mdx +10 -3
  192. package/.docs/raw/reference/processors/semantic-recall-processor.mdx +197 -0
  193. package/.docs/raw/reference/processors/system-prompt-scrubber.mdx +3 -4
  194. package/.docs/raw/reference/processors/token-limiter-processor.mdx +2 -2
  195. package/.docs/raw/reference/processors/tool-call-filter.mdx +125 -0
  196. package/.docs/raw/reference/processors/unicode-normalizer.mdx +1 -1
  197. package/.docs/raw/reference/processors/working-memory-processor.mdx +221 -0
  198. package/.docs/raw/reference/rag/embeddings.mdx +5 -5
  199. package/.docs/raw/reference/rag/rerank.mdx +1 -2
  200. package/.docs/raw/reference/rag/rerankWithScorer.mdx +0 -1
  201. package/.docs/raw/reference/storage/cloudflare-d1.mdx +37 -0
  202. package/.docs/raw/reference/storage/convex.mdx +164 -0
  203. package/.docs/raw/reference/storage/lance.mdx +33 -0
  204. package/.docs/raw/reference/storage/libsql.mdx +37 -0
  205. package/.docs/raw/reference/storage/mongodb.mdx +39 -0
  206. package/.docs/raw/reference/storage/mssql.mdx +37 -0
  207. package/.docs/raw/reference/storage/postgresql.mdx +37 -0
  208. package/.docs/raw/reference/streaming/ChunkType.mdx +1 -1
  209. package/.docs/raw/reference/streaming/agents/stream.mdx +64 -2
  210. package/.docs/raw/reference/streaming/workflows/observeStream.mdx +7 -9
  211. package/.docs/raw/reference/streaming/workflows/{resumeStreamVNext.mdx → resumeStream.mdx} +51 -11
  212. package/.docs/raw/reference/streaming/workflows/stream.mdx +83 -24
  213. package/.docs/raw/reference/templates/overview.mdx +1 -4
  214. package/.docs/raw/reference/tools/client.mdx +1 -2
  215. package/.docs/raw/reference/tools/create-tool.mdx +132 -0
  216. package/.docs/raw/reference/tools/graph-rag-tool.mdx +5 -5
  217. package/.docs/raw/reference/tools/mcp-client.mdx +76 -21
  218. package/.docs/raw/reference/tools/mcp-server.mdx +1 -2
  219. package/.docs/raw/reference/tools/vector-query-tool.mdx +14 -15
  220. package/.docs/raw/reference/vectors/chroma.mdx +81 -1
  221. package/.docs/raw/reference/vectors/convex.mdx +429 -0
  222. package/.docs/raw/reference/vectors/couchbase.mdx +24 -17
  223. package/.docs/raw/reference/vectors/duckdb.mdx +462 -0
  224. package/.docs/raw/reference/vectors/elasticsearch.mdx +310 -0
  225. package/.docs/raw/reference/vectors/lance.mdx +38 -22
  226. package/.docs/raw/reference/vectors/libsql.mdx +35 -2
  227. package/.docs/raw/reference/vectors/mongodb.mdx +35 -2
  228. package/.docs/raw/reference/vectors/opensearch.mdx +37 -16
  229. package/.docs/raw/reference/vectors/pg.mdx +43 -36
  230. package/.docs/raw/reference/vectors/pinecone.mdx +48 -1
  231. package/.docs/raw/reference/vectors/qdrant.mdx +36 -1
  232. package/.docs/raw/reference/vectors/turbopuffer.mdx +74 -0
  233. package/.docs/raw/reference/voice/google.mdx +159 -20
  234. package/.docs/raw/reference/voice/openai-realtime.mdx +2 -2
  235. package/.docs/raw/reference/voice/voice.addInstructions.mdx +2 -3
  236. package/.docs/raw/reference/voice/voice.addTools.mdx +1 -1
  237. package/.docs/raw/reference/voice/voice.answer.mdx +1 -1
  238. package/.docs/raw/reference/voice/voice.close.mdx +1 -1
  239. package/.docs/raw/reference/voice/voice.connect.mdx +1 -1
  240. package/.docs/raw/reference/voice/voice.off.mdx +1 -1
  241. package/.docs/raw/reference/voice/voice.on.mdx +1 -1
  242. package/.docs/raw/reference/voice/voice.send.mdx +1 -1
  243. package/.docs/raw/reference/voice/voice.updateConfig.mdx +1 -1
  244. package/.docs/raw/reference/workflows/run-methods/restart.mdx +142 -0
  245. package/.docs/raw/reference/workflows/run-methods/resume.mdx +44 -0
  246. package/.docs/raw/reference/workflows/run-methods/start.mdx +44 -0
  247. package/.docs/raw/reference/workflows/run.mdx +13 -5
  248. package/.docs/raw/reference/workflows/step.mdx +13 -0
  249. package/.docs/raw/reference/workflows/workflow.mdx +19 -0
  250. package/.docs/raw/server-db/mastra-client.mdx +1 -2
  251. package/.docs/raw/server-db/mastra-server.mdx +30 -1
  252. package/.docs/raw/server-db/request-context.mdx +0 -1
  253. package/.docs/raw/server-db/storage.mdx +11 -0
  254. package/.docs/raw/streaming/overview.mdx +26 -15
  255. package/.docs/raw/streaming/tool-streaming.mdx +48 -5
  256. package/.docs/raw/streaming/workflow-streaming.mdx +5 -11
  257. package/.docs/raw/tools-mcp/advanced-usage.mdx +1 -2
  258. package/.docs/raw/tools-mcp/mcp-overview.mdx +3 -5
  259. package/.docs/raw/voice/overview.mdx +21 -41
  260. package/.docs/raw/voice/speech-to-speech.mdx +4 -4
  261. package/.docs/raw/voice/speech-to-text.mdx +1 -2
  262. package/.docs/raw/voice/text-to-speech.mdx +1 -2
  263. package/.docs/raw/workflows/control-flow.mdx +180 -0
  264. package/.docs/raw/workflows/error-handling.mdx +1 -0
  265. package/.docs/raw/workflows/human-in-the-loop.mdx +4 -4
  266. package/.docs/raw/workflows/overview.mdx +56 -44
  267. package/.docs/raw/workflows/snapshots.mdx +1 -0
  268. package/.docs/raw/workflows/suspend-and-resume.mdx +85 -16
  269. package/.docs/raw/workflows/time-travel.mdx +313 -0
  270. package/.docs/raw/workflows/workflow-state.mdx +191 -0
  271. package/CHANGELOG.md +18 -0
  272. package/dist/{chunk-5NJC7NRO.js → chunk-4CM2BQNP.js} +24 -4
  273. package/dist/prepare-docs/package-changes.d.ts.map +1 -1
  274. package/dist/prepare-docs/prepare.js +1 -1
  275. package/dist/stdio.js +1 -1
  276. package/package.json +7 -7
  277. package/.docs/raw/agents/human-in-the-loop-with-tools.mdx +0 -90
  278. package/.docs/raw/frameworks/agentic-uis/cedar-os.mdx +0 -102
  279. package/.docs/raw/frameworks/agentic-uis/openrouter.mdx +0 -179
  280. package/.docs/raw/frameworks/web-frameworks/next-js.mdx +0 -379
  281. package/.docs/raw/getting-started/quickstart.mdx +0 -27
  282. package/.docs/raw/getting-started/templates.mdx +0 -73
  283. package/.docs/raw/reference/streaming/workflows/observeStreamVNext.mdx +0 -47
  284. package/.docs/raw/reference/streaming/workflows/streamVNext.mdx +0 -153
  285. /package/.docs/raw/{frameworks/agentic-uis → guides/build-your-ui}/assistant-ui.mdx +0 -0
  286. /package/.docs/raw/{frameworks/agentic-uis → guides/build-your-ui}/copilotkit.mdx +0 -0
@@ -0,0 +1,197 @@
+ ---
+ title: "Reference: Semantic Recall Processor | Processors"
+ description: "Documentation for the SemanticRecall processor in Mastra, which enables semantic search over conversation history using vector embeddings."
+ ---
+
+ # SemanticRecall
+
+ The `SemanticRecall` is a **hybrid processor** that enables semantic search over conversation history using vector embeddings. On input, it performs semantic search to find relevant historical messages. On output, it creates embeddings for new messages to enable future semantic retrieval.
+
+ ## Usage example
+
+ ```typescript copy
+ import { SemanticRecall } from "@mastra/core/processors";
+ import { openai } from "@ai-sdk/openai";
+
+ const processor = new SemanticRecall({
+   storage: memoryStorage,
+   vector: vectorStore,
+   embedder: openai.embedding("text-embedding-3-small"),
+   topK: 5,
+   messageRange: 2,
+   scope: "resource",
+ });
+ ```
+
+ ## Constructor parameters
+
+ <PropertiesTable
+   content={[
+     {
+       name: "options",
+       type: "SemanticRecallOptions",
+       description: "Configuration options for the semantic recall processor",
+       isOptional: false,
+     },
+   ]}
+ />
+
+ ### Options
+
+ <PropertiesTable
+   content={[
+     {
+       name: "storage",
+       type: "MemoryStorage",
+       description: "Storage instance for retrieving messages",
+       isOptional: false,
+     },
+     {
+       name: "vector",
+       type: "MastraVector",
+       description: "Vector store for semantic search",
+       isOptional: false,
+     },
+     {
+       name: "embedder",
+       type: "MastraEmbeddingModel<string>",
+       description: "Embedder for generating query embeddings",
+       isOptional: false,
+     },
+     {
+       name: "topK",
+       type: "number",
+       description: "Number of most similar messages to retrieve",
+       isOptional: true,
+       default: "4",
+     },
+     {
+       name: "messageRange",
+       type: "number | { before: number; after: number }",
+       description: "Number of context messages to include before/after each match. Can be a single number (same for both) or an object with separate values",
+       isOptional: true,
+       default: "1",
+     },
+     {
+       name: "scope",
+       type: "'thread' | 'resource'",
+       description: "Scope of semantic search. 'thread' searches within the current thread only. 'resource' searches across all threads for the resource",
+       isOptional: true,
+       default: "'resource'",
+     },
+     {
+       name: "threshold",
+       type: "number",
+       description: "Minimum similarity score threshold (0-1). Messages below this threshold are filtered out",
+       isOptional: true,
+     },
+     {
+       name: "indexName",
+       type: "string",
+       description: "Index name for the vector store. If not provided, auto-generated based on embedder model",
+       isOptional: true,
+     },
+     {
+       name: "logger",
+       type: "IMastraLogger",
+       description: "Optional logger instance for structured logging",
+       isOptional: true,
+     },
+   ]}
+ />
+
+ ## Returns
+
+ <PropertiesTable
+   content={[
+     {
+       name: "id",
+       type: "string",
+       description: "Processor identifier set to 'semantic-recall'",
+       isOptional: false,
+     },
+     {
+       name: "name",
+       type: "string",
+       description: "Processor display name set to 'SemanticRecall'",
+       isOptional: false,
+     },
+     {
+       name: "processInput",
+       type: "(args: { messages: MastraDBMessage[]; messageList: MessageList; abort: (reason?: string) => never; tracingContext?: TracingContext; requestContext?: RequestContext }) => Promise<MessageList | MastraDBMessage[]>",
+       description: "Performs semantic search on historical messages and adds relevant context to the message list",
+       isOptional: false,
+     },
+     {
+       name: "processOutputResult",
+       type: "(args: { messages: MastraDBMessage[]; messageList?: MessageList; abort: (reason?: string) => never; tracingContext?: TracingContext; requestContext?: RequestContext }) => Promise<MessageList | MastraDBMessage[]>",
+       description: "Creates embeddings for new messages to enable future semantic search",
+       isOptional: false,
+     },
+   ]}
+ />
+
+ ## Extended usage example
+
+ ```typescript title="src/mastra/agents/semantic-memory-agent.ts" showLineNumbers copy
+ import { Agent } from "@mastra/core/agent";
+ import { SemanticRecall, MessageHistory } from "@mastra/core/processors";
+ import { PostgresStorage } from "@mastra/pg";
+ import { PgVector } from "@mastra/pg";
+ import { openai } from "@ai-sdk/openai";
+
+ const storage = new PostgresStorage({
+   connectionString: process.env.DATABASE_URL,
+ });
+
+ const vector = new PgVector({
+   connectionString: process.env.DATABASE_URL,
+ });
+
+ const semanticRecall = new SemanticRecall({
+   storage,
+   vector,
+   embedder: openai.embedding("text-embedding-3-small"),
+   topK: 5,
+   messageRange: { before: 2, after: 1 },
+   scope: "resource",
+   threshold: 0.7,
+ });
+
+ export const agent = new Agent({
+   name: "semantic-memory-agent",
+   instructions: "You are a helpful assistant with semantic memory recall",
+   model: "openai:gpt-4o",
+   inputProcessors: [
+     semanticRecall,
+     new MessageHistory({ storage, lastMessages: 50 }),
+   ],
+   outputProcessors: [
+     semanticRecall,
+     new MessageHistory({ storage }),
+   ],
+ });
+ ```
+
+ ## Behavior
+
+ ### Input processing
+ 1. Extracts the user query from the last user message
+ 2. Generates embeddings for the query
+ 3. Performs vector search to find semantically similar messages
+ 4. Retrieves matched messages along with surrounding context (based on `messageRange`)
+ 5. For `scope: 'resource'`, formats cross-thread messages as a system message with timestamps
+ 6. Adds recalled messages with `source: 'memory'` tag
+
+ ### Output processing
+ 1. Extracts text content from new user and assistant messages
+ 2. Generates embeddings for each message
+ 3. Stores embeddings in the vector store with metadata (message ID, thread ID, resource ID, role, content, timestamp)
+ 4. Uses LRU caching for embeddings to avoid redundant API calls
+
+ ### Cross-thread recall
+ When `scope` is set to `'resource'`, the processor can recall messages from other threads. These cross-thread messages are formatted as a system message with timestamps and conversation labels to provide context about when and where the conversation occurred.
+
+ ## Related
+
+ - [Guardrails](/docs/v1/agents/guardrails)
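The new page above only shows resource-scoped configurations in its examples. A minimal thread-scoped sketch, built purely from the options the page documents (the `memoryStorage` and `vectorStore` instances are placeholders, just as in the page's own usage example):

```typescript
import { SemanticRecall } from "@mastra/core/processors";
import { openai } from "@ai-sdk/openai";

// Thread-scoped recall: search only the current thread, keep the 3 closest
// matches, include 1 message of context around each match, and drop anything
// scoring below 0.6 similarity.
const threadRecall = new SemanticRecall({
  storage: memoryStorage, // placeholder, as in the usage example above
  vector: vectorStore,    // placeholder vector store
  embedder: openai.embedding("text-embedding-3-small"),
  topK: 3,
  messageRange: 1,
  scope: "thread",
  threshold: 0.6,
});
```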
@@ -10,11 +10,10 @@ The `SystemPromptScrubber` is an **output processor** that detects and handles s
  ## Usage example

  ```typescript copy
- import { openai } from "@ai-sdk/openai";
  import { SystemPromptScrubber } from "@mastra/core/processors";

  const processor = new SystemPromptScrubber({
-   model: openai("gpt-4.1-nano"),
+   model: "openrouter/openai/gpt-oss-safeguard-20b",
    strategy: "redact",
    redactionMethod: "mask",
    includeDetections: true
@@ -131,7 +130,7 @@ import { BatchPartsProcessor, SystemPromptScrubber } from "@mastra/core/processo
  export const agent = new Agent({
    name: "scrubbed-agent",
    instructions: "You are a helpful assistant",
-   model: "openai/gpt-4o-mini",
+   model: "openai/gpt-5.1",
    outputProcessors: [
      // Batch stream parts first to reduce LLM calls
      new BatchPartsProcessor({
@@ -139,7 +138,7 @@ export const agent = new Agent({
      }),
      // Then apply system prompt detection on batched content
      new SystemPromptScrubber({
-       model: "openai/gpt-4.1-nano",
+       model: "openrouter/openai/gpt-oss-safeguard-20b",
        strategy: "redact",
        customPatterns: ["system prompt", "internal instructions"],
        includeDetections: true,
@@ -45,7 +45,7 @@ const processor = new TokenLimiterProcessor({
    {
      name: "encoding",
      type: "TiktokenBPE",
-     description: "Optional encoding to use. Defaults to o200k_base which is used by gpt-4o",
+     description: "Optional encoding to use. Defaults to o200k_base which is used by gpt-5.1",
      isOptional: true,
      default: "o200k_base",
    },
@@ -124,7 +124,7 @@ import { TokenLimiterProcessor } from "@mastra/core/processors";
  export const agent = new Agent({
    name: "limited-agent",
    instructions: "You are a helpful assistant",
-   model: "openai/gpt-4o-mini",
+   model: "openai/gpt-5.1",
    outputProcessors: [
      new TokenLimiterProcessor({
        limit: 1000,
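The `o200k_base` default called out in the hunk above can be reproduced outside the processor to estimate how many tokens a given message would count for. A rough sketch using js-tiktoken; the library choice and its `getEncoding` helper are assumptions (the processor handles encoding internally), and `o200k_base` support depends on the js-tiktoken version:

```typescript
import { getEncoding } from "js-tiktoken";

// o200k_base is the encoding the TokenLimiterProcessor defaults to.
const enc = getEncoding("o200k_base");

const text = "You are a helpful assistant";
const tokenCount = enc.encode(text).length;

// Responses whose running total exceeds the configured `limit`
// (1000 in the extended example above) would be truncated by the processor.
console.log(`~${tokenCount} tokens`);
```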
@@ -0,0 +1,125 @@
+ ---
+ title: "Reference: Tool Call Filter | Processors"
+ description: "Documentation for the ToolCallFilter processor in Mastra, which filters out tool calls and results from messages."
+ ---
+
+ # ToolCallFilter
+
+ The `ToolCallFilter` is an **input processor** that filters out tool calls and their results from the message history before sending to the model. This is useful when you want to exclude specific tool interactions from context or remove all tool calls entirely.
+
+ ## Usage example
+
+ ```typescript copy
+ import { ToolCallFilter } from "@mastra/core/processors";
+
+ // Exclude all tool calls
+ const filterAll = new ToolCallFilter();
+
+ // Exclude specific tools by name
+ const filterSpecific = new ToolCallFilter({
+   exclude: ["searchDatabase", "sendEmail"],
+ });
+ ```
+
+ ## Constructor parameters
+
+ <PropertiesTable
+   content={[
+     {
+       name: "options",
+       type: "Options",
+       description: "Configuration options for the tool call filter",
+       isOptional: true,
+     },
+   ]}
+ />
+
+ ### Options
+
+ <PropertiesTable
+   content={[
+     {
+       name: "exclude",
+       type: "string[]",
+       description: "List of specific tool names to exclude. If not provided or undefined, all tool calls are excluded",
+       isOptional: true,
+     },
+   ]}
+ />
+
+ ## Returns
+
+ <PropertiesTable
+   content={[
+     {
+       name: "id",
+       type: "string",
+       description: "Processor identifier set to 'tool-call-filter'",
+       isOptional: false,
+     },
+     {
+       name: "name",
+       type: "string",
+       description: "Processor display name set to 'ToolCallFilter'",
+       isOptional: false,
+     },
+     {
+       name: "processInput",
+       type: "(args: { messages: MastraDBMessage[]; messageList: MessageList; abort: (reason?: string) => never; requestContext?: RequestContext }) => Promise<MessageList | MastraDBMessage[]>",
+       description: "Processes input messages to filter out tool calls and their results based on configuration",
+       isOptional: false,
+     },
+   ]}
+ />
+
+ ## Extended usage example
+
+ ```typescript title="src/mastra/agents/filtered-agent.ts" showLineNumbers copy
+ import { Agent } from "@mastra/core/agent";
+ import { ToolCallFilter } from "@mastra/core/processors";
+
+ export const agent = new Agent({
+   name: "filtered-agent",
+   instructions: "You are a helpful assistant",
+   model: "openai:gpt-4o",
+   tools: {
+     searchDatabase,
+     sendEmail,
+     getWeather,
+   },
+   inputProcessors: [
+     // Filter out database search tool calls from context
+     // to reduce token usage while keeping other tool interactions
+     new ToolCallFilter({
+       exclude: ["searchDatabase"],
+     }),
+   ],
+ });
+ ```
+
+ ## Filtering all tool calls
+
+ ```typescript copy
+ import { Agent } from "@mastra/core/agent";
+ import { ToolCallFilter } from "@mastra/core/processors";
+
+ export const agent = new Agent({
+   name: "no-tools-context-agent",
+   instructions: "You are a helpful assistant",
+   model: "openai:gpt-4o",
+   tools: {
+     searchDatabase,
+     sendEmail,
+   },
+   inputProcessors: [
+     // Remove all tool calls from the message history
+     // The agent can still use tools, but previous tool interactions
+     // won't be included in the context
+     new ToolCallFilter(),
+   ],
+ });
+ ```
+
+ ## Related
+
+ - [Guardrails](/docs/v1/agents/guardrails)
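Since the `ToolCallFilter` page above covers the input side and the `TokenLimiterProcessor` hunk earlier covers the output side, the two can sit on either side of the same agent. A sketch of that pairing, reusing only options shown in this diff (the `weatherTool` import path is a placeholder):

```typescript
import { Agent } from "@mastra/core/agent";
import { ToolCallFilter, TokenLimiterProcessor } from "@mastra/core/processors";
import { weatherTool } from "../tools/weather-tool"; // placeholder tool

export const compactAgent = new Agent({
  name: "compact-agent",
  instructions: "You are a helpful assistant",
  model: "openai:gpt-4o",
  tools: { weatherTool },
  // Drop prior tool calls from the context on the way in
  inputProcessors: [new ToolCallFilter()],
  // Cap the response at 1000 tokens on the way out
  outputProcessors: [new TokenLimiterProcessor({ limit: 1000 })],
});
```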
@@ -101,7 +101,7 @@ import { UnicodeNormalizer } from "@mastra/core/processors";
  export const agent = new Agent({
    name: "normalized-agent",
    instructions: "You are a helpful assistant",
-   model: "openai/gpt-4o-mini",
+   model: "openai/gpt-5.1",
    inputProcessors: [
      new UnicodeNormalizer({
        stripControlChars: true,
@@ -0,0 +1,221 @@
+ ---
+ title: "Reference: Working Memory Processor | Processors"
+ description: "Documentation for the WorkingMemory processor in Mastra, which injects persistent user/context data as system instructions."
+ ---
+
+ # WorkingMemory
+
+ The `WorkingMemory` is an **input processor** that injects working memory data as a system message. It retrieves persistent information from storage and formats it as instructions for the LLM, enabling the agent to maintain context about users across conversations.
+
+ ## Usage example
+
+ ```typescript copy
+ import { WorkingMemory } from "@mastra/core/processors";
+
+ const processor = new WorkingMemory({
+   storage: memoryStorage,
+   scope: "resource",
+   template: {
+     format: "markdown",
+     content: `# User Profile
+ - **Name**:
+ - **Preferences**:
+ - **Goals**:
+ `,
+   },
+ });
+ ```
+
+ ## Constructor parameters
+
+ <PropertiesTable
+   content={[
+     {
+       name: "options",
+       type: "Options",
+       description: "Configuration options for the working memory processor",
+       isOptional: false,
+     },
+   ]}
+ />
+
+ ### Options
+
+ <PropertiesTable
+   content={[
+     {
+       name: "storage",
+       type: "MemoryStorage",
+       description: "Storage instance for retrieving working memory data",
+       isOptional: false,
+     },
+     {
+       name: "template",
+       type: "WorkingMemoryTemplate",
+       description: "Template defining the format and structure of working memory",
+       isOptional: true,
+     },
+     {
+       name: "scope",
+       type: "'thread' | 'resource'",
+       description: "Scope of working memory. 'thread' scopes to current thread, 'resource' shares across all threads for the resource",
+       isOptional: true,
+       default: "'resource'",
+     },
+     {
+       name: "useVNext",
+       type: "boolean",
+       description: "Use the next-generation instruction format with improved guidelines",
+       isOptional: true,
+     },
+     {
+       name: "templateProvider",
+       type: "{ getWorkingMemoryTemplate(args: { memoryConfig?: MemoryConfig }): Promise<WorkingMemoryTemplate | null> }",
+       description: "Dynamic template provider for runtime template resolution",
+       isOptional: true,
+     },
+     {
+       name: "logger",
+       type: "IMastraLogger",
+       description: "Optional logger instance for structured logging",
+       isOptional: true,
+     },
+   ]}
+ />
+
+ ### WorkingMemoryTemplate
+
+ <PropertiesTable
+   content={[
+     {
+       name: "format",
+       type: "'markdown' | 'json'",
+       description: "Format of the working memory content",
+       isOptional: false,
+     },
+     {
+       name: "content",
+       type: "string",
+       description: "Template content defining the structure of working memory data",
+       isOptional: false,
+     },
+   ]}
+ />
+
+ ## Returns
+
+ <PropertiesTable
+   content={[
+     {
+       name: "id",
+       type: "string",
+       description: "Processor identifier set to 'working-memory'",
+       isOptional: false,
+     },
+     {
+       name: "name",
+       type: "string",
+       description: "Processor display name set to 'WorkingMemory'",
+       isOptional: false,
+     },
+     {
+       name: "defaultWorkingMemoryTemplate",
+       type: "string",
+       description: "The default markdown template used when no custom template is provided",
+       isOptional: false,
+     },
+     {
+       name: "processInput",
+       type: "(args: { messages: MastraDBMessage[]; messageList: MessageList; abort: (reason?: string) => never; requestContext?: RequestContext }) => Promise<MessageList | MastraDBMessage[]>",
+       description: "Retrieves working memory and adds it as a system message to the message list",
+       isOptional: false,
+     },
+   ]}
+ />
+
+ ## Extended usage example
+
+ ```typescript title="src/mastra/agents/personalized-agent.ts" showLineNumbers copy
+ import { Agent } from "@mastra/core/agent";
+ import { WorkingMemory, MessageHistory } from "@mastra/core/processors";
+ import { PostgresStorage } from "@mastra/pg";
+
+ const storage = new PostgresStorage({
+   connectionString: process.env.DATABASE_URL,
+ });
+
+ export const agent = new Agent({
+   name: "personalized-agent",
+   instructions: "You are a helpful assistant that remembers user preferences",
+   model: "openai:gpt-4o",
+   inputProcessors: [
+     new WorkingMemory({
+       storage,
+       scope: "resource",
+       template: {
+         format: "markdown",
+         content: `# User Information
+ - **Name**:
+ - **Location**:
+ - **Preferences**:
+ - **Communication Style**:
+ - **Current Projects**:
+ `,
+       },
+     }),
+     new MessageHistory({ storage, lastMessages: 50 }),
+   ],
+   outputProcessors: [
+     new MessageHistory({ storage }),
+   ],
+ });
+ ```
+
+ ## JSON format example
+
+ ```typescript copy
+ import { WorkingMemory } from "@mastra/core/processors";
+
+ const processor = new WorkingMemory({
+   storage: memoryStorage,
+   scope: "resource",
+   template: {
+     format: "json",
+     content: JSON.stringify({
+       user: {
+         name: { type: "string" },
+         preferences: { type: "object" },
+         goals: { type: "array" },
+       },
+     }),
+   },
+ });
+ ```
+
+ ## Behavior
+
+ ### Input processing
+ 1. Retrieves `threadId` and `resourceId` from the request context
+ 2. Based on scope, fetches working memory from either:
+    - Thread metadata (`scope: 'thread'`)
+    - Resource record (`scope: 'resource'`)
+ 3. Resolves the template (from provider, options, or default)
+ 4. Generates system instructions that include:
+    - Guidelines for the LLM on storing and updating information
+    - The template structure
+    - Current working memory data
+ 5. Adds the instruction as a system message with `source: 'memory'` tag
+
+ ### Working memory updates
+ Working memory updates happen through the `updateWorkingMemory` tool provided by the Memory class, not through this processor. The processor only handles injecting the current working memory state into conversations.
+
+ ### Default template
+ If no template is provided, the processor uses a default markdown template with fields for:
+ - First Name, Last Name
+ - Location, Occupation
+ - Interests, Goals
+ - Events, Facts, Projects
+
+ ## Related
+
+ - [Guardrails](/docs/v1/agents/guardrails)
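The two new processor pages added in this version (`WorkingMemory` above and `SemanticRecall` earlier) describe pieces of a single memory pipeline. One possible composition, sketched from their extended examples; the ordering and the pre-configured `storage` and `semanticRecall` instances are assumptions drawn from those examples, not something the diff itself shows:

```typescript
import { Agent } from "@mastra/core/agent";
import { WorkingMemory, MessageHistory } from "@mastra/core/processors";

// `storage` and `semanticRecall` are assumed to be configured as in the
// extended examples earlier in this diff (PostgresStorage + PgVector).
export const memoryAgent = new Agent({
  name: "memory-agent",
  instructions: "You are a helpful assistant with layered memory",
  model: "openai:gpt-4o",
  inputProcessors: [
    new WorkingMemory({ storage, scope: "resource" }), // persistent user profile
    semanticRecall,                                    // semantically relevant history
    new MessageHistory({ storage, lastMessages: 50 }), // recent turns
  ],
  outputProcessors: [
    semanticRecall,                  // embed new messages for future recall
    new MessageHistory({ storage }), // persist the new turns
  ],
});
```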
@@ -13,9 +13,10 @@ The `embed` function generates a vector embedding for a single text input:

  ```typescript
  import { embed } from "ai";
+ import { ModelRouterEmbeddingModel } from "@mastra/core/llm";

  const result = await embed({
-   model: openai.embedding("text-embedding-3-small"),
+   model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
    value: "Your text to embed",
    maxRetries: 2, // optional, defaults to 2
  });
@@ -80,7 +81,7 @@ For embedding multiple texts at once, use the `embedMany` function:
  import { embedMany } from "ai";

  const result = await embedMany({
-   model: openai.embedding("text-embedding-3-small"),
+   model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
    values: ["First text", "Second text", "Third text"],
    maxRetries: 2, // optional, defaults to 2
  });
@@ -142,17 +143,16 @@ const result = await embedMany({

  ```typescript
  import { embed, embedMany } from "ai";
- import { openai } from "@ai-sdk/openai";

  // Single embedding
  const singleResult = await embed({
-   model: openai.embedding("text-embedding-3-small"),
+   model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
    value: "What is the meaning of life?",
  });

  // Multiple embeddings
  const multipleResult = await embedMany({
-   model: openai.embedding("text-embedding-3-small"),
+   model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
    values: [
      "First question about life",
      "Second question about universe",
@@ -19,10 +19,9 @@ function rerank(
  ## Usage Example

  ```typescript
- import { openai } from "@ai-sdk/openai";
  import { rerank } from "@mastra/rag";

- const model = openai("gpt-4o-mini");
+ const model = "openai/gpt-5.1";

  const rerankedResults = await rerank(
    vectorSearchResults,
@@ -19,7 +19,6 @@ function rerankWithScorer({
  ## Usage Example

  ```typescript
- import { openai } from "@ai-sdk/openai";
  import { rerankWithScorer as rerank, CohereRelevanceScorer } from "@mastra/rag";

  const scorer = new CohereRelevanceScorer("rerank-v3.5");