@mastra/mcp-docs-server 1.0.0-beta.4 → 1.0.0-beta.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.docs/organized/changelogs/%40internal%2Fstorage-test-utils.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fastra.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fchroma.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fconvex.md +29 -0
- package/.docs/organized/changelogs/%40mastra%2Fcore.md +411 -211
- package/.docs/organized/changelogs/%40mastra%2Fcouchbase.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fduckdb.md +42 -0
- package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Felasticsearch.md +52 -0
- package/.docs/organized/changelogs/%40mastra%2Fevals.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Flance.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Flibsql.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Floggers.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmcp-registry-registry.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmcp.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmemory.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmssql.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fopensearch.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fpg.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fpinecone.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fqdrant.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Frag.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Freact.md +89 -1
- package/.docs/organized/changelogs/%40mastra%2Fs3vectors.md +9 -0
- package/.docs/organized/changelogs/%40mastra%2Fschema-compat.md +42 -0
- package/.docs/organized/changelogs/%40mastra%2Fserver.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fturbopuffer.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fupstash.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvectorize.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-azure.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-cloudflare.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-deepgram.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-elevenlabs.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-gladia.md +92 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-google-gemini-live.md +67 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-murf.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-openai-realtime.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-openai.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-playai.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-sarvam.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-speechify.md +201 -1
- package/.docs/organized/changelogs/create-mastra.md +201 -1
- package/.docs/organized/changelogs/mastra.md +201 -1
- package/.docs/organized/code-examples/agui.md +1 -0
- package/.docs/organized/code-examples/ai-sdk-v5.md +1 -0
- package/.docs/organized/code-examples/mcp-server-adapters.md +721 -0
- package/.docs/organized/code-examples/memory-with-processors.md +1 -1
- package/.docs/organized/code-examples/quick-start.md +1 -1
- package/.docs/organized/code-examples/server-app-access.md +342 -0
- package/.docs/raw/agents/adding-voice.mdx +7 -10
- package/.docs/raw/agents/agent-approval.mdx +189 -0
- package/.docs/raw/agents/guardrails.mdx +26 -23
- package/.docs/raw/agents/networks.mdx +2 -2
- package/.docs/raw/agents/overview.mdx +27 -62
- package/.docs/raw/agents/processors.mdx +279 -0
- package/.docs/raw/agents/using-tools.mdx +4 -5
- package/.docs/raw/course/01-first-agent/05-running-playground.md +5 -5
- package/.docs/raw/course/01-first-agent/09-testing-your-agent.md +3 -3
- package/.docs/raw/course/01-first-agent/13-testing-your-tool.md +3 -3
- package/.docs/raw/course/01-first-agent/17-testing-memory.md +2 -2
- package/.docs/raw/course/04-workflows/07-using-playground.md +1 -1
- package/.docs/raw/deployment/building-mastra.mdx +1 -1
- package/.docs/raw/deployment/cloud-providers/amazon-ec2.mdx +1 -1
- package/.docs/raw/deployment/cloud-providers/aws-lambda.mdx +1 -1
- package/.docs/raw/deployment/cloud-providers/azure-app-services.mdx +1 -1
- package/.docs/raw/deployment/cloud-providers/digital-ocean.mdx +1 -1
- package/.docs/raw/deployment/cloud-providers/index.mdx +20 -27
- package/.docs/raw/deployment/cloud-providers/netlify-deployer.mdx +44 -13
- package/.docs/raw/deployment/mastra-cloud/observability.mdx +19 -17
- package/.docs/raw/deployment/mastra-cloud/setting-up.mdx +1 -1
- package/.docs/raw/deployment/overview.mdx +2 -2
- package/.docs/raw/deployment/web-framework.mdx +5 -5
- package/.docs/raw/evals/custom-scorers.mdx +3 -5
- package/.docs/raw/evals/overview.mdx +2 -3
- package/.docs/raw/evals/running-in-ci.mdx +0 -2
- package/.docs/raw/{guides/guide → getting-started}/manual-install.mdx +2 -2
- package/.docs/raw/getting-started/project-structure.mdx +1 -1
- package/.docs/raw/getting-started/start.mdx +72 -0
- package/.docs/raw/getting-started/studio.mdx +1 -1
- package/.docs/raw/{frameworks/agentic-uis/ai-sdk.mdx → guides/build-your-ui/ai-sdk-ui.mdx} +113 -11
- package/.docs/raw/{frameworks/web-frameworks → guides/getting-started}/astro.mdx +23 -25
- package/.docs/raw/{frameworks/servers → guides/getting-started}/express.mdx +3 -4
- package/.docs/raw/guides/{quickstarts/nextjs.mdx → getting-started/next-js.mdx} +11 -11
- package/.docs/raw/guides/{quickstarts/standalone-server.mdx → getting-started/quickstart.mdx} +7 -7
- package/.docs/raw/{frameworks/web-frameworks → guides/getting-started}/sveltekit.mdx +23 -25
- package/.docs/raw/{frameworks/web-frameworks → guides/getting-started}/vite-react.mdx +7 -7
- package/.docs/raw/guides/guide/ai-recruiter.mdx +2 -3
- package/.docs/raw/guides/guide/chef-michel.mdx +2 -3
- package/.docs/raw/guides/guide/notes-mcp-server.mdx +2 -2
- package/.docs/raw/guides/guide/research-assistant.mdx +7 -8
- package/.docs/raw/guides/guide/stock-agent.mdx +4 -6
- package/.docs/raw/guides/guide/web-search.mdx +12 -10
- package/.docs/raw/guides/guide/whatsapp-chat-bot.mdx +421 -0
- package/.docs/raw/guides/index.mdx +3 -35
- package/.docs/raw/guides/migrations/agentnetwork.mdx +4 -4
- package/.docs/raw/guides/migrations/ai-sdk-v4-to-v5.mdx +1 -1
- package/.docs/raw/guides/migrations/upgrade-to-v1/agent.mdx +40 -0
- package/.docs/raw/guides/migrations/upgrade-to-v1/tools.mdx +5 -0
- package/.docs/raw/guides/migrations/upgrade-to-v1/workflows.mdx +51 -0
- package/.docs/raw/guides/migrations/vnext-to-standard-apis.mdx +2 -2
- package/.docs/raw/index.mdx +2 -2
- package/.docs/raw/mcp/overview.mdx +3 -5
- package/.docs/raw/memory/memory-processors.mdx +264 -79
- package/.docs/raw/memory/semantic-recall.mdx +7 -7
- package/.docs/raw/memory/storage/memory-with-libsql.mdx +2 -4
- package/.docs/raw/memory/storage/memory-with-mongodb.mdx +2 -4
- package/.docs/raw/memory/storage/memory-with-pg.mdx +2 -4
- package/.docs/raw/memory/storage/memory-with-upstash.mdx +2 -4
- package/.docs/raw/memory/threads-and-resources.mdx +3 -3
- package/.docs/raw/memory/working-memory.mdx +14 -7
- package/.docs/raw/{logging.mdx → observability/logging.mdx} +1 -1
- package/.docs/raw/observability/overview.mdx +2 -3
- package/.docs/raw/observability/tracing/bridges/otel.mdx +176 -0
- package/.docs/raw/observability/tracing/exporters/arize.mdx +17 -0
- package/.docs/raw/observability/tracing/exporters/braintrust.mdx +19 -0
- package/.docs/raw/observability/tracing/exporters/langfuse.mdx +20 -0
- package/.docs/raw/observability/tracing/exporters/langsmith.mdx +12 -0
- package/.docs/raw/observability/tracing/exporters/otel.mdx +25 -5
- package/.docs/raw/observability/tracing/exporters/posthog.mdx +107 -0
- package/.docs/raw/observability/tracing/overview.mdx +74 -8
- package/.docs/raw/observability/tracing/processors/sensitive-data-filter.mdx +0 -1
- package/.docs/raw/rag/chunking-and-embedding.mdx +16 -17
- package/.docs/raw/rag/overview.mdx +3 -2
- package/.docs/raw/rag/retrieval.mdx +43 -38
- package/.docs/raw/rag/vector-databases.mdx +93 -2
- package/.docs/raw/reference/agents/agent.mdx +7 -10
- package/.docs/raw/reference/agents/generate.mdx +55 -6
- package/.docs/raw/reference/agents/generateLegacy.mdx +2 -2
- package/.docs/raw/reference/agents/getLLM.mdx +1 -1
- package/.docs/raw/reference/agents/network.mdx +46 -3
- package/.docs/raw/reference/cli/mastra.mdx +2 -1
- package/.docs/raw/reference/client-js/agents.mdx +3 -3
- package/.docs/raw/reference/client-js/memory.mdx +43 -0
- package/.docs/raw/reference/client-js/workflows.mdx +92 -63
- package/.docs/raw/reference/core/getLogger.mdx +1 -1
- package/.docs/raw/reference/core/listLogs.mdx +1 -1
- package/.docs/raw/reference/core/listLogsByRunId.mdx +1 -1
- package/.docs/raw/reference/core/mastra-model-gateway.mdx +5 -19
- package/.docs/raw/reference/core/setLogger.mdx +1 -1
- package/.docs/raw/reference/core/setTelemetry.mdx +1 -1
- package/.docs/raw/reference/deployer/netlify.mdx +1 -2
- package/.docs/raw/reference/evals/answer-relevancy.mdx +28 -98
- package/.docs/raw/reference/evals/answer-similarity.mdx +12 -258
- package/.docs/raw/reference/evals/bias.mdx +29 -87
- package/.docs/raw/reference/evals/completeness.mdx +31 -90
- package/.docs/raw/reference/evals/content-similarity.mdx +28 -88
- package/.docs/raw/reference/evals/context-precision.mdx +28 -130
- package/.docs/raw/reference/evals/context-relevance.mdx +11 -11
- package/.docs/raw/reference/evals/faithfulness.mdx +28 -101
- package/.docs/raw/reference/evals/hallucination.mdx +28 -103
- package/.docs/raw/reference/evals/keyword-coverage.mdx +28 -107
- package/.docs/raw/reference/evals/noise-sensitivity.mdx +11 -11
- package/.docs/raw/reference/evals/prompt-alignment.mdx +15 -15
- package/.docs/raw/reference/evals/scorer-utils.mdx +362 -0
- package/.docs/raw/reference/evals/textual-difference.mdx +27 -100
- package/.docs/raw/reference/evals/tone-consistency.mdx +25 -98
- package/.docs/raw/reference/evals/tool-call-accuracy.mdx +7 -7
- package/.docs/raw/reference/evals/toxicity.mdx +29 -92
- package/.docs/raw/reference/index.mdx +1 -0
- package/.docs/raw/reference/memory/memory-class.mdx +5 -7
- package/.docs/raw/reference/observability/tracing/bridges/otel.mdx +150 -0
- package/.docs/raw/reference/observability/tracing/configuration.mdx +0 -4
- package/.docs/raw/reference/observability/tracing/exporters/arize.mdx +4 -0
- package/.docs/raw/reference/observability/tracing/exporters/langsmith.mdx +17 -1
- package/.docs/raw/reference/observability/tracing/exporters/otel.mdx +6 -0
- package/.docs/raw/reference/observability/tracing/exporters/posthog.mdx +132 -0
- package/.docs/raw/reference/observability/tracing/instances.mdx +0 -4
- package/.docs/raw/reference/observability/tracing/interfaces.mdx +29 -4
- package/.docs/raw/reference/observability/tracing/spans.mdx +0 -4
- package/.docs/raw/reference/processors/batch-parts-processor.mdx +1 -1
- package/.docs/raw/reference/processors/language-detector.mdx +10 -3
- package/.docs/raw/reference/processors/message-history-processor.mdx +131 -0
- package/.docs/raw/reference/processors/moderation-processor.mdx +12 -5
- package/.docs/raw/reference/processors/pii-detector.mdx +12 -5
- package/.docs/raw/reference/processors/processor-interface.mdx +502 -0
- package/.docs/raw/reference/processors/prompt-injection-detector.mdx +10 -3
- package/.docs/raw/reference/processors/semantic-recall-processor.mdx +197 -0
- package/.docs/raw/reference/processors/system-prompt-scrubber.mdx +3 -4
- package/.docs/raw/reference/processors/token-limiter-processor.mdx +2 -2
- package/.docs/raw/reference/processors/tool-call-filter.mdx +125 -0
- package/.docs/raw/reference/processors/unicode-normalizer.mdx +1 -1
- package/.docs/raw/reference/processors/working-memory-processor.mdx +221 -0
- package/.docs/raw/reference/rag/embeddings.mdx +5 -5
- package/.docs/raw/reference/rag/rerank.mdx +1 -2
- package/.docs/raw/reference/rag/rerankWithScorer.mdx +0 -1
- package/.docs/raw/reference/storage/cloudflare-d1.mdx +37 -0
- package/.docs/raw/reference/storage/convex.mdx +164 -0
- package/.docs/raw/reference/storage/lance.mdx +33 -0
- package/.docs/raw/reference/storage/libsql.mdx +37 -0
- package/.docs/raw/reference/storage/mongodb.mdx +39 -0
- package/.docs/raw/reference/storage/mssql.mdx +37 -0
- package/.docs/raw/reference/storage/postgresql.mdx +37 -0
- package/.docs/raw/reference/streaming/ChunkType.mdx +1 -1
- package/.docs/raw/reference/streaming/agents/stream.mdx +64 -2
- package/.docs/raw/reference/streaming/workflows/observeStream.mdx +7 -9
- package/.docs/raw/reference/streaming/workflows/{resumeStreamVNext.mdx → resumeStream.mdx} +51 -11
- package/.docs/raw/reference/streaming/workflows/stream.mdx +83 -24
- package/.docs/raw/reference/templates/overview.mdx +1 -4
- package/.docs/raw/reference/tools/client.mdx +1 -2
- package/.docs/raw/reference/tools/create-tool.mdx +132 -0
- package/.docs/raw/reference/tools/graph-rag-tool.mdx +5 -5
- package/.docs/raw/reference/tools/mcp-client.mdx +76 -21
- package/.docs/raw/reference/tools/mcp-server.mdx +1 -2
- package/.docs/raw/reference/tools/vector-query-tool.mdx +14 -15
- package/.docs/raw/reference/vectors/chroma.mdx +81 -1
- package/.docs/raw/reference/vectors/convex.mdx +429 -0
- package/.docs/raw/reference/vectors/couchbase.mdx +24 -17
- package/.docs/raw/reference/vectors/duckdb.mdx +462 -0
- package/.docs/raw/reference/vectors/elasticsearch.mdx +310 -0
- package/.docs/raw/reference/vectors/lance.mdx +38 -22
- package/.docs/raw/reference/vectors/libsql.mdx +35 -2
- package/.docs/raw/reference/vectors/mongodb.mdx +35 -2
- package/.docs/raw/reference/vectors/opensearch.mdx +37 -16
- package/.docs/raw/reference/vectors/pg.mdx +43 -36
- package/.docs/raw/reference/vectors/pinecone.mdx +48 -1
- package/.docs/raw/reference/vectors/qdrant.mdx +36 -1
- package/.docs/raw/reference/vectors/turbopuffer.mdx +74 -0
- package/.docs/raw/reference/voice/google.mdx +159 -20
- package/.docs/raw/reference/voice/openai-realtime.mdx +2 -2
- package/.docs/raw/reference/voice/voice.addInstructions.mdx +2 -3
- package/.docs/raw/reference/voice/voice.addTools.mdx +1 -1
- package/.docs/raw/reference/voice/voice.answer.mdx +1 -1
- package/.docs/raw/reference/voice/voice.close.mdx +1 -1
- package/.docs/raw/reference/voice/voice.connect.mdx +1 -1
- package/.docs/raw/reference/voice/voice.off.mdx +1 -1
- package/.docs/raw/reference/voice/voice.on.mdx +1 -1
- package/.docs/raw/reference/voice/voice.send.mdx +1 -1
- package/.docs/raw/reference/voice/voice.updateConfig.mdx +1 -1
- package/.docs/raw/reference/workflows/run-methods/restart.mdx +142 -0
- package/.docs/raw/reference/workflows/run-methods/resume.mdx +44 -0
- package/.docs/raw/reference/workflows/run-methods/start.mdx +44 -0
- package/.docs/raw/reference/workflows/run.mdx +13 -5
- package/.docs/raw/reference/workflows/step.mdx +13 -0
- package/.docs/raw/reference/workflows/workflow.mdx +19 -0
- package/.docs/raw/server-db/mastra-client.mdx +1 -2
- package/.docs/raw/server-db/mastra-server.mdx +30 -1
- package/.docs/raw/server-db/request-context.mdx +0 -1
- package/.docs/raw/server-db/storage.mdx +11 -0
- package/.docs/raw/streaming/overview.mdx +26 -15
- package/.docs/raw/streaming/tool-streaming.mdx +48 -5
- package/.docs/raw/streaming/workflow-streaming.mdx +5 -11
- package/.docs/raw/tools-mcp/advanced-usage.mdx +1 -2
- package/.docs/raw/tools-mcp/mcp-overview.mdx +3 -5
- package/.docs/raw/voice/overview.mdx +21 -41
- package/.docs/raw/voice/speech-to-speech.mdx +4 -4
- package/.docs/raw/voice/speech-to-text.mdx +1 -2
- package/.docs/raw/voice/text-to-speech.mdx +1 -2
- package/.docs/raw/workflows/control-flow.mdx +180 -0
- package/.docs/raw/workflows/error-handling.mdx +1 -0
- package/.docs/raw/workflows/human-in-the-loop.mdx +4 -4
- package/.docs/raw/workflows/overview.mdx +56 -44
- package/.docs/raw/workflows/snapshots.mdx +1 -0
- package/.docs/raw/workflows/suspend-and-resume.mdx +85 -16
- package/.docs/raw/workflows/time-travel.mdx +313 -0
- package/.docs/raw/workflows/workflow-state.mdx +191 -0
- package/CHANGELOG.md +18 -0
- package/dist/{chunk-5NJC7NRO.js → chunk-4CM2BQNP.js} +24 -4
- package/dist/prepare-docs/package-changes.d.ts.map +1 -1
- package/dist/prepare-docs/prepare.js +1 -1
- package/dist/stdio.js +1 -1
- package/package.json +7 -7
- package/.docs/raw/agents/human-in-the-loop-with-tools.mdx +0 -90
- package/.docs/raw/frameworks/agentic-uis/cedar-os.mdx +0 -102
- package/.docs/raw/frameworks/agentic-uis/openrouter.mdx +0 -179
- package/.docs/raw/frameworks/web-frameworks/next-js.mdx +0 -379
- package/.docs/raw/getting-started/quickstart.mdx +0 -27
- package/.docs/raw/getting-started/templates.mdx +0 -73
- package/.docs/raw/reference/streaming/workflows/observeStreamVNext.mdx +0 -47
- package/.docs/raw/reference/streaming/workflows/streamVNext.mdx +0 -153
- /package/.docs/raw/{frameworks/agentic-uis → guides/build-your-ui}/assistant-ui.mdx +0 -0
- /package/.docs/raw/{frameworks/agentic-uis → guides/build-your-ui}/copilotkit.mdx +0 -0
@@ -0,0 +1,197 @@
+---
+title: "Reference: Semantic Recall Processor | Processors"
+description: "Documentation for the SemanticRecall processor in Mastra, which enables semantic search over conversation history using vector embeddings."
+---
+
+# SemanticRecall
+
+The `SemanticRecall` is a **hybrid processor** that enables semantic search over conversation history using vector embeddings. On input, it performs semantic search to find relevant historical messages. On output, it creates embeddings for new messages to enable future semantic retrieval.
+
+## Usage example
+
+```typescript copy
+import { SemanticRecall } from "@mastra/core/processors";
+import { openai } from "@ai-sdk/openai";
+
+const processor = new SemanticRecall({
+  storage: memoryStorage,
+  vector: vectorStore,
+  embedder: openai.embedding("text-embedding-3-small"),
+  topK: 5,
+  messageRange: 2,
+  scope: "resource",
+});
+```
+
+## Constructor parameters
+
+<PropertiesTable
+  content={[
+    {
+      name: "options",
+      type: "SemanticRecallOptions",
+      description: "Configuration options for the semantic recall processor",
+      isOptional: false,
+    },
+  ]}
+/>
+
+### Options
+
+<PropertiesTable
+  content={[
+    {
+      name: "storage",
+      type: "MemoryStorage",
+      description: "Storage instance for retrieving messages",
+      isOptional: false,
+    },
+    {
+      name: "vector",
+      type: "MastraVector",
+      description: "Vector store for semantic search",
+      isOptional: false,
+    },
+    {
+      name: "embedder",
+      type: "MastraEmbeddingModel<string>",
+      description: "Embedder for generating query embeddings",
+      isOptional: false,
+    },
+    {
+      name: "topK",
+      type: "number",
+      description: "Number of most similar messages to retrieve",
+      isOptional: true,
+      default: "4",
+    },
+    {
+      name: "messageRange",
+      type: "number | { before: number; after: number }",
+      description: "Number of context messages to include before/after each match. Can be a single number (same for both) or an object with separate values",
+      isOptional: true,
+      default: "1",
+    },
+    {
+      name: "scope",
+      type: "'thread' | 'resource'",
+      description: "Scope of semantic search. 'thread' searches within the current thread only. 'resource' searches across all threads for the resource",
+      isOptional: true,
+      default: "'resource'",
+    },
+    {
+      name: "threshold",
+      type: "number",
+      description: "Minimum similarity score threshold (0-1). Messages below this threshold are filtered out",
+      isOptional: true,
+    },
+    {
+      name: "indexName",
+      type: "string",
+      description: "Index name for the vector store. If not provided, auto-generated based on embedder model",
+      isOptional: true,
+    },
+    {
+      name: "logger",
+      type: "IMastraLogger",
+      description: "Optional logger instance for structured logging",
+      isOptional: true,
+    },
+  ]}
+/>
+
+## Returns
+
+<PropertiesTable
+  content={[
+    {
+      name: "id",
+      type: "string",
+      description: "Processor identifier set to 'semantic-recall'",
+      isOptional: false,
+    },
+    {
+      name: "name",
+      type: "string",
+      description: "Processor display name set to 'SemanticRecall'",
+      isOptional: false,
+    },
+    {
+      name: "processInput",
+      type: "(args: { messages: MastraDBMessage[]; messageList: MessageList; abort: (reason?: string) => never; tracingContext?: TracingContext; requestContext?: RequestContext }) => Promise<MessageList | MastraDBMessage[]>",
+      description: "Performs semantic search on historical messages and adds relevant context to the message list",
+      isOptional: false,
+    },
+    {
+      name: "processOutputResult",
+      type: "(args: { messages: MastraDBMessage[]; messageList?: MessageList; abort: (reason?: string) => never; tracingContext?: TracingContext; requestContext?: RequestContext }) => Promise<MessageList | MastraDBMessage[]>",
+      description: "Creates embeddings for new messages to enable future semantic search",
+      isOptional: false,
+    },
+  ]}
+/>
+
+## Extended usage example
+
+```typescript title="src/mastra/agents/semantic-memory-agent.ts" showLineNumbers copy
+import { Agent } from "@mastra/core/agent";
+import { SemanticRecall, MessageHistory } from "@mastra/core/processors";
+import { PostgresStorage } from "@mastra/pg";
+import { PgVector } from "@mastra/pg";
+import { openai } from "@ai-sdk/openai";
+
+const storage = new PostgresStorage({
+  connectionString: process.env.DATABASE_URL,
+});
+
+const vector = new PgVector({
+  connectionString: process.env.DATABASE_URL,
+});
+
+const semanticRecall = new SemanticRecall({
+  storage,
+  vector,
+  embedder: openai.embedding("text-embedding-3-small"),
+  topK: 5,
+  messageRange: { before: 2, after: 1 },
+  scope: "resource",
+  threshold: 0.7,
+});
+
+export const agent = new Agent({
+  name: "semantic-memory-agent",
+  instructions: "You are a helpful assistant with semantic memory recall",
+  model: "openai:gpt-4o",
+  inputProcessors: [
+    semanticRecall,
+    new MessageHistory({ storage, lastMessages: 50 }),
+  ],
+  outputProcessors: [
+    semanticRecall,
+    new MessageHistory({ storage }),
+  ],
+});
+```
+
+## Behavior
+
+### Input processing
+1. Extracts the user query from the last user message
+2. Generates embeddings for the query
+3. Performs vector search to find semantically similar messages
+4. Retrieves matched messages along with surrounding context (based on `messageRange`)
+5. For `scope: 'resource'`, formats cross-thread messages as a system message with timestamps
+6. Adds recalled messages with `source: 'memory'` tag
+
+### Output processing
+1. Extracts text content from new user and assistant messages
+2. Generates embeddings for each message
+3. Stores embeddings in the vector store with metadata (message ID, thread ID, resource ID, role, content, timestamp)
+4. Uses LRU caching for embeddings to avoid redundant API calls
+
+### Cross-thread recall
+When `scope` is set to `'resource'`, the processor can recall messages from other threads. These cross-thread messages are formatted as a system message with timestamps and conversation labels to provide context about when and where the conversation occurred.
+
+## Related
+
+- [Guardrails](/docs/v1/agents/guardrails)
@@ -10,11 +10,10 @@ The `SystemPromptScrubber` is an **output processor** that detects and handles s
 ## Usage example
 
 ```typescript copy
-import { openai } from "@ai-sdk/openai";
 import { SystemPromptScrubber } from "@mastra/core/processors";
 
 const processor = new SystemPromptScrubber({
-  model: openai
+  model: "openrouter/openai/gpt-oss-safeguard-20b",
   strategy: "redact",
   redactionMethod: "mask",
   includeDetections: true
@@ -131,7 +130,7 @@ import { BatchPartsProcessor, SystemPromptScrubber } from "@mastra/core/processo
 export const agent = new Agent({
   name: "scrubbed-agent",
   instructions: "You are a helpful assistant",
-  model: "openai/gpt-
+  model: "openai/gpt-5.1",
   outputProcessors: [
     // Batch stream parts first to reduce LLM calls
     new BatchPartsProcessor({
@@ -139,7 +138,7 @@ export const agent = new Agent({
     }),
     // Then apply system prompt detection on batched content
     new SystemPromptScrubber({
-      model: "openai/gpt-
+      model: "openrouter/openai/gpt-oss-safeguard-20b",
       strategy: "redact",
       customPatterns: ["system prompt", "internal instructions"],
       includeDetections: true,
@@ -45,7 +45,7 @@ const processor = new TokenLimiterProcessor({
     {
       name: "encoding",
       type: "TiktokenBPE",
-      description: "Optional encoding to use. Defaults to o200k_base which is used by gpt-
+      description: "Optional encoding to use. Defaults to o200k_base which is used by gpt-5.1",
       isOptional: true,
       default: "o200k_base",
     },
@@ -124,7 +124,7 @@ import { TokenLimiterProcessor } from "@mastra/core/processors";
 export const agent = new Agent({
   name: "limited-agent",
   instructions: "You are a helpful assistant",
-  model: "openai/gpt-
+  model: "openai/gpt-5.1",
   outputProcessors: [
     new TokenLimiterProcessor({
       limit: 1000,
@@ -0,0 +1,125 @@
+---
+title: "Reference: Tool Call Filter | Processors"
+description: "Documentation for the ToolCallFilter processor in Mastra, which filters out tool calls and results from messages."
+---
+
+# ToolCallFilter
+
+The `ToolCallFilter` is an **input processor** that filters out tool calls and their results from the message history before sending to the model. This is useful when you want to exclude specific tool interactions from context or remove all tool calls entirely.
+
+## Usage example
+
+```typescript copy
+import { ToolCallFilter } from "@mastra/core/processors";
+
+// Exclude all tool calls
+const filterAll = new ToolCallFilter();
+
+// Exclude specific tools by name
+const filterSpecific = new ToolCallFilter({
+  exclude: ["searchDatabase", "sendEmail"],
+});
+```
+
+## Constructor parameters
+
+<PropertiesTable
+  content={[
+    {
+      name: "options",
+      type: "Options",
+      description: "Configuration options for the tool call filter",
+      isOptional: true,
+    },
+  ]}
+/>
+
+### Options
+
+<PropertiesTable
+  content={[
+    {
+      name: "exclude",
+      type: "string[]",
+      description: "List of specific tool names to exclude. If not provided or undefined, all tool calls are excluded",
+      isOptional: true,
+    },
+  ]}
+/>
+
+## Returns
+
+<PropertiesTable
+  content={[
+    {
+      name: "id",
+      type: "string",
+      description: "Processor identifier set to 'tool-call-filter'",
+      isOptional: false,
+    },
+    {
+      name: "name",
+      type: "string",
+      description: "Processor display name set to 'ToolCallFilter'",
+      isOptional: false,
+    },
+    {
+      name: "processInput",
+      type: "(args: { messages: MastraDBMessage[]; messageList: MessageList; abort: (reason?: string) => never; requestContext?: RequestContext }) => Promise<MessageList | MastraDBMessage[]>",
+      description: "Processes input messages to filter out tool calls and their results based on configuration",
+      isOptional: false,
+    },
+  ]}
+/>
+
+## Extended usage example
+
+```typescript title="src/mastra/agents/filtered-agent.ts" showLineNumbers copy
+import { Agent } from "@mastra/core/agent";
+import { ToolCallFilter } from "@mastra/core/processors";
+
+export const agent = new Agent({
+  name: "filtered-agent",
+  instructions: "You are a helpful assistant",
+  model: "openai:gpt-4o",
+  tools: {
+    searchDatabase,
+    sendEmail,
+    getWeather,
+  },
+  inputProcessors: [
+    // Filter out database search tool calls from context
+    // to reduce token usage while keeping other tool interactions
+    new ToolCallFilter({
+      exclude: ["searchDatabase"],
+    }),
+  ],
+});
+```
+
+## Filtering all tool calls
+
+```typescript copy
+import { Agent } from "@mastra/core/agent";
+import { ToolCallFilter } from "@mastra/core/processors";
+
+export const agent = new Agent({
+  name: "no-tools-context-agent",
+  instructions: "You are a helpful assistant",
+  model: "openai:gpt-4o",
+  tools: {
+    searchDatabase,
+    sendEmail,
+  },
+  inputProcessors: [
+    // Remove all tool calls from the message history
+    // The agent can still use tools, but previous tool interactions
+    // won't be included in the context
+    new ToolCallFilter(),
+  ],
+});
+```
+
+## Related
+
+- [Guardrails](/docs/v1/agents/guardrails)
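The new ToolCallFilter page pairs naturally with the MessageHistory processor used throughout the other new processor pages in this release. Below is a minimal sketch of that combination, assuming input processors run in the array order shown (history loaded first, then tool calls stripped); the storage setup and excluded tool name are illustrative, not prescribed by the docs.

```typescript
import { Agent } from "@mastra/core/agent";
import { MessageHistory, ToolCallFilter } from "@mastra/core/processors";
import { PostgresStorage } from "@mastra/pg";

const storage = new PostgresStorage({
  connectionString: process.env.DATABASE_URL,
});

export const agent = new Agent({
  name: "lean-context-agent",
  instructions: "You are a helpful assistant",
  model: "openai:gpt-4o",
  inputProcessors: [
    // Load recent conversation history first...
    new MessageHistory({ storage, lastMessages: 50 }),
    // ...then drop noisy tool calls/results from that history
    // before the messages reach the model
    new ToolCallFilter({ exclude: ["searchDatabase"] }),
  ],
});
```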
@@ -101,7 +101,7 @@ import { UnicodeNormalizer } from "@mastra/core/processors";
 export const agent = new Agent({
   name: "normalized-agent",
   instructions: "You are a helpful assistant",
-  model: "openai/gpt-
+  model: "openai/gpt-5.1",
   inputProcessors: [
     new UnicodeNormalizer({
       stripControlChars: true,
@@ -0,0 +1,221 @@
+---
+title: "Reference: Working Memory Processor | Processors"
+description: "Documentation for the WorkingMemory processor in Mastra, which injects persistent user/context data as system instructions."
+---
+
+# WorkingMemory
+
+The `WorkingMemory` is an **input processor** that injects working memory data as a system message. It retrieves persistent information from storage and formats it as instructions for the LLM, enabling the agent to maintain context about users across conversations.
+
+## Usage example
+
+```typescript copy
+import { WorkingMemory } from "@mastra/core/processors";
+
+const processor = new WorkingMemory({
+  storage: memoryStorage,
+  scope: "resource",
+  template: {
+    format: "markdown",
+    content: `# User Profile
+- **Name**:
+- **Preferences**:
+- **Goals**:
+`,
+  },
+});
+```
+
+## Constructor parameters
+
+<PropertiesTable
+  content={[
+    {
+      name: "options",
+      type: "Options",
+      description: "Configuration options for the working memory processor",
+      isOptional: false,
+    },
+  ]}
+/>
+
+### Options
+
+<PropertiesTable
+  content={[
+    {
+      name: "storage",
+      type: "MemoryStorage",
+      description: "Storage instance for retrieving working memory data",
+      isOptional: false,
+    },
+    {
+      name: "template",
+      type: "WorkingMemoryTemplate",
+      description: "Template defining the format and structure of working memory",
+      isOptional: true,
+    },
+    {
+      name: "scope",
+      type: "'thread' | 'resource'",
+      description: "Scope of working memory. 'thread' scopes to current thread, 'resource' shares across all threads for the resource",
+      isOptional: true,
+      default: "'resource'",
+    },
+    {
+      name: "useVNext",
+      type: "boolean",
+      description: "Use the next-generation instruction format with improved guidelines",
+      isOptional: true,
+    },
+    {
+      name: "templateProvider",
+      type: "{ getWorkingMemoryTemplate(args: { memoryConfig?: MemoryConfig }): Promise<WorkingMemoryTemplate | null> }",
+      description: "Dynamic template provider for runtime template resolution",
+      isOptional: true,
+    },
+    {
+      name: "logger",
+      type: "IMastraLogger",
+      description: "Optional logger instance for structured logging",
+      isOptional: true,
+    },
+  ]}
+/>
+
+### WorkingMemoryTemplate
+
+<PropertiesTable
+  content={[
+    {
+      name: "format",
+      type: "'markdown' | 'json'",
+      description: "Format of the working memory content",
+      isOptional: false,
+    },
+    {
+      name: "content",
+      type: "string",
+      description: "Template content defining the structure of working memory data",
+      isOptional: false,
+    },
+  ]}
+/>
+
+## Returns
+
+<PropertiesTable
+  content={[
+    {
+      name: "id",
+      type: "string",
+      description: "Processor identifier set to 'working-memory'",
+      isOptional: false,
+    },
+    {
+      name: "name",
+      type: "string",
+      description: "Processor display name set to 'WorkingMemory'",
+      isOptional: false,
+    },
+    {
+      name: "defaultWorkingMemoryTemplate",
+      type: "string",
+      description: "The default markdown template used when no custom template is provided",
+      isOptional: false,
+    },
+    {
+      name: "processInput",
+      type: "(args: { messages: MastraDBMessage[]; messageList: MessageList; abort: (reason?: string) => never; requestContext?: RequestContext }) => Promise<MessageList | MastraDBMessage[]>",
+      description: "Retrieves working memory and adds it as a system message to the message list",
+      isOptional: false,
+    },
+  ]}
+/>
+
+## Extended usage example
+
+```typescript title="src/mastra/agents/personalized-agent.ts" showLineNumbers copy
+import { Agent } from "@mastra/core/agent";
+import { WorkingMemory, MessageHistory } from "@mastra/core/processors";
+import { PostgresStorage } from "@mastra/pg";
+
+const storage = new PostgresStorage({
+  connectionString: process.env.DATABASE_URL,
+});
+
+export const agent = new Agent({
+  name: "personalized-agent",
+  instructions: "You are a helpful assistant that remembers user preferences",
+  model: "openai:gpt-4o",
+  inputProcessors: [
+    new WorkingMemory({
+      storage,
+      scope: "resource",
+      template: {
+        format: "markdown",
+        content: `# User Information
+- **Name**:
+- **Location**:
+- **Preferences**:
+- **Communication Style**:
+- **Current Projects**:
+`,
+      },
+    }),
+    new MessageHistory({ storage, lastMessages: 50 }),
+  ],
+  outputProcessors: [
+    new MessageHistory({ storage }),
+  ],
+});
+```
+
+## JSON format example
+
+```typescript copy
+import { WorkingMemory } from "@mastra/core/processors";
+
+const processor = new WorkingMemory({
+  storage: memoryStorage,
+  scope: "resource",
+  template: {
+    format: "json",
+    content: JSON.stringify({
+      user: {
+        name: { type: "string" },
+        preferences: { type: "object" },
+        goals: { type: "array" },
+      },
+    }),
+  },
+});
+```
+
+## Behavior
+
+### Input processing
+1. Retrieves `threadId` and `resourceId` from the request context
+2. Based on scope, fetches working memory from either:
+   - Thread metadata (`scope: 'thread'`)
+   - Resource record (`scope: 'resource'`)
+3. Resolves the template (from provider, options, or default)
+4. Generates system instructions that include:
+   - Guidelines for the LLM on storing and updating information
+   - The template structure
+   - Current working memory data
+5. Adds the instruction as a system message with `source: 'memory'` tag
+
+### Working memory updates
+Working memory updates happen through the `updateWorkingMemory` tool provided by the Memory class, not through this processor. The processor only handles injecting the current working memory state into conversations.
+
+### Default template
+If no template is provided, the processor uses a default markdown template with fields for:
+- First Name, Last Name
+- Location, Occupation
+- Interests, Goals
+- Events, Facts, Projects
+
+## Related
+
+- [Guardrails](/docs/v1/agents/guardrails)
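The WorkingMemory and SemanticRecall pages each show their processor in isolation. Below is a rough sketch of running both on one agent, built only from the constructors and options documented in the pages above; the storage, vector store, embedder, and model choices are illustrative, not prescribed.

```typescript
import { Agent } from "@mastra/core/agent";
import { MessageHistory, SemanticRecall, WorkingMemory } from "@mastra/core/processors";
import { PostgresStorage, PgVector } from "@mastra/pg";
import { openai } from "@ai-sdk/openai";

const storage = new PostgresStorage({ connectionString: process.env.DATABASE_URL });
const vector = new PgVector({ connectionString: process.env.DATABASE_URL });

const semanticRecall = new SemanticRecall({
  storage,
  vector,
  embedder: openai.embedding("text-embedding-3-small"),
  topK: 5,
  scope: "resource",
});

export const agent = new Agent({
  name: "memory-agent",
  instructions: "You are a helpful assistant that remembers users",
  model: "openai:gpt-4o",
  inputProcessors: [
    // Inject persistent user profile data as a system message
    new WorkingMemory({ storage, scope: "resource" }),
    // Recall semantically relevant messages from past threads
    semanticRecall,
    // Then load the most recent conversation history
    new MessageHistory({ storage, lastMessages: 50 }),
  ],
  outputProcessors: [
    // Embed and persist new messages for future recall
    semanticRecall,
    new MessageHistory({ storage }),
  ],
});
```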
@@ -13,9 +13,10 @@ The `embed` function generates a vector embedding for a single text input:
 
 ```typescript
 import { embed } from "ai";
+import { ModelRouterEmbeddingModel } from "@mastra/core/llm";
 
 const result = await embed({
-  model:
+  model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
   value: "Your text to embed",
   maxRetries: 2, // optional, defaults to 2
 });
@@ -80,7 +81,7 @@ For embedding multiple texts at once, use the `embedMany` function:
 import { embedMany } from "ai";
 
 const result = await embedMany({
-  model:
+  model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
   values: ["First text", "Second text", "Third text"],
   maxRetries: 2, // optional, defaults to 2
 });
@@ -142,17 +143,16 @@ const result = await embedMany({
 
 ```typescript
 import { embed, embedMany } from "ai";
-import { openai } from "@ai-sdk/openai";
 
 // Single embedding
 const singleResult = await embed({
-  model:
+  model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
   value: "What is the meaning of life?",
 });
 
 // Multiple embeddings
 const multipleResult = await embedMany({
-  model:
+  model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
   values: [
     "First question about life",
     "Second question about universe",
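The embeddings hunks above swap in `ModelRouterEmbeddingModel` but stop short of using the returned vectors. Below is a small sketch of ranking candidates against a query by cosine similarity, assuming the `ai` package's `embedding`/`embeddings` result fields and its `cosineSimilarity` helper, none of which are shown in this diff.

```typescript
import { cosineSimilarity, embed, embedMany } from "ai";
import { ModelRouterEmbeddingModel } from "@mastra/core/llm";

const model = new ModelRouterEmbeddingModel("openai/text-embedding-3-small");

// Embed the query once
const { embedding: query } = await embed({
  model,
  value: "What is the meaning of life?",
});

// Embed the candidate texts in one batch
const { embeddings } = await embedMany({
  model,
  values: ["First question about life", "Second question about universe"],
});

// Rank candidates by similarity to the query vector
const ranked = embeddings
  .map((embedding, index) => ({ index, score: cosineSimilarity(query, embedding) }))
  .sort((a, b) => b.score - a.score);
```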
@@ -19,10 +19,9 @@ function rerank(
 ## Usage Example
 
 ```typescript
-import { openai } from "@ai-sdk/openai";
 import { rerank } from "@mastra/rag";
 
-const model = openai
+const model = "openai/gpt-5.1";
 
 const rerankedResults = await rerank(
   vectorSearchResults,