@mastra/mcp-docs-server 1.0.0-beta.5 → 1.0.0-beta.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (132)
  1. package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +9 -9
  2. package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +67 -67
  3. package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +26 -26
  4. package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +53 -53
  5. package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +26 -26
  6. package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +27 -27
  7. package/.docs/organized/changelogs/%40mastra%2Fconvex.md +29 -0
  8. package/.docs/organized/changelogs/%40mastra%2Fcore.md +274 -274
  9. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +15 -15
  10. package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +12 -12
  11. package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +65 -65
  12. package/.docs/organized/changelogs/%40mastra%2Fduckdb.md +42 -0
  13. package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +26 -26
  14. package/.docs/organized/changelogs/%40mastra%2Felasticsearch.md +52 -0
  15. package/.docs/organized/changelogs/%40mastra%2Fevals.md +12 -12
  16. package/.docs/organized/changelogs/%40mastra%2Flance.md +26 -26
  17. package/.docs/organized/changelogs/%40mastra%2Flibsql.md +24 -24
  18. package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +9 -9
  19. package/.docs/organized/changelogs/%40mastra%2Fmcp.md +84 -84
  20. package/.docs/organized/changelogs/%40mastra%2Fmemory.md +36 -36
  21. package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +26 -26
  22. package/.docs/organized/changelogs/%40mastra%2Fmssql.md +27 -27
  23. package/.docs/organized/changelogs/%40mastra%2Fpg.md +28 -28
  24. package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +47 -47
  25. package/.docs/organized/changelogs/%40mastra%2Frag.md +43 -43
  26. package/.docs/organized/changelogs/%40mastra%2Freact.md +9 -0
  27. package/.docs/organized/changelogs/%40mastra%2Fschema-compat.md +6 -0
  28. package/.docs/organized/changelogs/%40mastra%2Fserver.md +56 -56
  29. package/.docs/organized/changelogs/%40mastra%2Fupstash.md +26 -26
  30. package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +19 -19
  31. package/.docs/organized/changelogs/create-mastra.md +9 -9
  32. package/.docs/organized/changelogs/mastra.md +17 -17
  33. package/.docs/organized/code-examples/agui.md +1 -0
  34. package/.docs/organized/code-examples/ai-sdk-v5.md +1 -0
  35. package/.docs/organized/code-examples/mcp-server-adapters.md +721 -0
  36. package/.docs/organized/code-examples/server-app-access.md +342 -0
  37. package/.docs/raw/agents/agent-approval.mdx +189 -0
  38. package/.docs/raw/agents/guardrails.mdx +13 -9
  39. package/.docs/raw/agents/networks.mdx +1 -0
  40. package/.docs/raw/agents/overview.mdx +23 -58
  41. package/.docs/raw/agents/processors.mdx +279 -0
  42. package/.docs/raw/deployment/cloud-providers/index.mdx +19 -26
  43. package/.docs/raw/deployment/cloud-providers/netlify-deployer.mdx +44 -13
  44. package/.docs/raw/evals/running-in-ci.mdx +0 -2
  45. package/.docs/raw/{guides/getting-started → getting-started}/manual-install.mdx +2 -2
  46. package/.docs/raw/getting-started/start.mdx +1 -1
  47. package/.docs/raw/guides/build-your-ui/ai-sdk-ui.mdx +8 -0
  48. package/.docs/raw/guides/getting-started/quickstart.mdx +1 -1
  49. package/.docs/raw/guides/guide/whatsapp-chat-bot.mdx +421 -0
  50. package/.docs/raw/guides/index.mdx +3 -35
  51. package/.docs/raw/guides/migrations/upgrade-to-v1/agent.mdx +11 -0
  52. package/.docs/raw/guides/migrations/upgrade-to-v1/workflows.mdx +29 -0
  53. package/.docs/raw/index.mdx +1 -1
  54. package/.docs/raw/memory/memory-processors.mdx +265 -79
  55. package/.docs/raw/memory/working-memory.mdx +10 -2
  56. package/.docs/raw/observability/overview.mdx +0 -1
  57. package/.docs/raw/observability/tracing/bridges/otel.mdx +176 -0
  58. package/.docs/raw/observability/tracing/exporters/arize.mdx +17 -0
  59. package/.docs/raw/observability/tracing/exporters/braintrust.mdx +19 -0
  60. package/.docs/raw/observability/tracing/exporters/langfuse.mdx +20 -0
  61. package/.docs/raw/observability/tracing/exporters/langsmith.mdx +12 -0
  62. package/.docs/raw/observability/tracing/exporters/otel.mdx +5 -4
  63. package/.docs/raw/observability/tracing/overview.mdx +71 -6
  64. package/.docs/raw/observability/tracing/processors/sensitive-data-filter.mdx +0 -1
  65. package/.docs/raw/rag/retrieval.mdx +23 -6
  66. package/.docs/raw/rag/vector-databases.mdx +93 -2
  67. package/.docs/raw/reference/agents/generate.mdx +55 -6
  68. package/.docs/raw/reference/agents/network.mdx +44 -0
  69. package/.docs/raw/reference/client-js/memory.mdx +43 -0
  70. package/.docs/raw/reference/client-js/workflows.mdx +92 -63
  71. package/.docs/raw/reference/deployer/netlify.mdx +1 -2
  72. package/.docs/raw/reference/evals/scorer-utils.mdx +362 -0
  73. package/.docs/raw/reference/index.mdx +1 -0
  74. package/.docs/raw/reference/observability/tracing/bridges/otel.mdx +150 -0
  75. package/.docs/raw/reference/observability/tracing/configuration.mdx +0 -4
  76. package/.docs/raw/reference/observability/tracing/exporters/arize.mdx +4 -0
  77. package/.docs/raw/reference/observability/tracing/exporters/langsmith.mdx +17 -1
  78. package/.docs/raw/reference/observability/tracing/exporters/otel.mdx +6 -0
  79. package/.docs/raw/reference/observability/tracing/instances.mdx +0 -4
  80. package/.docs/raw/reference/observability/tracing/interfaces.mdx +29 -4
  81. package/.docs/raw/reference/observability/tracing/spans.mdx +0 -4
  82. package/.docs/raw/reference/processors/language-detector.mdx +9 -2
  83. package/.docs/raw/reference/processors/message-history-processor.mdx +131 -0
  84. package/.docs/raw/reference/processors/moderation-processor.mdx +10 -3
  85. package/.docs/raw/reference/processors/pii-detector.mdx +10 -3
  86. package/.docs/raw/reference/processors/processor-interface.mdx +502 -0
  87. package/.docs/raw/reference/processors/prompt-injection-detector.mdx +9 -2
  88. package/.docs/raw/reference/processors/semantic-recall-processor.mdx +197 -0
  89. package/.docs/raw/reference/processors/system-prompt-scrubber.mdx +2 -2
  90. package/.docs/raw/reference/processors/tool-call-filter.mdx +125 -0
  91. package/.docs/raw/reference/processors/working-memory-processor.mdx +221 -0
  92. package/.docs/raw/reference/storage/cloudflare-d1.mdx +37 -0
  93. package/.docs/raw/reference/storage/convex.mdx +164 -0
  94. package/.docs/raw/reference/storage/lance.mdx +33 -0
  95. package/.docs/raw/reference/storage/libsql.mdx +37 -0
  96. package/.docs/raw/reference/storage/mongodb.mdx +39 -0
  97. package/.docs/raw/reference/storage/mssql.mdx +37 -0
  98. package/.docs/raw/reference/storage/postgresql.mdx +37 -0
  99. package/.docs/raw/reference/streaming/ChunkType.mdx +1 -1
  100. package/.docs/raw/reference/streaming/agents/stream.mdx +56 -1
  101. package/.docs/raw/reference/streaming/workflows/observeStream.mdx +7 -9
  102. package/.docs/raw/reference/streaming/workflows/{resumeStreamVNext.mdx → resumeStream.mdx} +51 -11
  103. package/.docs/raw/reference/streaming/workflows/stream.mdx +83 -24
  104. package/.docs/raw/reference/tools/mcp-client.mdx +74 -17
  105. package/.docs/raw/reference/vectors/convex.mdx +429 -0
  106. package/.docs/raw/reference/vectors/duckdb.mdx +462 -0
  107. package/.docs/raw/reference/vectors/elasticsearch.mdx +310 -0
  108. package/.docs/raw/reference/voice/google.mdx +159 -20
  109. package/.docs/raw/reference/workflows/run-methods/restart.mdx +142 -0
  110. package/.docs/raw/reference/workflows/run-methods/resume.mdx +44 -0
  111. package/.docs/raw/reference/workflows/run-methods/start.mdx +44 -0
  112. package/.docs/raw/reference/workflows/run.mdx +13 -5
  113. package/.docs/raw/reference/workflows/step.mdx +13 -0
  114. package/.docs/raw/reference/workflows/workflow.mdx +19 -0
  115. package/.docs/raw/server-db/mastra-server.mdx +30 -1
  116. package/.docs/raw/server-db/request-context.mdx +0 -1
  117. package/.docs/raw/server-db/storage.mdx +11 -0
  118. package/.docs/raw/streaming/overview.mdx +6 -6
  119. package/.docs/raw/streaming/tool-streaming.mdx +2 -2
  120. package/.docs/raw/streaming/workflow-streaming.mdx +5 -11
  121. package/.docs/raw/workflows/error-handling.mdx +1 -0
  122. package/.docs/raw/workflows/human-in-the-loop.mdx +4 -4
  123. package/.docs/raw/workflows/overview.mdx +56 -44
  124. package/.docs/raw/workflows/snapshots.mdx +1 -0
  125. package/.docs/raw/workflows/suspend-and-resume.mdx +85 -16
  126. package/.docs/raw/workflows/time-travel.mdx +313 -0
  127. package/.docs/raw/workflows/workflow-state.mdx +191 -0
  128. package/CHANGELOG.md +8 -0
  129. package/package.json +4 -4
  130. package/.docs/raw/agents/human-in-the-loop-with-tools.mdx +0 -91
  131. package/.docs/raw/reference/streaming/workflows/observeStreamVNext.mdx +0 -47
  132. package/.docs/raw/reference/streaming/workflows/streamVNext.mdx +0 -153
@@ -5,132 +5,318 @@ description: "Learn how to use memory processors in Mastra to filter, trim, and

  # Memory Processors

- Memory Processors allow you to modify the list of messages retrieved from memory _before_ they are added to the agent's context window and sent to the LLM. This is useful for managing context size, filtering content, and optimizing performance.
+ Memory processors transform and filter messages as they pass through an agent with memory enabled. They manage context window limits, remove unnecessary content, and optimize the information sent to the language model.

- Processors operate on the messages retrieved based on your memory configuration (e.g., `lastMessages`, `semanticRecall`). They do **not** affect the new incoming user message.
+ When memory is enabled on an agent, Mastra adds memory processors to the agent's processor pipeline. These processors retrieve conversation history, working memory, and semantically relevant messages, then persist new messages after the model responds.

- ## Built-in Processors
+ Memory processors are [processors](/docs/v1/agents/processors) that operate specifically on memory-related messages and state.

- Mastra provides built-in processors:
+ ## Built-in Memory Processors

- ### `TokenLimiter`
+ Mastra automatically adds these processors when memory is enabled:

- This processor is used to prevent errors caused by exceeding the LLM's context window limit. It counts the tokens in the retrieved memory messages and removes the oldest messages until the total count is below the specified `limit`.
+ ### MessageHistory

- ```typescript copy showLineNumbers {9-12}
- import { Memory } from "@mastra/memory";
- import { TokenLimiter } from "@mastra/memory/processors";
+ Retrieves conversation history and persists new messages.
+
+ **When you configure:**
+
+ ```typescript
+ memory: new Memory({
+   lastMessages: 10,
+ });
+ ```
+
+ **Mastra internally:**
+
+ 1. Creates a `MessageHistory` processor with `limit: 10`
+ 2. Adds it to the agent's input processors (runs before the LLM)
+ 3. Adds it to the agent's output processors (runs after the LLM)
+
+ **What it does:**
+
+ - **Input**: Fetches the last 10 messages from storage and prepends them to the conversation
+ - **Output**: Persists new messages to storage after the model responds
+
+ **Example:**
+
+ ```typescript copy showLineNumbers
  import { Agent } from "@mastra/core/agent";
+ import { Memory } from "@mastra/memory";
+ import { LibSQLStore } from "@mastra/libsql";
+ import { openai } from "@ai-sdk/openai";

  const agent = new Agent({
    id: "test-agent",
    name: "Test Agent",
-   model: "openai/gpt-5.1",
+   instructions: "You are a helpful assistant",
+   model: 'openai/gpt-4o',
    memory: new Memory({
-     processors: [
-       // Ensure the total tokens from memory don't exceed ~127k
-       new TokenLimiter(127000),
-     ],
+     storage: new LibSQLStore({
+       id: "memory-store",
+       url: "file:memory.db",
+     }),
+     lastMessages: 10, // MessageHistory processor automatically added
    }),
  });
  ```

- The `TokenLimiter` uses the `o200k_base` encoding by default (suitable for GPT-4o). You can specify other encodings if needed for different models:
+ ### SemanticRecall

- ```typescript copy showLineNumbers {6-9}
- // Import the encoding you need (e.g., for older OpenAI models)
- import cl100k_base from "js-tiktoken/ranks/cl100k_base";
+ Retrieves semantically relevant messages based on the current input and creates embeddings for new messages.

- const memoryForOlderModel = new Memory({
-   processors: [
-     new TokenLimiter({
-       limit: 16000, // Example limit for a 16k context model
-       encoding: cl100k_base,
-     }),
-   ],
+ **When you configure:**
+
+ ```typescript
+ memory: new Memory({
+   semanticRecall: { enabled: true },
+   vector: myVectorStore,
+   embedder: myEmbedder,
  });
  ```

- See the [OpenAI cookbook](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken#encodings) or [`js-tiktoken` repo](https://github.com/dqbd/tiktoken) for more on encodings.
+ **Mastra internally:**
+
+ 1. Creates a `SemanticRecall` processor
+ 2. Adds it to the agent's input processors (runs before the LLM)
+ 3. Adds it to the agent's output processors (runs after the LLM)
+ 4. Requires both a vector store and embedder to be configured

- ### `ToolCallFilter`
+ **What it does:**

- This processor removes tool calls from the memory messages sent to the LLM. It saves tokens by excluding potentially verbose tool interactions from the context, which is useful if the details aren't needed for future interactions. It's also useful if you always want your agent to call a specific tool again and not rely on previous tool results in memory.
+ - **Input**: Performs vector similarity search to find relevant past messages and prepends them to the conversation
+ - **Output**: Creates embeddings for new messages and stores them in the vector store for future retrieval

- ```typescript copy showLineNumbers {5-14}
+ **Example:**
+
+ ```typescript copy showLineNumbers
+ import { Agent } from "@mastra/core/agent";
  import { Memory } from "@mastra/memory";
- import { ToolCallFilter, TokenLimiter } from "@mastra/memory/processors";
+ import { LibSQLStore } from "@mastra/libsql";
+ import { PineconeVector } from "@mastra/pinecone";
+ import { OpenAIEmbedder } from "@mastra/openai";
+ import { openai } from "@ai-sdk/openai";

- const memoryFilteringTools = new Memory({
-   processors: [
-     // Example 1: Remove all tool calls/results
-     new ToolCallFilter(),
+ const agent = new Agent({
+   name: "semantic-agent",
+   instructions: "You are a helpful assistant with semantic memory",
+   model: 'openai/gpt-4o',
+   memory: new Memory({
+     storage: new LibSQLStore({
+       id: "memory-store",
+       url: "file:memory.db",
+     }),
+     vector: new PineconeVector({
+       id: "memory-vector",
+       apiKey: process.env.PINECONE_API_KEY!,
+       environment: "us-east-1",
+     }),
+     embedder: new OpenAIEmbedder({
+       model: "text-embedding-3-small",
+       apiKey: process.env.OPENAI_API_KEY!,
+     }),
+     semanticRecall: { enabled: true }, // SemanticRecall processor automatically added
+   }),
+ });
+ ```

-     // Example 2: Remove only noisy image generation tool calls/results
-     new ToolCallFilter({ exclude: ["generateImageTool"] }),
+ ### WorkingMemory

-     // Always place TokenLimiter last
-     new TokenLimiter(127000),
-   ],
+ Manages working memory state across conversations.
+
+ **When you configure:**
+
+ ```typescript
+ memory: new Memory({
+   workingMemory: { enabled: true },
  });
  ```

- ## Applying Multiple Processors
+ **Mastra internally:**
+
+ 1. Creates a `WorkingMemory` processor
+ 2. Adds it to the agent's input processors (runs before the LLM)
+ 3. Requires a storage adapter to be configured

- You can chain multiple processors. They execute in the order they appear in the `processors` array. The output of one processor becomes the input for the next.
+ **What it does:**

- **Order matters!** It's generally best practice to place `TokenLimiter` **last** in the chain. This ensures it operates on the final set of messages after other filtering has occurred, providing the most accurate token limit enforcement.
+ - **Input**: Retrieves working memory state for the current thread and prepends it to the conversation
+ - **Output**: No output processing

- ```typescript copy showLineNumbers {7-14}
+ **Example:**
+
+ ```typescript copy showLineNumbers
+ import { Agent } from "@mastra/core/agent";
  import { Memory } from "@mastra/memory";
- import { ToolCallFilter, TokenLimiter } from "@mastra/memory/processors";
- // Assume a hypothetical 'PIIFilter' custom processor exists
- // import { PIIFilter } from './custom-processors';
-
- const memoryWithMultipleProcessors = new Memory({
-   processors: [
-     // 1. Filter specific tool calls first
-     new ToolCallFilter({ exclude: ["verboseDebugTool"] }),
-     // 2. Apply custom filtering (e.g., remove hypothetical PII - use with caution)
-     // new PIIFilter(),
-     // 3. Apply token limiting as the final step
-     new TokenLimiter(127000),
+ import { LibSQLStore } from "@mastra/libsql";
+ import { openai } from "@ai-sdk/openai";
+
+ const agent = new Agent({
+   name: "working-memory-agent",
+   instructions: "You are an assistant with working memory",
+   model: 'openai/gpt-4o',
+   memory: new Memory({
+     storage: new LibSQLStore({
+       id: "memory-store",
+       url: "file:memory.db",
+     }),
+     workingMemory: { enabled: true }, // WorkingMemory processor automatically added
+   }),
+ });
+ ```
+
+ ## Manual Control and Deduplication
+
+ If you manually add a memory processor to `inputProcessors` or `outputProcessors`, Mastra will **not** automatically add it. This gives you full control over processor ordering:
+
+ ```typescript copy showLineNumbers
+ import { Agent } from "@mastra/core/agent";
+ import { Memory } from "@mastra/memory";
+ import { MessageHistory } from "@mastra/memory/processors";
+ import { TokenLimiter } from "@mastra/core/processors";
+ import { LibSQLStore } from "@mastra/libsql";
+ import { openai } from "@ai-sdk/openai";
+
+ // Custom MessageHistory with different configuration
+ const customMessageHistory = new MessageHistory({
+   storage: new LibSQLStore({ id: "memory-store", url: "file:memory.db" }),
+   lastMessages: 20,
+ });
+
+ const agent = new Agent({
+   name: "custom-memory-agent",
+   instructions: "You are a helpful assistant",
+   model: 'openai/gpt-4o',
+   memory: new Memory({
+     storage: new LibSQLStore({ id: "memory-store", url: "file:memory.db" }),
+     lastMessages: 10, // This would normally add MessageHistory(10)
+   }),
+   inputProcessors: [
+     customMessageHistory, // Your custom one is used instead
+     new TokenLimiter({ limit: 4000 }), // Runs after your custom MessageHistory
    ],
  });
  ```

- ## Creating Custom Processors
+ ## Processor Execution Order
+
+ Understanding the execution order is important when combining guardrails with memory:
+
+ ### Input Processors
+
+ ```
+ [Memory Processors] → [Your inputProcessors]
+ ```
+
+ 1. **Memory processors run FIRST**: `WorkingMemory`, `MessageHistory`, `SemanticRecall`
+ 2. **Your input processors run AFTER**: guardrails, filters, validators
+
+ This means memory loads conversation history before your processors can validate or filter the input.
+
+ ### Output Processors
+
+ ```
+ [Your outputProcessors] → [Memory Processors]
+ ```
+
+ 1. **Your output processors run FIRST**: guardrails, filters, validators
+ 2. **Memory processors run AFTER**: `SemanticRecall` (embeddings), `MessageHistory` (persistence)
+
+ This ordering is designed to be **safe by default**: if your output guardrail calls `abort()`, the memory processors never run and **no messages are saved**.

- You can create custom logic by extending the base `MemoryProcessor` class.
+ ## Guardrails and Memory

- ```typescript copy showLineNumbers {5-20,24-27}
- import { Memory, MemoryProcessorOpts, MemoryProcessor } from "@mastra/memory";
- import { CoreMessage } from "@mastra/core/llm";
+ The default execution order provides safe guardrail behavior:

- class ConversationOnlyFilter extends MemoryProcessor {
-   constructor() {
-     // Provide a name for easier debugging if needed
-     super({ name: "ConversationOnlyFilter" });
-   }
+ ### Output guardrails (recommended)

-   process(
-     messages: CoreMessage[],
-     _opts: MemoryProcessorOpts = {}, // Options passed during memory retrieval, rarely needed here
-   ): CoreMessage[] {
-     // Filter messages based on role
-     return messages.filter(
-       (msg) => msg.role === "user" || msg.role === "assistant",
+ Output guardrails run **before** memory processors save messages. If a guardrail aborts:
+
+ - The tripwire is triggered
+ - Memory processors are skipped
+ - **No messages are persisted to storage**
+
+ ```typescript copy showLineNumbers
+ import { Agent } from "@mastra/core/agent";
+ import { Memory } from "@mastra/memory";
+ import { openai } from "@ai-sdk/openai";
+
+ // Output guardrail that blocks inappropriate content
+ const contentBlocker = {
+   id: "content-blocker",
+   processOutputResult: async ({ messages, abort }) => {
+     const hasInappropriateContent = messages.some((msg) =>
+       containsBadContent(msg)
      );
-   }
+     if (hasInappropriateContent) {
+       abort("Content blocked by guardrail");
+     }
+     return messages;
+   },
+ };
+
+ const agent = new Agent({
+   name: "safe-agent",
+   instructions: "You are a helpful assistant",
+   model: 'openai/gpt-4o',
+   memory: new Memory({ lastMessages: 10 }),
+   // Your guardrail runs BEFORE memory saves
+   outputProcessors: [contentBlocker],
+ });
+
+ // If the guardrail aborts, nothing is saved to memory
+ const result = await agent.generate("Hello");
+ if (result.tripwire) {
+   console.log("Blocked:", result.tripwireReason);
+   // Memory is empty - no messages were persisted
  }
+ ```

- // Use the custom processor
- const memoryWithCustomFilter = new Memory({
-   processors: [
-     new ConversationOnlyFilter(),
-     new TokenLimiter(127000), // Still apply token limiting
-   ],
+ ### Input guardrails
+
+ Input guardrails run **after** memory processors load history. If a guardrail aborts:
+
+ - The tripwire is triggered
+ - The LLM is never called
+ - Output processors (including memory persistence) are skipped
+ - **No messages are persisted to storage**
+
+ ```typescript copy showLineNumbers
+ // Input guardrail that validates user input
+ const inputValidator = {
+   id: "input-validator",
+   processInput: async ({ messages, abort }) => {
+     const lastUserMessage = messages.findLast((m) => m.role === "user");
+     if (isInvalidInput(lastUserMessage)) {
+       abort("Invalid input detected");
+     }
+     return messages;
+   },
+ };
+
+ const agent = new Agent({
+   name: "validated-agent",
+   instructions: "You are a helpful assistant",
+   model: 'openai/gpt-4o',
+   memory: new Memory({ lastMessages: 10 }),
+   // Your guardrail runs AFTER memory loads history
+   inputProcessors: [inputValidator],
  });
  ```

+ ### Summary
+
+ | Guardrail Type | When it runs               | If it aborts                  |
+ | -------------- | -------------------------- | ----------------------------- |
+ | Input          | After memory loads history | LLM not called, nothing saved |
+ | Output         | Before memory saves        | Nothing saved to storage      |
+
+ Both scenarios are safe: guardrails prevent inappropriate content from being persisted to memory.
+
+ ## Related documentation
+
+ - [Processors](/docs/v1/agents/processors) - General processor concepts and custom processor creation
+ - [Guardrails](/docs/v1/agents/guardrails) - Security and validation processors
+ - [Memory Overview](/docs/v1/memory/overview) - Memory types and configuration
+
  When creating custom processors avoid mutating the input `messages` array or its objects directly.
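The execution-order rules in this hunk come with diagrams but no runnable illustration. As a minimal sketch (hypothetical `orderLogger` name, reusing the inline-object processor shape from the guardrail examples above), a pass-through processor that logs when it runs:

```typescript
// Hypothetical pass-through processor for observing execution order.
// On input it runs AFTER the memory processors, so the logged count
// already includes recalled history, not just the new user message.
const orderLogger = {
  id: "order-logger",
  processInput: async ({ messages }) => {
    console.log(`[order-logger] input: ${messages.length} messages`);
    return messages;
  },
  // On output it runs BEFORE memory persistence; calling abort() here
  // would keep all of these messages out of storage.
  processOutputResult: async ({ messages }) => {
    console.log(`[order-logger] output: ${messages.length} messages`);
    return messages;
  },
};
```

Passing `orderLogger` in both `inputProcessors` and `outputProcessors` makes the `[Memory Processors] → [Your inputProcessors]` ordering visible directly in the logs.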
@@ -267,10 +267,18 @@ When a schema is provided, the agent receives the working memory as a JSON objec
  }
  ```

+ ### Merge Semantics for Schema-Based Memory
+
+ Schema-based working memory uses **merge semantics**, meaning the agent only needs to include fields it wants to add or update. Existing fields are preserved automatically.
+
+ - **Object fields are deep merged:** Only provided fields are updated; others remain unchanged
+ - **Set a field to `null` to delete it:** This explicitly removes the field from memory
+ - **Arrays are replaced entirely:** When an array field is provided, it replaces the existing array (arrays are not merged element-by-element)
+

  ## Choosing Between Template and Schema

- - Use a **template** (Markdown) if you want the agent to maintain memory as a free-form text block, such as a user profile or scratchpad.
- - Use a **schema** if you need structured, type-safe data that can be validated and programmatically accessed as JSON.
+ - Use a **template** (Markdown) if you want the agent to maintain memory as a free-form text block, such as a user profile or scratchpad. Templates use **replace semantics** — the agent must provide the complete memory content on each update.
+ - Use a **schema** if you need structured, type-safe data that can be validated and programmatically accessed as JSON. Schemas use **merge semantics** — the agent only provides fields to update, and existing fields are preserved.
  - Only one mode can be active at a time: setting both `template` and `schema` is not supported.

  ## Example: Multi-step Retention
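The merge rules added in this hunk can be pinned down with a small reference function. A sketch of the described semantics (illustrative only, not Mastra's actual implementation):

```typescript
type Json = null | boolean | number | string | Json[] | { [key: string]: Json };

// Deep-merge objects, replace arrays wholesale, delete null-valued fields.
function mergeWorkingMemory(existing: Json, update: Json): Json {
  if (Array.isArray(update)) return update; // arrays are replaced entirely
  if (update !== null && typeof update === "object") {
    const base: { [key: string]: Json } =
      existing !== null && typeof existing === "object" && !Array.isArray(existing)
        ? { ...existing }
        : {};
    for (const [key, value] of Object.entries(update)) {
      if (value === null) delete base[key]; // null explicitly removes a field
      else base[key] = mergeWorkingMemory(base[key] ?? null, value);
    }
    return base;
  }
  return update; // primitives replace the previous value
}

// -> { name: "Ada", prefs: { theme: "dark", lang: "fr" }, tags: ["beta"] }
console.log(
  mergeWorkingMemory(
    { name: "Ada", prefs: { theme: "dark", lang: "en" }, tags: ["vip"], nickname: "A" },
    { prefs: { lang: "fr" }, tags: ["beta"], nickname: null },
  ),
);
```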
@@ -49,5 +49,4 @@ We also support various external tracing providers like MLflow, Langfuse, Braint

  - **[Set up Tracing](/docs/v1/observability/tracing/overview)**: Configure tracing for your application
  - **[Configure Logging](/docs/v1/observability/logging)**: Add structured logging
- - **[View Examples](/examples/v1/observability/basic-ai-tracing)**: See observability in action
  - **[API Reference](/reference/v1/observability/tracing/instances)**: Detailed configuration options
@@ -0,0 +1,176 @@
+ ---
+ title: "OpenTelemetry Bridge | Tracing | Observability"
+ description: "Integrate Mastra tracing with existing OpenTelemetry infrastructure"
+ ---
+
+ # OpenTelemetry Bridge
+
+ :::warning
+
+ The OpenTelemetry Bridge is currently **experimental**. APIs and configuration options may change in future releases.
+
+ :::
+
+ The OpenTelemetry (OTEL) Bridge enables bidirectional integration between Mastra's tracing system and existing OpenTelemetry infrastructure. Unlike exporters that send trace data to external platforms, the bridge creates native OTEL spans that participate in your distributed tracing context.
+
+ :::info Looking to send traces without existing OTEL infrastructure?
+
+ If you don't have existing OpenTelemetry instrumentation, the [OpenTelemetry Exporter](/docs/v1/observability/tracing/exporters/otel) may be simpler — it sends traces directly without requiring an OTEL SDK setup.
+
+ :::
+
+ ## When to Use the Bridge
+
+ Use the OtelBridge when you:
+
+ - Have existing OTEL instrumentation in your application (HTTP servers, database clients, etc.)
+ - Want Mastra operations to appear as child spans of your existing OTEL traces
+ - Need OTEL-instrumented code inside Mastra tools to maintain proper parent-child relationships
+ - Are building a distributed system where trace context must propagate across services
+
+ ## How It Works
+
+ The OtelBridge provides two-way integration:
+
+ **From OTEL to Mastra:**
+ - Reads from OTEL ambient context (AsyncLocalStorage) automatically
+ - Inherits trace ID and parent span ID from active OTEL spans
+ - Respects OTEL sampling decisions — if a trace is not sampled, Mastra won't create spans for it
+ - No manual trace ID passing required when OTEL auto-instrumentation is active
+
+ **From Mastra to OTEL:**
+ - Creates native OTEL spans for Mastra operations (agents, LLM calls, tools, workflows)
+ - Maintains proper parent-child relationships in distributed traces
+ - Allows OTEL-instrumented code (HTTP clients, database calls) within Mastra operations to nest correctly
+
+ ## Installation
+
+ ```bash npm2yarn
+ npm install @mastra/otel-bridge
+ ```
+
+ The bridge works with your existing OpenTelemetry setup. Depending on your configuration, you may also need some of these packages:
+
+ - `@opentelemetry/sdk-node` - Core Node.js SDK for OTEL
+ - `@opentelemetry/auto-instrumentations-node` - Auto-instrumentation for common libraries
+ - `@opentelemetry/exporter-trace-otlp-proto` - OTLP exporter (Protobuf over HTTP)
+ - `@opentelemetry/exporter-trace-otlp-http` - OTLP exporter (JSON over HTTP)
+ - `@opentelemetry/exporter-trace-otlp-grpc` - OTLP exporter (gRPC)
+ - `@opentelemetry/sdk-trace-base` - Base tracing SDK (for BatchSpanProcessor, etc.)
+ - `@opentelemetry/core` - Core utilities (for W3CTraceContextPropagator, etc.)
+
+ ## Configuration
+
+ Using the OtelBridge requires two steps:
+
+ 1. Configure OpenTelemetry instrumentation in your application
+ 2. Add the OtelBridge to your Mastra observability config
+
+ ### Step 1: OpenTelemetry Instrumentation
+
+ Create an instrumentation file that initializes OTEL. This must run before your application code:
+
+ ```typescript title="instrumentation.ts" showLineNumbers copy
+ import { NodeSDK } from "@opentelemetry/sdk-node";
+ import { getNodeAutoInstrumentations } from "@opentelemetry/auto-instrumentations-node";
+ import { OTLPTraceExporter } from "@opentelemetry/exporter-trace-otlp-proto";
+ import { BatchSpanProcessor } from "@opentelemetry/sdk-trace-base";
+ import { W3CTraceContextPropagator } from "@opentelemetry/core";
+
+ const sdk = new NodeSDK({
+   serviceName: "my-service",
+   spanProcessors: [
+     new BatchSpanProcessor(
+       new OTLPTraceExporter({
+         url: process.env.OTEL_EXPORTER_OTLP_ENDPOINT || "http://localhost:4318/v1/traces",
+       })
+     ),
+   ],
+   instrumentations: [getNodeAutoInstrumentations()],
+   textMapPropagator: new W3CTraceContextPropagator(),
+ });
+
+ sdk.start();
+
+ export { sdk };
+ ```
+
+ ### Step 2: Mastra Configuration
+
+ Add the OtelBridge to your Mastra observability config:
+
+ ```typescript title="src/mastra/index.ts" showLineNumbers copy
+ import { Mastra } from "@mastra/core";
+ import { Observability } from "@mastra/observability";
+ import { OtelBridge } from "@mastra/otel-bridge";
+
+ export const mastra = new Mastra({
+   observability: new Observability({
+     configs: {
+       default: {
+         serviceName: "my-service",
+         bridge: new OtelBridge(),
+       },
+     },
+   }),
+   agents: {
+     /* your agents */
+   },
+ });
+ ```
+
+ No Mastra exporters are required when using the bridge — traces are sent via your OTEL SDK configuration. You can optionally add Mastra exporters if you want to send traces to additional destinations.
+
+ ### Running Your Application
+
+ Use the `--import` flag to ensure instrumentation loads before your application:
+
+ ```bash
+ tsx --import ./instrumentation.ts ./src/index.ts
+ ```
+
+ ## Trace Hierarchy
+
+ With the OtelBridge, your traces maintain proper hierarchy across OTEL and Mastra boundaries:
+
+ ```
+ HTTP POST /api/chat (from Hono middleware)
+ └── agent.assistant (from Mastra via OtelBridge)
+     ├── chat gpt-4o (LLM call)
+     ├── tool.execute search (tool execution)
+     │   └── HTTP GET api.example.com (from OTEL auto-instrumentation)
+     └── chat gpt-4o (follow-up LLM call)
+ ```
+
+ ## Multi-Service Distributed Tracing
+
+ The OtelBridge enables trace propagation across service boundaries. When Service A calls Service B via HTTP, trace context propagates automatically:
+
+ ```
+ Service A: HTTP POST /api/process
+ └── HTTP POST service-b/api/analyze (outgoing call)
+
+ Service B: HTTP POST /api/analyze (incoming call - same trace!)
+ └── agent.analyzer (Mastra agent inherits trace context)
+     └── chat gpt-4o
+ ```
+
+ Both services must have:
+ 1. OTEL instrumentation configured
+ 2. W3C Trace Context propagator enabled
+ 3. Mastra with OtelBridge configured
+
+ ## Troubleshooting
+
+ If traces aren't appearing or connecting as expected:
+
+ - Verify OTEL SDK is initialized before Mastra (use `--import` flag or import at top of entry point)
+ - Ensure the OtelBridge is added to your observability config
+ - Check that your OTEL backend is running and accessible
+
+ ## Related
+
+ - [Tracing Overview](/docs/v1/observability/tracing/overview)
+ - [OpenTelemetry Exporter](/docs/v1/observability/tracing/exporters/otel) - For sending traces to OTEL backends
+ - [OtelBridge Reference](/reference/v1/observability/tracing/bridges/otel) - API documentation
+ - [OpenTelemetry GenAI Conventions](https://opentelemetry.io/docs/specs/semconv/gen-ai/)
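One behavior the new bridge page states but does not demonstrate: OTEL-instrumented code running inside Mastra operations nests under Mastra's spans. A sketch using the standard `@opentelemetry/api` (the span name and URL are hypothetical); called from inside a Mastra tool with the bridge active, this span would appear as a child of the tool's span:

```typescript
import { trace } from "@opentelemetry/api";

// Uses the global tracer provider registered by the NodeSDK in
// instrumentation.ts; the string here is just a tracer label.
const tracer = trace.getTracer("my-service");

async function fetchUserData(id: string) {
  // startActiveSpan makes the new span current for the callback, so any
  // auto-instrumented calls inside it (such as fetch) nest under it too.
  return tracer.startActiveSpan("fetch-user-data", async (span) => {
    try {
      const res = await fetch(`https://api.example.com/users/${id}`);
      span.setAttribute("http.response.status_code", res.status);
      return await res.json();
    } finally {
      span.end();
    }
  });
}
```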
@@ -182,6 +182,23 @@ new ArizeExporter({
  });
  ```

+ ### Custom metadata
+
+ Non-reserved span attributes are serialized into the OpenInference `metadata` payload and surface in Arize/Phoenix. You can add them via `tracingOptions.metadata`:
+
+ ```ts
+ await agent.generate(input, {
+   tracingOptions: {
+     metadata: {
+       companyId: "acme-co",
+       tier: "enterprise",
+     },
+   },
+ });
+ ```
+
+ Reserved fields such as `input`, `output`, `sessionId`, thread/user IDs, and OpenInference IDs are excluded automatically.
+
  ## OpenInference Semantic Conventions

  This exporter implements the [OpenInference Semantic Conventions](https://github.com/Arize-ai/openinference/tree/main/spec) for generative AI applications, providing standardized trace structure across different observability platforms.
@@ -65,6 +65,25 @@ new BraintrustExporter({
  });
  ```

+ ## Using Tags
+
+ Tags help you categorize and filter traces in the Braintrust dashboard. Add tags when executing agents or workflows:
+
+ ```typescript
+ const result = await agent.generate({
+   messages: [{ role: "user", content: "Hello" }],
+   tracingOptions: {
+     tags: ["production", "experiment-v2", "user-request"],
+   },
+ });
+ ```
+
+ Tags appear in Braintrust's trace view and can be used to filter and search traces. Common use cases include:
+
+ - Environment labels: `"production"`, `"staging"`
+ - Experiment tracking: `"experiment-v1"`, `"control-group"`
+ - Priority levels: `"priority-high"`, `"batch-job"`
+
  ## Related

  - [Tracing Overview](/docs/v1/observability/tracing/overview)