@mastra/mcp-docs-server 0.0.3 → 0.0.4-alpha.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (127) hide show
  1. package/.docs/organized/changelogs/%40mastra%2Fastra.md +27 -27
  2. package/.docs/organized/changelogs/%40mastra%2Fchroma.md +27 -27
  3. package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +29 -29
  4. package/.docs/organized/changelogs/%40mastra%2Fcomposio.md +26 -26
  5. package/.docs/organized/changelogs/%40mastra%2Fcore.md +23 -23
  6. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +36 -36
  7. package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +35 -35
  8. package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +35 -35
  9. package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +32 -32
  10. package/.docs/organized/changelogs/%40mastra%2Fevals.md +27 -27
  11. package/.docs/organized/changelogs/%40mastra%2Ffirecrawl.md +29 -29
  12. package/.docs/organized/changelogs/%40mastra%2Fgithub.md +26 -26
  13. package/.docs/organized/changelogs/%40mastra%2Floggers.md +26 -26
  14. package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +26 -0
  15. package/.docs/organized/changelogs/%40mastra%2Fmcp.md +27 -27
  16. package/.docs/organized/changelogs/%40mastra%2Fmemory.md +26 -26
  17. package/.docs/organized/changelogs/%40mastra%2Fpg.md +26 -26
  18. package/.docs/organized/changelogs/%40mastra%2Fpinecone.md +26 -26
  19. package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +35 -35
  20. package/.docs/organized/changelogs/%40mastra%2Fqdrant.md +27 -27
  21. package/.docs/organized/changelogs/%40mastra%2Frag.md +26 -26
  22. package/.docs/organized/changelogs/%40mastra%2Fragie.md +26 -26
  23. package/.docs/organized/changelogs/%40mastra%2Fspeech-azure.md +26 -26
  24. package/.docs/organized/changelogs/%40mastra%2Fspeech-deepgram.md +26 -26
  25. package/.docs/organized/changelogs/%40mastra%2Fspeech-elevenlabs.md +26 -26
  26. package/.docs/organized/changelogs/%40mastra%2Fspeech-google.md +26 -26
  27. package/.docs/organized/changelogs/%40mastra%2Fspeech-ibm.md +26 -26
  28. package/.docs/organized/changelogs/%40mastra%2Fspeech-murf.md +26 -26
  29. package/.docs/organized/changelogs/%40mastra%2Fspeech-openai.md +26 -26
  30. package/.docs/organized/changelogs/%40mastra%2Fspeech-playai.md +26 -26
  31. package/.docs/organized/changelogs/%40mastra%2Fspeech-replicate.md +26 -26
  32. package/.docs/organized/changelogs/%40mastra%2Fspeech-speechify.md +26 -26
  33. package/.docs/organized/changelogs/%40mastra%2Fstabilityai.md +26 -26
  34. package/.docs/organized/changelogs/%40mastra%2Fturbopuffer.md +25 -0
  35. package/.docs/organized/changelogs/%40mastra%2Fupstash.md +31 -31
  36. package/.docs/organized/changelogs/%40mastra%2Fvectorize.md +30 -30
  37. package/.docs/organized/changelogs/%40mastra%2Fvoice-azure.md +9 -0
  38. package/.docs/organized/changelogs/%40mastra%2Fvoice-cloudflare.md +9 -0
  39. package/.docs/organized/changelogs/%40mastra%2Fvoice-deepgram.md +26 -26
  40. package/.docs/organized/changelogs/%40mastra%2Fvoice-elevenlabs.md +26 -26
  41. package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +26 -26
  42. package/.docs/organized/changelogs/%40mastra%2Fvoice-murf.md +26 -26
  43. package/.docs/organized/changelogs/%40mastra%2Fvoice-openai-realtime.md +25 -0
  44. package/.docs/organized/changelogs/%40mastra%2Fvoice-openai.md +26 -26
  45. package/.docs/organized/changelogs/%40mastra%2Fvoice-playai.md +26 -26
  46. package/.docs/organized/changelogs/%40mastra%2Fvoice-sarvam.md +27 -0
  47. package/.docs/organized/changelogs/%40mastra%2Fvoice-speechify.md +26 -26
  48. package/.docs/organized/changelogs/create-mastra.md +22 -22
  49. package/.docs/organized/changelogs/mastra.md +47 -47
  50. package/.docs/organized/code-examples/ai-sdk-useChat.md +2 -1
  51. package/.docs/raw/agents/02-adding-tools.mdx +6 -0
  52. package/.docs/raw/agents/02a-mcp-guide.mdx +192 -0
  53. package/.docs/raw/agents/03-adding-voice.mdx +8 -8
  54. package/.docs/raw/deployment/deployment.mdx +5 -42
  55. package/.docs/raw/deployment/server.mdx +45 -3
  56. package/.docs/raw/evals/00-overview.mdx +2 -2
  57. package/.docs/raw/evals/03-running-in-ci.mdx +7 -4
  58. package/.docs/raw/getting-started/mcp-docs-server.mdx +5 -2
  59. package/.docs/raw/guides/04-research-assistant.mdx +273 -0
  60. package/.docs/raw/local-dev/mastra-dev.mdx +2 -2
  61. package/.docs/raw/observability/logging.mdx +38 -0
  62. package/.docs/raw/observability/nextjs-tracing.mdx +102 -0
  63. package/.docs/raw/observability/tracing.mdx +110 -0
  64. package/.docs/raw/rag/overview.mdx +3 -3
  65. package/.docs/raw/rag/retrieval.mdx +7 -4
  66. package/.docs/raw/rag/vector-databases.mdx +107 -40
  67. package/.docs/raw/reference/client-js/memory.mdx +6 -3
  68. package/.docs/raw/reference/client-js/workflows.mdx +1 -0
  69. package/.docs/raw/reference/observability/providers/langsmith.mdx +2 -0
  70. package/.docs/raw/reference/rag/libsql.mdx +3 -3
  71. package/.docs/raw/reference/rag/upstash.mdx +50 -1
  72. package/.docs/raw/reference/rag/vectorize.mdx +48 -3
  73. package/.docs/raw/reference/tools/client.mdx +10 -2
  74. package/.docs/raw/reference/tools/vector-query-tool.mdx +1 -1
  75. package/.docs/raw/reference/voice/sarvam.mdx +260 -0
  76. package/.docs/raw/reference/workflows/afterEvent.mdx +76 -0
  77. package/.docs/raw/reference/workflows/events.mdx +305 -0
  78. package/.docs/raw/reference/workflows/resumeWithEvent.mdx +134 -0
  79. package/.docs/raw/reference/workflows/snapshots.mdx +204 -0
  80. package/.docs/raw/reference/workflows/step-retries.mdx +203 -0
  81. package/.docs/raw/voice/overview.mdx +135 -0
  82. package/.docs/raw/voice/speech-to-text.mdx +45 -0
  83. package/.docs/raw/voice/text-to-speech.mdx +52 -0
  84. package/.docs/raw/voice/voice-to-voice.mdx +310 -0
  85. package/.docs/raw/workflows/dynamic-workflows.mdx +4 -0
  86. package/.docs/raw/workflows/error-handling.mdx +183 -0
  87. package/.docs/raw/workflows/steps.mdx +12 -2
  88. package/.docs/raw/workflows/suspend-and-resume.mdx +207 -2
  89. package/.docs/raw/workflows/variables.mdx +23 -3
  90. package/dist/_tsup-dts-rollup.d.ts +83 -0
  91. package/dist/chunk-YEOOTUPA.js +191 -0
  92. package/dist/prepare-docs/prepare.d.ts +1 -1
  93. package/dist/prepare-docs/prepare.js +1 -13
  94. package/dist/stdio.d.ts +0 -1
  95. package/dist/stdio.js +352 -5
  96. package/package.json +9 -15
  97. package/.docs/raw/deployment/logging-and-tracing.mdx +0 -242
  98. package/dist/index.d.ts +0 -3
  99. package/dist/index.js +0 -19
  100. package/dist/prepare-docs/code-examples.d.ts +0 -4
  101. package/dist/prepare-docs/code-examples.js +0 -91
  102. package/dist/prepare-docs/copy-raw.d.ts +0 -1
  103. package/dist/prepare-docs/copy-raw.js +0 -41
  104. package/dist/prepare-docs/index.d.ts +0 -1
  105. package/dist/prepare-docs/index.js +0 -8
  106. package/dist/prepare-docs/package-changes.d.ts +0 -4
  107. package/dist/prepare-docs/package-changes.js +0 -92
  108. package/dist/sse.d.ts +0 -1
  109. package/dist/sse.js +0 -9
  110. package/dist/tools/__tests__/blog.test.d.ts +0 -1
  111. package/dist/tools/__tests__/blog.test.js +0 -48
  112. package/dist/tools/__tests__/changes.test.d.ts +0 -1
  113. package/dist/tools/__tests__/changes.test.js +0 -37
  114. package/dist/tools/__tests__/docs.test.d.ts +0 -1
  115. package/dist/tools/__tests__/docs.test.js +0 -46
  116. package/dist/tools/__tests__/examples.test.d.ts +0 -1
  117. package/dist/tools/__tests__/examples.test.js +0 -53
  118. package/dist/tools/blog.d.ts +0 -15
  119. package/dist/tools/blog.js +0 -73
  120. package/dist/tools/changes.d.ts +0 -11
  121. package/dist/tools/changes.js +0 -69
  122. package/dist/tools/docs.d.ts +0 -11
  123. package/dist/tools/docs.js +0 -176
  124. package/dist/tools/examples.d.ts +0 -11
  125. package/dist/tools/examples.js +0 -61
  126. package/dist/utils.d.ts +0 -6
  127. package/dist/utils.js +0 -9
@@ -0,0 +1,273 @@
1
+ ---
2
+ title: "Building a Research Paper Assistant | Mastra RAG Guides"
3
+ description: Guide on creating an AI research assistant that can analyze and answer questions about academic papers using RAG.
4
+ ---
5
+
6
+ import { Steps } from "nextra/components";
7
+
8
+ # Building a Research Paper Assistant with RAG
9
+
10
+ In this guide, we'll create an AI research assistant that can analyze academic papers and answer specific questions about their content using Retrieval Augmented Generation (RAG).
11
+
12
+ We'll use the foundational Transformer paper [Attention Is All You Need](https://arxiv.org/html/1706.03762) as our example.
13
+
14
+ ## Understanding RAG Components
15
+
16
+ Let's understand how RAG works and how we'll implement each component:
17
+
18
+ 1. Knowledge Store/Index
19
+ - Converting text into vector representations
20
+ - Creating numerical representations of content
21
+ - Implementation: We'll use OpenAI's text-embedding-3-small to create embeddings and store them in PgVector
22
+
23
+ 2. Retriever
24
+ - Finding relevant content via similarity search
25
+ - Matching query embeddings with stored vectors
26
+ - Implementation: We'll use PgVector to perform similarity searches on our stored embeddings
27
+
28
+ 3. Generator
29
+ - Processing retrieved content with an LLM
30
+ - Creating contextually informed responses
31
+ - Implementation: We'll use GPT-4o-mini to generate answers based on retrieved content
32
+
33
+ Our implementation will:
34
+ 1. Process the Transformer paper into embeddings
35
+ 2. Store them in PgVector for quick retrieval
36
+ 3. Use similarity search to find relevant sections
37
+ 4. Generate accurate responses using retrieved context
38
+
39
+ ## Project Structure
40
+
41
+ ```
42
+ research-assistant/
43
+ ├── src/
44
+ │ ├── agents/
45
+ │ │ └── researchAgent.ts
46
+ │ └── index.ts
47
+ ├── package.json
48
+ └── .env
49
+ ```
50
+
51
+ <Steps>
52
+ ### Initialize Project and Install Dependencies
53
+
54
+ First, create a new directory for your project and navigate into it:
55
+
56
+ ```bash
57
+ mkdir research-assistant
58
+ cd research-assistant
59
+ ```
60
+
61
+ Initialize a new Node.js project and install the required dependencies:
62
+
63
+ ```bash
64
+ npm init -y
65
+ npm install @mastra/core @mastra/rag @mastra/pg @ai-sdk/openai zod
66
+ ```
67
+
68
+ Set up environment variables for API access and database connection:
69
+
70
+ ```bash filename=".env" copy
71
+ OPENAI_API_KEY=your_openai_api_key
72
+ POSTGRES_CONNECTION_STRING=your_connection_string
73
+ ```
74
+
75
+ Create the necessary files for our project:
76
+
77
+ ```bash
78
+ mkdir -p src/agents
79
+ touch src/agents/researchAgent.ts src/index.ts
80
+ ```
81
+
82
+ ### Create the Research Assistant Agent
83
+
84
+ Now we'll create our RAG-enabled research assistant. The agent uses:
85
+ - A [Vector Query Tool](/docs/reference/tools/vector-query-tool) for performing semantic search over our vector store to find relevant content in our papers.
86
+ - GPT-4o-mini for understanding queries and generating responses
87
+ - Custom instructions that guide the agent on how to analyze papers, use retrieved content effectively, and acknowledge limitations
88
+
89
+ ```typescript copy showLineNumbers filename="src/agents/researchAgent.ts"
90
+ import { Agent } from '@mastra/core/agent';
91
+ import { openai } from '@ai-sdk/openai';
92
+ import { createVectorQueryTool } from '@mastra/rag';
93
+
94
+ // Create a tool for semantic search over our paper embeddings
95
+ const vectorQueryTool = createVectorQueryTool({
96
+ vectorStoreName: 'pgVector',
97
+ indexName: 'papers',
98
+ model: openai.embedding('text-embedding-3-small'),
99
+ });
100
+
101
+ export const researchAgent = new Agent({
102
+ name: 'Research Assistant',
103
+ instructions:
104
+ `You are a helpful research assistant that analyzes academic papers and technical documents.
105
+ Use the provided vector query tool to find relevant information from your knowledge base,
106
+ and provide accurate, well-supported answers based on the retrieved content.
107
+ Focus on the specific content available in the tool and acknowledge if you cannot find sufficient information to answer a question.
108
+ Base your responses only on the content provided, not on general knowledge.`,
109
+ model: openai('gpt-4o-mini'),
110
+ tools: {
111
+ vectorQueryTool,
112
+ },
113
+ });
114
+ ```
115
+
116
+ ### Set Up the Mastra Instance and Vector Store
117
+
118
+ ```typescript copy showLineNumbers filename="src/index.ts"
119
+ import { MDocument } from '@mastra/rag';
120
+ import { Mastra } from '@mastra/core';
121
+ import { PgVector } from '@mastra/pg';
122
+ import { embedMany } from 'ai';
+ import { openai } from '@ai-sdk/openai';
123
+
124
+ import { researchAgent } from './agents/researchAgent';
125
+
126
+ const pgVector = new PgVector(process.env.POSTGRES_CONNECTION_STRING!);
127
+ export const mastra = new Mastra({
128
+ agents: { researchAgent },
129
+ vectors: { pgVector },
130
+ });
131
+ ```
132
+
133
+ ### Load and Process the Paper
134
+
135
+ This step handles the initial document processing. We:
136
+ 1. Fetch the research paper from its URL
137
+ 2. Convert it into a document object
138
+ 3. Split it into smaller, manageable chunks for better processing
139
+
140
+ ```typescript copy showLineNumbers{14} filename="src/index.ts"
141
+ // Load the paper
142
+ const paperUrl = "https://arxiv.org/html/1706.03762";
143
+ const response = await fetch(paperUrl);
144
+ const paperText = await response.text();
145
+
146
+ // Create document and chunk it
147
+ const doc = MDocument.fromText(paperText);
148
+ const chunks = await doc.chunk({
149
+ strategy: 'recursive',
150
+ size: 512,
151
+ overlap: 50,
152
+ separator: '\n',
153
+ });
154
+ ```
155
+
156
+ ### Create and Store Embeddings
157
+
158
+ Finally, we'll prepare our content for RAG by:
159
+ 1. Generating embeddings for each chunk of text
160
+ 2. Creating a vector store index to hold our embeddings
161
+ 3. Storing both the embeddings and metadata (original text and source information) in our vector database
162
+
163
+ > **Note**: This metadata is crucial as it allows us to return the actual content when the vector store finds relevant matches.
164
+
165
+ This allows our agent to efficiently search and retrieve relevant information.
166
+
167
+ ```typescript copy showLineNumbers{28} filename="src/index.ts"
168
+ // Generate embeddings
169
+ const { embeddings } = await embedMany({
170
+ model: openai.embedding('text-embedding-3-small'),
171
+ values: chunks.map(chunk => chunk.text),
172
+ });
173
+
174
+ // Get the vector store instance from Mastra
175
+ const vectorStore = mastra.getVector('pgVector');
176
+
177
+ // Create an index for our paper chunks
178
+ await vectorStore.createIndex({
179
+ indexName: 'papers',
180
+ dimension: 1536,
181
+ });
182
+
183
+ // Store embeddings
184
+ await vectorStore.upsert({
185
+ indexName: 'papers',
186
+ vectors: embeddings,
187
+ metadata: chunks.map(chunk => ({
188
+ text: chunk.text,
189
+ source: 'transformer-paper'
190
+ })),
191
+ });
192
+ ```
193
+
194
+ This will:
195
+ 1. Load the paper from the URL
196
+ 2. Split it into manageable chunks
197
+ 3. Generate embeddings for each chunk
198
+ 4. Store both the embeddings and text in our vector database
199
+
200
+ ### Test the Assistant
201
+
202
+ Let's test our research assistant with different types of queries:
203
+
204
+ ```typescript filename="src/index.ts" showLineNumbers{52} copy
205
+ const agent = mastra.getAgent('researchAgent');
206
+
207
+ // Basic query about concepts
208
+ const query1 = "What problems does sequence modeling face with neural networks?";
209
+ const response1 = await agent.generate(query1);
210
+ console.log("\nQuery:", query1);
211
+ console.log("Response:", response1.text);
212
+ ```
213
+
214
+ You should see output like:
215
+ ```
216
+ Query: What problems does sequence modeling face with neural networks?
217
+ Response: Sequence modeling with neural networks faces several key challenges:
218
+ 1. Vanishing and exploding gradients during training, especially with long sequences
219
+ 2. Difficulty handling long-term dependencies in the input
220
+ 3. Limited computational efficiency due to sequential processing
221
+ 4. Challenges in parallelizing computations, resulting in longer training times
222
+ ```
223
+
224
+ Let's try another question:
225
+ ```typescript filename="src/index.ts" showLineNumbers{60} copy
226
+ // Query about specific findings
227
+ const query2 = "What improvements were achieved in translation quality?";
228
+ const response2 = await agent.generate(query2);
229
+ console.log("\nQuery:", query2);
230
+ console.log("Response:", response2.text);
231
+ ```
232
+
233
+ Output:
234
+ ```
235
+ Query: What improvements were achieved in translation quality?
236
+ Response: The model showed significant improvements in translation quality, achieving more than 2.0
237
+ BLEU points improvement over previously reported models on the WMT 2014 English-to-German translation
238
+ task, while also reducing training costs.
239
+ ```
240
+
241
+ ### Serve the Application
242
+
243
+ Start the Mastra server to expose your research assistant via API:
244
+
245
+ ```bash
246
+ mastra dev --dir src
247
+ ```
248
+
249
+ Your research assistant will be available at:
250
+ ```
251
+ http://localhost:4111/api/agents/researchAgent/generate
252
+ ```
253
+
254
+ Test with curl:
255
+
256
+ ```bash
257
+ curl -X POST http://localhost:4111/api/agents/researchAgent/generate \
258
+ -H "Content-Type: application/json" \
259
+ -d '{
260
+ "messages": [
261
+ { "role": "user", "content": "What were the main findings about model parallelization?" }
262
+ ]
263
+ }'
264
+ ```
265
+ </Steps>
266
+
267
+ ## Advanced RAG Examples
268
+
269
+ Explore these examples for more advanced RAG techniques:
270
+ - [Filter RAG](/examples/rag/usage/filter-rag) for filtering results using metadata
271
+ - [Cleanup RAG](/examples/rag/usage/cleanup-rag) for optimizing information density
272
+ - [Chain of Thought RAG](/examples/rag/usage/cot-rag) for complex reasoning queries using workflows
273
+ - [Rerank RAG](/examples/rag/usage/rerank-rag) for improved result relevance
@@ -74,7 +74,7 @@ The Tools playground allows you to test your custom tools in isolation:
74
74
 
75
75
  ## REST API Endpoints
76
76
 
77
- `mastra dev` also spins up REST API routes for your agents and workflows via the local [Mastra Server](/docs/deployment/server). This allows you to test your API endpoints before deployment. See [Mastra Dev reference](http://localhost:3000/docs/reference/cli/dev#routes) for more details about all endpoints.
77
+ `mastra dev` also spins up REST API routes for your agents and workflows via the local [Mastra Server](/docs/deployment/server). This allows you to test your API endpoints before deployment. See [Mastra Dev reference](/docs/reference/cli/dev#routes) for more details about all endpoints.
78
78
 
79
79
  You can then leverage the [Mastra Client](/docs/deployment/client) SDK to interact with your served REST API routes seamlessly.
80
80
 
@@ -105,4 +105,4 @@ This architecture allows you to start developing immediately without setting up
105
105
 
106
106
  `mastra dev` makes it easy to develop, debug, and iterate on your AI logic in a self-contained environment before deploying to production.
107
107
 
108
- - [Mastra Dev reference](../reference/cli/dev.mdx)
108
+ - [Mastra Dev reference](../reference/cli/dev.mdx)
@@ -0,0 +1,38 @@
1
+ ---
2
+ title: "Logging | Mastra Observability Documentation"
3
+ description: Documentation on effective logging in Mastra, crucial for understanding application behavior and improving AI accuracy.
4
+ ---
5
+
6
+ import Image from "next/image";
7
+
8
+ # Logging
9
+
10
+ In Mastra, logs can detail when certain functions run, what input data they receive, and how they respond.
11
+
12
+ ## Basic Setup
13
+
14
+ Here's a minimal example that sets up a **console logger** at the `INFO` level. This will print out messages at `INFO` severity and above (i.e., `INFO`, `WARN`, `ERROR`) to the console.
15
+
16
+ ```typescript filename="mastra.config.ts" showLineNumbers copy
17
+ import { Mastra } from "@mastra/core";
18
+ import { createLogger } from "@mastra/core/logger";
19
+
20
+ export const mastra = new Mastra({
21
+ // Other Mastra configuration...
22
+ logger: createLogger({
23
+ name: "Mastra",
24
+ level: "info",
25
+ }),
26
+ });
27
+ ```
28
+
29
+ In this configuration:
30
+
31
+ - `name: "Mastra"` specifies the name to group logs under.
32
+ - `level: "info"` sets the minimum severity of logs to record.
33
+
34
+ ## Configuration
35
+
36
+ - For more details on the options you can pass to `createLogger()`, see the [createLogger reference documentation](/docs/reference/observability/create-logger.mdx).
37
+ - Once you have a `Logger` instance, you can call its methods (e.g., `.info()`, `.warn()`, `.error()`); see the [Logger instance reference documentation](/docs/reference/observability/logger.mdx) for details on each method.
38
+ - If you want to send your logs to an external service for centralized collection, analysis, or storage, you can configure other logger types such as Upstash Redis. Consult the [createLogger reference documentation](/docs/reference/observability/create-logger.mdx) for details on parameters like `url`, `token`, and `key` when using the `UPSTASH` logger type.
@@ -0,0 +1,102 @@
1
+ ---
2
+ title: "Next.js Tracing | Mastra Observability Documentation"
3
+ description: "Set up OpenTelemetry tracing for Next.js applications"
4
+ ---
5
+
6
+ # Next.js Tracing
7
+
8
+ If you're using Next.js, you have two options for setting up OpenTelemetry instrumentation:
9
+
10
+ ### Option 1: Using Vercel's OTEL Setup
11
+
12
+ If you're deploying to Vercel, you can use their built-in OpenTelemetry setup:
13
+
14
+ 1. Install the required dependencies:
15
+
16
+ ```bash copy
17
+ npm install @opentelemetry/api @vercel/otel
18
+ ```
19
+
20
+ 2. Create an instrumentation file at the root of your project (or in the src folder if using one):
21
+
22
+ ```ts filename="instrumentation.ts" copy
23
+ import { registerOTel } from '@vercel/otel'
24
+
25
+ export function register() {
26
+ registerOTel({ serviceName: 'your-project-name' })
27
+ }
28
+ ```
29
+
30
+ ### Option 2: Using Custom Exporters
31
+
32
+ If you're using other observability tools (like Langfuse), you can configure a custom exporter:
33
+
34
+ 1. Install the required dependencies (example using Langfuse):
35
+
36
+ ```bash copy
37
+ npm install @opentelemetry/api langfuse-vercel
38
+ ```
39
+
40
+ 2. Create an instrumentation file:
41
+
42
+ ```ts filename="instrumentation.ts" copy
43
+ import {
44
+ NodeSDK,
45
+ ATTR_SERVICE_NAME,
46
+ Resource,
47
+ } from '@mastra/core/telemetry/otel-vendor';
48
+ import { LangfuseExporter } from 'langfuse-vercel';
49
+
50
+ export function register() {
51
+ const exporter = new LangfuseExporter({
52
+ // ... Langfuse config
53
+ })
54
+
55
+ const sdk = new NodeSDK({
56
+ resource: new Resource({
57
+ [ATTR_SERVICE_NAME]: 'ai',
58
+ }),
59
+ traceExporter: exporter,
60
+ });
61
+
62
+ sdk.start();
63
+ }
64
+ ```
65
+
66
+ ### Next.js Configuration
67
+
68
+ For either option, enable the instrumentation hook in your Next.js config:
69
+
70
+ ```ts filename="next.config.ts" showLineNumbers copy
71
+ import type { NextConfig } from "next";
72
+
73
+ const nextConfig: NextConfig = {
74
+ experimental: {
75
+ instrumentationHook: true // Not required in Next.js 15+
76
+ }
77
+ };
78
+
79
+ export default nextConfig;
80
+ ```
81
+
82
+ ### Mastra Configuration
83
+
84
+ Configure your Mastra instance:
85
+
86
+ ```typescript filename="mastra.config.ts" copy
87
+ import { Mastra } from "@mastra/core";
88
+
89
+ export const mastra = new Mastra({
90
+ // ... other config
91
+ telemetry: {
92
+ serviceName: "your-project-name",
93
+ enabled: true
94
+ }
95
+ });
96
+ ```
97
+
98
+ This setup will enable OpenTelemetry tracing for your Next.js application and Mastra operations.
99
+
100
+ For more details, see the documentation for:
101
+ - [Next.js Instrumentation](https://nextjs.org/docs/app/building-your-application/optimizing/instrumentation)
102
+ - [Vercel OpenTelemetry](https://vercel.com/docs/observability/otel-overview/quickstart)
@@ -0,0 +1,110 @@
1
+ ---
2
+ title: "Tracing | Mastra Observability Documentation"
3
+ description: "Set up OpenTelemetry tracing for Mastra applications"
4
+ ---
5
+
6
+ import Image from "next/image";
7
+
8
+ # Tracing
9
+
10
+ Mastra supports the OpenTelemetry Protocol (OTLP) for tracing and monitoring your application. When telemetry is enabled, Mastra automatically traces all core primitives including agent operations, LLM interactions, tool executions, integration calls, workflow runs, and database operations. Your telemetry data can then be exported to any OTEL collector.
11
+
12
+ ### Basic Configuration
13
+
14
+ Here's a simple example of enabling telemetry:
15
+
16
+ ```ts filename="mastra.config.ts" showLineNumbers copy
17
+ export const mastra = new Mastra({
18
+ // ... other config
19
+ telemetry: {
20
+ serviceName: "my-app",
21
+ enabled: true,
22
+ sampling: {
23
+ type: "always_on",
24
+ },
25
+ export: {
26
+ type: "otlp",
27
+ endpoint: "http://localhost:4318", // SigNoz local endpoint
28
+ },
29
+ },
30
+ });
31
+ ```
32
+
33
+ ### Configuration Options
34
+
35
+ The telemetry config accepts these properties:
36
+
37
+ ```ts
38
+ type OtelConfig = {
39
+ // Name to identify your service in traces (optional)
40
+ serviceName?: string;
41
+
42
+ // Enable/disable telemetry (defaults to true)
43
+ enabled?: boolean;
44
+
45
+ // Control how many traces are sampled
46
+ sampling?: {
47
+ type: "ratio" | "always_on" | "always_off" | "parent_based";
48
+ probability?: number; // For ratio sampling
49
+ root?: {
50
+ probability: number; // For parent_based sampling
51
+ };
52
+ };
53
+
54
+ // Where to send telemetry data
55
+ export?: {
56
+ type: "otlp" | "console";
57
+ endpoint?: string;
58
+ headers?: Record<string, string>;
59
+ };
60
+ };
61
+ ```
62
+
63
+ See the [OtelConfig reference documentation](/docs/reference/observability/otel-config.mdx) for more details.
64
+
65
+ ### Environment Variables
66
+
67
+ You can configure the OTLP endpoint and headers through environment variables:
68
+
69
+ ```env filename=".env" copy
70
+ OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4318
71
+ OTEL_EXPORTER_OTLP_HEADERS=x-api-key=your-api-key
72
+ ```
73
+
74
+ Then in your config:
75
+
76
+ ```ts filename="mastra.config.ts" showLineNumbers copy
77
+ export const mastra = new Mastra({
78
+ // ... other config
79
+ telemetry: {
80
+ serviceName: "my-app",
81
+ enabled: true,
82
+ export: {
83
+ type: "otlp",
84
+ // endpoint and headers will be picked up from env vars
85
+ },
86
+ },
87
+ });
88
+ ```
89
+
90
+ ### Example: SigNoz Integration
91
+
92
+ Here's what a traced agent interaction looks like in [SigNoz](https://signoz.io):
93
+
94
+ <img
95
+ src="/docs/signoz-telemetry-demo.png"
96
+ alt="Agent interaction trace showing spans, LLM calls, and tool executions"
97
+ style={{ maxWidth: "800px", width: "100%", margin: "8px 0" }}
98
+ className="nextra-image rounded-md"
99
+ data-zoom
100
+ width={800}
101
+ height={400}
102
+ />
103
+
104
+ ### Other Supported Providers
105
+
106
+ For a complete list of supported observability providers and their configuration details, see the [Observability Providers reference](../reference/observability/providers/).
107
+
108
+ ### Framework-Specific Setup
109
+
110
+ For Next.js applications, see our [Next.js Tracing](/docs/observability/nextjs-tracing) guide.
@@ -35,9 +35,9 @@ const chunks = await doc.chunk({
35
35
  overlap: 50,
36
36
  });
37
37
 
38
- // 3. Generate embeddings
38
+ // 3. Generate embeddings; we need to pass the text of each chunk
39
39
  const { embeddings } = await embedMany({
40
- values: chunks,
40
+ values: chunks.map(chunk => chunk.text),
41
41
  model: openai.embedding("text-embedding-3-small"),
42
42
  });
43
43
 
@@ -81,5 +81,5 @@ See the [OTel Configuration](../reference/observability/otel-config.mdx) page fo
81
81
 
82
82
  ## More resources
83
83
 
84
- - [Chain of Thought RAG Example](../../examples/rag/cot-rag.mdx)
84
+ - [Chain of Thought RAG Example](../../examples/rag/usage/cot-rag.mdx)
85
85
  - [All RAG Examples](../../examples/) (including different chunking strategies, embedding models, and vector stores)
@@ -42,6 +42,9 @@ const results = await pgVector.query({
42
42
  queryVector: embedding,
43
43
  topK: 10,
44
44
  });
45
+
46
+ // Display results
47
+ console.log(results);
45
48
  ```
46
49
 
47
50
  Results include both the text content and a similarity score:
@@ -62,7 +65,7 @@ Results include both the text content and a similarity score:
62
65
  ]
63
66
  ```
64
67
 
65
- For an example of how to use the basic retrieval method, see the [Retrieve Results](../../examples/rag/retrieve-results.mdx) example.
68
+ For an example of how to use the basic retrieval method, see the [Retrieve Results](../../examples/rag/query/retrieve-results.mdx) example.
66
69
 
67
70
  ## Advanced Retrieval options
68
71
 
@@ -143,7 +146,7 @@ Common use cases for metadata filtering:
143
146
  - Combine multiple conditions for precise querying
144
147
  - Filter by document attributes (e.g., language, author)
145
148
 
146
- For an example of how to use metadata filtering, see the [Hybrid Vector Search](../../examples/rag/hybrid-vector-search.mdx) example.
149
+ For an example of how to use metadata filtering, see the [Hybrid Vector Search](../../examples/rag/query/hybrid-vector-search.mdx) example.
147
150
 
148
151
  ### Vector Query Tool
149
152
 
@@ -334,7 +337,7 @@ The re-ranked results combine vector similarity with semantic understanding to i
334
337
 
335
338
  For more details about re-ranking, see the [rerank()](/docs/reference/rag/rerank) method.
336
339
 
337
- For an example of how to use the re-ranking method, see the [Re-ranking Results](../../examples/rag/rerank.mdx) example.
340
+ For an example of how to use the re-ranking method, see the [Re-ranking Results](../../examples/rag/rerank/rerank.mdx) example.
338
341
 
339
342
  ### Graph-based Retrieval
340
343
 
@@ -359,4 +362,4 @@ const graphQueryTool = createGraphQueryTool({
359
362
 
360
363
  For more details about graph-based retrieval, see the [GraphRAG](/docs/reference/rag/graph-rag) class and the [createGraphQueryTool()](/docs/reference/tools/graph-rag-tool) function.
361
364
 
362
- For an example of how to use the graph-based retrieval method, see the [Graph-based Retrieval](../../examples/rag/graph-rag.mdx) example.
365
+ For an example of how to use the graph-based retrieval method, see the [Graph-based Retrieval](../../examples/rag/usage/graph-rag.mdx) example.