@mastra/mcp-docs-server 0.13.16 → 0.13.17-alpha.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. package/.docs/organized/changelogs/%40internal%2Fstorage-test-utils.md +8 -8
  2. package/.docs/organized/changelogs/%40internal%2Ftypes-builder.md +2 -0
  3. package/.docs/organized/changelogs/%40mastra%2Fastra.md +10 -10
  4. package/.docs/organized/changelogs/%40mastra%2Fauth.md +6 -0
  5. package/.docs/organized/changelogs/%40mastra%2Fchroma.md +10 -10
  6. package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +10 -10
  7. package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +19 -19
  8. package/.docs/organized/changelogs/%40mastra%2Fcloud.md +10 -10
  9. package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +10 -10
  10. package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +10 -10
  11. package/.docs/organized/changelogs/%40mastra%2Fcore.md +22 -22
  12. package/.docs/organized/changelogs/%40mastra%2Fcouchbase.md +11 -11
  13. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +19 -0
  14. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +19 -19
  15. package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +19 -19
  16. package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +20 -20
  17. package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +23 -23
  18. package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +11 -11
  19. package/.docs/organized/changelogs/%40mastra%2Fevals.md +10 -10
  20. package/.docs/organized/changelogs/%40mastra%2Ffastembed.md +6 -0
  21. package/.docs/organized/changelogs/%40mastra%2Ffirecrawl.md +11 -11
  22. package/.docs/organized/changelogs/%40mastra%2Fgithub.md +10 -10
  23. package/.docs/organized/changelogs/%40mastra%2Flance.md +10 -10
  24. package/.docs/organized/changelogs/%40mastra%2Flibsql.md +10 -10
  25. package/.docs/organized/changelogs/%40mastra%2Floggers.md +10 -10
  26. package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +19 -19
  27. package/.docs/organized/changelogs/%40mastra%2Fmcp-registry-registry.md +11 -11
  28. package/.docs/organized/changelogs/%40mastra%2Fmcp.md +19 -19
  29. package/.docs/organized/changelogs/%40mastra%2Fmem0.md +10 -10
  30. package/.docs/organized/changelogs/%40mastra%2Fmemory.md +11 -11
  31. package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +10 -10
  32. package/.docs/organized/changelogs/%40mastra%2Fmssql.md +9 -0
  33. package/.docs/organized/changelogs/%40mastra%2Fopensearch.md +10 -10
  34. package/.docs/organized/changelogs/%40mastra%2Fpg.md +10 -10
  35. package/.docs/organized/changelogs/%40mastra%2Fpinecone.md +10 -10
  36. package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +25 -25
  37. package/.docs/organized/changelogs/%40mastra%2Fqdrant.md +11 -11
  38. package/.docs/organized/changelogs/%40mastra%2Frag.md +10 -10
  39. package/.docs/organized/changelogs/%40mastra%2Fragie.md +10 -10
  40. package/.docs/organized/changelogs/%40mastra%2Fschema-compat.md +6 -0
  41. package/.docs/organized/changelogs/%40mastra%2Fserver.md +19 -19
  42. package/.docs/organized/changelogs/%40mastra%2Fturbopuffer.md +10 -10
  43. package/.docs/organized/changelogs/%40mastra%2Fupstash.md +10 -10
  44. package/.docs/organized/changelogs/%40mastra%2Fvectorize.md +10 -10
  45. package/.docs/organized/changelogs/%40mastra%2Fvoice-azure.md +10 -10
  46. package/.docs/organized/changelogs/%40mastra%2Fvoice-cloudflare.md +10 -10
  47. package/.docs/organized/changelogs/%40mastra%2Fvoice-deepgram.md +10 -10
  48. package/.docs/organized/changelogs/%40mastra%2Fvoice-elevenlabs.md +10 -10
  49. package/.docs/organized/changelogs/%40mastra%2Fvoice-gladia.md +9 -0
  50. package/.docs/organized/changelogs/%40mastra%2Fvoice-google-gemini-live.md +9 -0
  51. package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +10 -10
  52. package/.docs/organized/changelogs/%40mastra%2Fvoice-murf.md +10 -10
  53. package/.docs/organized/changelogs/%40mastra%2Fvoice-openai-realtime.md +11 -11
  54. package/.docs/organized/changelogs/%40mastra%2Fvoice-openai.md +11 -11
  55. package/.docs/organized/changelogs/%40mastra%2Fvoice-playai.md +10 -10
  56. package/.docs/organized/changelogs/%40mastra%2Fvoice-sarvam.md +10 -10
  57. package/.docs/organized/changelogs/%40mastra%2Fvoice-speechify.md +10 -10
  58. package/.docs/organized/changelogs/create-mastra.md +21 -21
  59. package/.docs/organized/changelogs/mastra.md +32 -32
  60. package/.docs/raw/agents/overview.mdx +29 -47
  61. package/.docs/raw/deployment/monorepo.mdx +107 -0
  62. package/.docs/raw/frameworks/web-frameworks/astro.mdx +1 -0
  63. package/.docs/raw/frameworks/web-frameworks/next-js.mdx +1 -0
  64. package/.docs/raw/frameworks/web-frameworks/sveltekit.mdx +5 -0
  65. package/.docs/raw/frameworks/web-frameworks/vite-react.mdx +5 -0
  66. package/.docs/raw/reference/core/mastra-class.mdx +5 -3
  67. package/.docs/raw/reference/scorers/context-precision.mdx +130 -0
  68. package/.docs/raw/reference/scorers/context-relevance.mdx +222 -0
  69. package/.docs/raw/scorers/off-the-shelf-scorers.mdx +17 -1
  70. package/.docs/raw/server-db/local-dev-playground.mdx +19 -0
  71. package/.docs/raw/workflows/control-flow.mdx +0 -46
  72. package/.docs/raw/workflows/error-handling.mdx +213 -0
  73. package/package.json +5 -5
@@ -161,64 +161,46 @@ Define the `output` shape using [Zod](https://zod.dev/):
  ```typescript showLineNumbers copy
  import { z } from "zod";
  
- const response = await testAgent.generate("Monkey, Ice Cream, Boat", {
-   experimental_output: z.object({
-     summary: z.string(),
-     keywords: z.array(z.string())
-   })
- });
-
- console.log(response.object);
- ```
-
- ### Using Tools
-
- If you need to generate structured output alongside tool calls, you'll need to use the `experimental_output` or `structuredOutput` property instead of `output`. Here's how:
-
- ```typescript showLineNumbers copy
- const response = await testAgent.generate("Monkey, Ice Cream, Boat", {
-   experimental_output: z.object({
-     summary: z.string(),
-     keywords: z.array(z.string())
-   })
- });
-
- const responseWithExperimentalOutput = await testAgent.generate(
+ const response = await testAgent.generate(
    [
      {
-       role: "user",
-       content:
-         "Please analyze this repository and provide a summary and keywords...",
+       role: "system",
+       content: "Provide a summary and keywords for the following text:"
      },
+     {
+       role: "user",
+       content: "Monkey, Ice Cream, Boat"
+     }
    ],
    {
-     // Use experimental_output to enable both structured output and tool calls
-     experimental_output: schema,
-   },
+     output: z.object({
+       summary: z.string(),
+       keywords: z.array(z.string())
+     })
+   }
  );
  
- console.log("Structured Output:", responseWithExperimentalOutput.object);
+ console.log(response.object);
+ ```
  
- const responseWithStructuredOutput = await testAgent.generate(
-   [
-     {
-       role: "user",
-       content:
-         "Please analyze this repository and provide a summary and keywords...",
-     },
-   ],
+ #### Agents with tools
+
+ To generate structured output with agents that use tools, use the `experimental_output` property:
+
+ ```typescript {6} showLineNumbers copy
+ import { z } from "zod";
+
+ const response = await testAgent.generate(
+   // ...
    {
-     structuredOutput: {
-       schema: z.object({
-         summary: z.string(),
-         keywords: z.array(z.string())
-       }),
-       model: openai("gpt-4o-mini"),
-     }
-   },
+     experimental_output: z.object({
+       summary: z.string(),
+       keywords: z.array(z.string())
+     })
+   }
  );
  
- console.log("Structured Output:", responseWithStructuredOutput.object);
+ console.log(response.object);
  ```
  
  ## Describing images
@@ -0,0 +1,107 @@
+ ---
+ title: Monorepo Deployment
+ description: Learn how to deploy Mastra applications that are part of a monorepo setup
+ ---
+
+ import { FileTree } from "nextra/components";
+
+ # Monorepo Deployment
+
+ Deploying Mastra in a monorepo follows the same approach as deploying a standalone application. While some [Cloud](./cloud-providers/) or [Serverless Platform](./serverless-platforms/) providers may introduce extra requirements, the core setup is the same.
+
+ ## Example monorepo
+
+ In this example, the Mastra application is located at `apps/api`.
+
+ <FileTree>
+   <FileTree.Folder name="apps" defaultOpen>
+     <FileTree.Folder name="api" defaultOpen>
+       <FileTree.Folder name="src" defaultOpen>
+         <FileTree.Folder name="mastra" defaultOpen>
+           <FileTree.Folder name="agents" />
+           <FileTree.Folder name="tools" />
+           <FileTree.Folder name="workflows" />
+           <FileTree.File name="index.ts" />
+         </FileTree.Folder>
+       </FileTree.Folder>
+       <FileTree.File name="package.json" />
+       <FileTree.File name="tsconfig.json" />
+     </FileTree.Folder>
+     <FileTree.Folder name="web" />
+   </FileTree.Folder>
+   <FileTree.Folder name="packages" defaultOpen>
+     <FileTree.Folder name="ui" />
+     <FileTree.Folder name="utils" />
+   </FileTree.Folder>
+   <FileTree.File name="package.json" />
+ </FileTree>
+
+ ## Environment variables
+
+ Environment variables like `OPENAI_API_KEY` should be stored in an `.env` file at the root of the Mastra application (`apps/api`), for example:
+
+ <FileTree>
+   <FileTree.Folder name="api" defaultOpen>
+     <FileTree.Folder name="src" defaultOpen>
+       <FileTree.Folder name="mastra" />
+     </FileTree.Folder>
+     <FileTree.File name=".env" />
+     <FileTree.File name="package.json" />
+     <FileTree.File name="tsconfig.json" />
+   </FileTree.Folder>
+ </FileTree>
+
+
+ ## Deployment configuration
+
+ The image below shows how to select `apps/api` as the project root when deploying to [Mastra Cloud](../mastra-cloud/overview.mdx). While the interface may differ between providers, the configuration remains the same.
+
+ ![Deployment configuration](/image/monorepo/monorepo-mastra-cloud.jpg)
+
+ ## Dependency management
+
+ In a monorepo, keep dependencies consistent to avoid version conflicts and build errors.
+
+ - Use a **single lockfile** at the project root so all packages resolve the same versions.
+ - Align versions of **shared libraries** (like Mastra or frameworks) to prevent duplicates.
+
+ ## Deployment pitfalls
+
+ Common issues to watch for when deploying Mastra in a monorepo:
+
+ - **Wrong project root**: make sure the correct package (e.g. `apps/api`) is selected as the deploy target.
+
+ ## Bundler options
+
+ Use `transpilePackages` to compile TypeScript workspace packages or libraries. List package names exactly as they appear in each `package.json`. Use `externals` to exclude dependencies resolved at runtime, and `sourcemap` to emit readable stack traces.
+
+ ```typescript filename="src/mastra/index.ts" showLineNumbers copy
+ import { Mastra } from "@mastra/core/mastra";
+
+ export const mastra = new Mastra({
+   // ...
+   bundler: {
+     transpilePackages: ["utils"],
+     externals: ["ui"],
+     sourcemap: true
+   }
+ });
+ ```
+
+ > See [Mastra Class](../../reference/core/mastra-class.mdx) for more configuration options.
+
+ ## Supported monorepos
+
+ Mastra works with:
+
+ - npm workspaces
+ - pnpm workspaces
+ - Yarn workspaces
+ - Turborepo
+
+ Known limitations:
+
+ - Bun workspaces: partial support, known issues
+ - Nx: known issues
+
+ > If you are experiencing issues with monorepos, see our [Monorepos Support mega issue](https://github.com/mastra-ai/mastra/issues/6852).
@@ -516,3 +516,4 @@ Let me know if you need more information!
  ## Next Steps
  
  - [Deployment | With Astro on Vercel](/docs/deployment/web-framework#with-astro-on-vercel)
+ - [Monorepo Deployment](../../deployment/monorepo.mdx)
@@ -530,3 +530,4 @@ Let me know if you need more information!
  ## Next Steps
  
  - [Deployment | With Next.js on Vercel](/docs/deployment/web-framework#with-nextjs-on-vercel)
+ - [Monorepo Deployment](../../deployment/monorepo.mdx)
@@ -454,3 +454,8 @@ If you need more details or information about a different location, feel free to
  </Steps>
  </Tabs.Tab>
  </Tabs>
+
+
+ ## Next steps
+
+ - [Monorepo Deployment](../../deployment/monorepo.mdx)
@@ -235,3 +235,8 @@ Submitting **London** as the city would return a result similar to:
  ```plaintext
  The current weather in London is partly cloudy with a temperature of 19.3°C, feeling like 17.4°C. The humidity is at 53%, and there is a wind speed of 15.9 km/h, with gusts up to 38.5 km/h.
  ```
+
+
+ ## Next steps
+
+ - [Monorepo Deployment](../../deployment/monorepo.mdx)
@@ -16,7 +16,7 @@ Think of `Mastra` as a top-level registry:
  ## Importing
  
  ```typescript
- import { Mastra } from "@mastra/core";
+ import { Mastra } from "@mastra/core/mastra";
  ```
  
  ## Constructor
@@ -32,7 +32,7 @@ constructor(config?: Config);
  The Mastra class is typically initialized in your `src/mastra/index.ts` file:
  
  ```typescript filename="src/mastra/index.ts" showLineNumbers copy
- import { Mastra } from "@mastra/core";
+ import { Mastra } from "@mastra/core/mastra";
  import { LibSQLStore } from "@mastra/libsql";
  import { weatherAgent } from "./agents/weather-agent";
  
@@ -138,7 +138,9 @@ The constructor accepts an optional `Config` object to customize its behavior an
    {
      name: "bundler",
      type: "BundlerConfig",
-     description: "Configuration for the asset bundler.",
+     description: "Configuration for the asset bundler with options for externals, sourcemap, and transpilePackages.",
+     isOptional: true,
+     defaultValue: "{ externals: [], sourcemap: false, transpilePackages: [] }",
    },
  ]}
  />
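
The `defaultValue` added above implies the following shape for `BundlerConfig`. This is a sketch inferred from the documented defaults, not the actual type exported by `@mastra/core`:

```typescript
// Sketch of the BundlerConfig shape implied by the documented defaults;
// inferred from the reference table, not copied from @mastra/core.
type BundlerConfig = {
  externals?: string[];         // dependencies left unbundled and resolved at runtime
  sourcemap?: boolean;          // emit source maps for readable stack traces
  transpilePackages?: string[]; // workspace packages to compile with the app
};

// The documented default value:
const bundlerDefaults: BundlerConfig = {
  externals: [],
  sourcemap: false,
  transpilePackages: [],
};
```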
@@ -0,0 +1,130 @@
+ ---
+ title: "Reference: Context Precision Scorer | Scorers | Mastra Docs"
+ description: Documentation for the Context Precision Scorer in Mastra. Evaluates the relevance and precision of retrieved context for generating expected outputs using Mean Average Precision.
+ ---
+
+ import { PropertiesTable } from "@/components/properties-table";
+
+ # Context Precision Scorer
+
+ The `createContextPrecisionScorer()` function creates a scorer that evaluates how relevant and well-positioned retrieved context pieces are for generating expected outputs. It uses **Mean Average Precision (MAP)** to reward systems that place relevant context earlier in the sequence.
+
+ ## Parameters
+
+ <PropertiesTable
+   content={[
+     {
+       name: "model",
+       type: "MastraLanguageModel",
+       description: "The language model to use for evaluating context relevance",
+       required: true,
+     },
+     {
+       name: "options",
+       type: "ContextPrecisionMetricOptions",
+       description: "Configuration options for the scorer",
+       required: true,
+       children: [
+         {
+           name: "context",
+           type: "string[]",
+           description: "Array of context pieces to evaluate for relevance",
+           required: false,
+         },
+         {
+           name: "contextExtractor",
+           type: "(input, output) => string[]",
+           description: "Function to dynamically extract context from the run input and output",
+           required: false,
+         },
+         {
+           name: "scale",
+           type: "number",
+           description: "Scale factor to multiply the final score (default: 1)",
+           required: false,
+         },
+       ],
+     },
+   ]}
+ />
+
+ :::note
+ Either `context` or `contextExtractor` must be provided. If both are provided, `contextExtractor` takes precedence.
+ :::
+
+ ## .run() Returns
+
+ <PropertiesTable
+   content={[
+     {
+       name: "score",
+       type: "number",
+       description: "Mean Average Precision score between 0 and scale (default 0-1)",
+     },
+     {
+       name: "reason",
+       type: "string",
+       description: "Human-readable explanation of the context precision evaluation",
+     },
+   ]}
+ />
+
+ ## Scoring Details
+
+ ### Mean Average Precision (MAP)
+
+ Context Precision uses **Mean Average Precision** to evaluate both relevance and positioning:
+
+ 1. **Context Evaluation**: Each context piece is classified as relevant or irrelevant for generating the expected output
+ 2. **Precision Calculation**: For each relevant context at position `i`, precision = `relevant_items_so_far / (i + 1)`
+ 3. **Average Precision**: Sum all precision values and divide by total relevant items
+ 4. **Final Score**: Multiply by scale factor and round to 2 decimals
+
+ ### Scoring Formula
+
+ ```
+ MAP = (Σ Precision@k) / R
+
+ Where:
+ - Precision@k = (relevant items in positions 1...k) / k
+ - R = total number of relevant items
+ - Only calculated at positions where relevant items appear
+ ```
+
+ ### Score Interpretation
+
+ - **1.0** = Perfect precision (all relevant context appears first)
+ - **0.5-0.9** = Good precision with some relevant context well-positioned
+ - **0.1-0.4** = Poor precision with relevant context buried or scattered
+ - **0.0** = No relevant context found
+
+ ### Example Calculation
+
+ Given context: `[relevant, irrelevant, relevant, irrelevant]`
+
+ - Position 0: Relevant → Precision = 1/1 = 1.0
+ - Position 1: Skip (irrelevant)
+ - Position 2: Relevant → Precision = 2/3 = 0.67
+ - Position 3: Skip (irrelevant)
+
+ MAP = (1.0 + 0.67) / 2 = 0.835 ≈ **0.83**
+
+ ## Usage Patterns
+
+ ### RAG System Evaluation
+ Ideal for evaluating retrieved context in RAG pipelines where:
+ - Context ordering matters for model performance
+ - You need to measure retrieval quality beyond simple relevance
+ - Early relevant context is more valuable than later relevant context
+
+ ### Context Window Optimization
+ Use when optimizing context selection for:
+ - Limited context windows
+ - Token budget constraints
+ - Multi-step reasoning tasks
+
+ ## Related
+
+ - [Answer Relevancy Scorer](/reference/scorers/answer-relevancy) - Evaluates if answers address the question
+ - [Faithfulness Scorer](/reference/scorers/faithfulness) - Measures answer groundedness in context
+ - [Custom Scorers](/docs/scorers/custom-scorers) - Creating your own evaluation metrics
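
The MAP arithmetic in the new `context-precision.mdx` reference is easy to verify directly. The sketch below is a hypothetical helper, not part of the package, that reproduces the documented formula and the worked example:

```typescript
// Hypothetical helper mirroring the MAP formula documented in
// context-precision.mdx; not part of @mastra/mcp-docs-server.
function meanAveragePrecision(relevance: boolean[]): number {
  let relevantSoFar = 0;
  let precisionSum = 0;
  relevance.forEach((isRelevant, position) => {
    if (isRelevant) {
      relevantSoFar += 1;
      // Precision@k, computed only at positions where a relevant item appears
      precisionSum += relevantSoFar / (position + 1);
    }
  });
  return relevantSoFar === 0 ? 0 : precisionSum / relevantSoFar;
}

// The worked example: [relevant, irrelevant, relevant, irrelevant]
console.log(meanAveragePrecision([true, false, true, false])); // 0.8333… ≈ 0.83
```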
@@ -0,0 +1,222 @@
+ ---
+ title: "Reference: Context Relevance Scorer | Scorers | Mastra Docs"
+ description: Documentation for the Context Relevance Scorer in Mastra. Evaluates the relevance and utility of provided context for generating agent responses using weighted relevance scoring.
+ ---
+
+ import { PropertiesTable } from "@/components/properties-table";
+
+ # Context Relevance Scorer
+
+ The `createContextRelevanceScorerLLM()` function creates a scorer that evaluates how relevant and useful provided context was for generating agent responses. It uses weighted relevance levels and applies penalties for unused high-relevance context and missing information.
+
+ ## Parameters
+
+ <PropertiesTable
+   content={[
+     {
+       name: "model",
+       type: "MastraLanguageModel",
+       description: "The language model to use for evaluating context relevance",
+       required: true,
+     },
+     {
+       name: "options",
+       type: "ContextRelevanceOptions",
+       description: "Configuration options for the scorer",
+       required: true,
+       children: [
+         {
+           name: "context",
+           type: "string[]",
+           description: "Array of context pieces to evaluate for relevance",
+           required: false,
+         },
+         {
+           name: "contextExtractor",
+           type: "(input, output) => string[]",
+           description: "Function to dynamically extract context from the run input and output",
+           required: false,
+         },
+         {
+           name: "scale",
+           type: "number",
+           description: "Scale factor to multiply the final score (default: 1)",
+           required: false,
+         },
+         {
+           name: "penalties",
+           type: "object",
+           description: "Configurable penalty settings for scoring",
+           required: false,
+           children: [
+             {
+               name: "unusedHighRelevanceContext",
+               type: "number",
+               description: "Penalty per unused high-relevance context (default: 0.1)",
+               required: false,
+             },
+             {
+               name: "missingContextPerItem",
+               type: "number",
+               description: "Penalty per missing context item (default: 0.15)",
+               required: false,
+             },
+             {
+               name: "maxMissingContextPenalty",
+               type: "number",
+               description: "Maximum total missing context penalty (default: 0.5)",
+               required: false,
+             },
+           ],
+         },
+       ],
+     },
+   ]}
+ />
+
+ :::note
+ Either `context` or `contextExtractor` must be provided. If both are provided, `contextExtractor` takes precedence.
+ :::
+
+ ## .run() Returns
+
+ <PropertiesTable
+   content={[
+     {
+       name: "score",
+       type: "number",
+       description: "Weighted relevance score between 0 and scale (default 0-1)",
+     },
+     {
+       name: "reason",
+       type: "string",
+       description: "Human-readable explanation of the context relevance evaluation",
+     },
+   ]}
+ />
+
+ ## Scoring Details
+
+ ### Weighted Relevance Scoring
+
+ Context Relevance uses a sophisticated scoring algorithm that considers:
+
+ 1. **Relevance Levels**: Each context piece is classified with weighted values:
+    - `high` = 1.0 (directly addresses the query)
+    - `medium` = 0.7 (supporting information)
+    - `low` = 0.3 (tangentially related)
+    - `none` = 0.0 (completely irrelevant)
+
+ 2. **Usage Detection**: Tracks whether relevant context was actually used in the response
+
+ 3. **Penalties Applied** (configurable via `penalties` options):
+    - **Unused High-Relevance**: `unusedHighRelevanceContext` penalty per unused high-relevance context (default: 0.1)
+    - **Missing Context**: Up to `maxMissingContextPenalty` for identified missing information (default: 0.5)
+
+ ### Scoring Formula
+
+ ```
+ Base Score = Σ(relevance_weights) / (num_contexts × 1.0)
+ Usage Penalty = count(unused_high_relevance) × unusedHighRelevanceContext
+ Missing Penalty = min(count(missing_context) × missingContextPerItem, maxMissingContextPenalty)
+
+ Final Score = max(0, Base Score - Usage Penalty - Missing Penalty) × scale
+ ```
+
+ **Default Values**:
+ - `unusedHighRelevanceContext` = 0.1 (10% penalty per unused high-relevance context)
+ - `missingContextPerItem` = 0.15 (15% penalty per missing context item)
+ - `maxMissingContextPenalty` = 0.5 (maximum 50% penalty for missing context)
+ - `scale` = 1
+
+ ### Score Interpretation
+
+ - **0.9-1.0** = Excellent relevance with minimal gaps
+ - **0.7-0.8** = Good relevance with some unused or missing context
+ - **0.4-0.6** = Mixed relevance with significant gaps
+ - **0.0-0.3** = Poor relevance or mostly irrelevant context
+
+ ### Difference from Context Precision
+
+ | Aspect | Context Relevance | Context Precision |
+ |--------|-------------------|-------------------|
+ | **Algorithm** | Weighted levels with penalties | Mean Average Precision (MAP) |
+ | **Relevance** | Multiple levels (high/medium/low/none) | Binary (yes/no) |
+ | **Position** | Not considered | Critical (rewards early placement) |
+ | **Usage** | Tracks and penalizes unused context | Not considered |
+ | **Missing** | Identifies and penalizes gaps | Not evaluated |
+
+ ## Usage Examples
+
+ ### Basic Configuration
+
+ ```typescript
+ const scorer = createContextRelevanceScorerLLM({
+   model: openai('gpt-4o'),
+   options: {
+     context: ['Einstein won the Nobel Prize for his work on the photoelectric effect'],
+     scale: 1,
+   },
+ });
+ ```
+
+ ### Custom Penalty Configuration
+
+ ```typescript
+ const scorer = createContextRelevanceScorerLLM({
+   model: openai('gpt-4o'),
+   options: {
+     context: ['Context information...'],
+     penalties: {
+       unusedHighRelevanceContext: 0.05, // Lower penalty for unused context
+       missingContextPerItem: 0.2, // Higher penalty per missing item
+       maxMissingContextPenalty: 0.4, // Lower maximum penalty cap
+     },
+     scale: 2, // Double the final score
+   },
+ });
+ ```
+
+ ### Dynamic Context Extraction
+
+ ```typescript
+ const scorer = createContextRelevanceScorerLLM({
+   model: openai('gpt-4o'),
+   options: {
+     contextExtractor: (input, output) => {
+       // Extract context based on the query
+       const userQuery = input?.inputMessages?.[0]?.content || '';
+       if (userQuery.includes('Einstein')) {
+         return [
+           'Einstein won the Nobel Prize for the photoelectric effect',
+           'He developed the theory of relativity'
+         ];
+       }
+       return ['General physics information'];
+     },
+     penalties: {
+       unusedHighRelevanceContext: 0.15,
+     },
+   },
+ });
+ ```
+
+ ## Usage Patterns
+
+ ### Content Generation Evaluation
+ Best for evaluating context quality in:
+ - Chat systems where context usage matters
+ - RAG pipelines needing nuanced relevance assessment
+ - Systems where missing context affects quality
+
+ ### Context Selection Optimization
+ Use when optimizing for:
+ - Comprehensive context coverage
+ - Effective context utilization
+ - Identifying context gaps
+
+ ## Related
+
+ - [Context Precision Scorer](/reference/scorers/context-precision) - Evaluates context ranking using MAP
+ - [Faithfulness Scorer](/reference/scorers/faithfulness) - Measures answer groundedness in context
+ - [Custom Scorers](/docs/scorers/custom-scorers) - Creating your own evaluation metrics
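
The scoring formula in the new `context-relevance.mdx` reference can likewise be expressed in a few lines. The sketch below follows the documented formula only; the type and function names are assumptions for illustration, not the package's internals:

```typescript
// Sketch of the documented Context Relevance formula; names and default
// penalty values are taken from the reference above, not the package source.
type Relevance = "high" | "medium" | "low" | "none";

const WEIGHTS: Record<Relevance, number> = { high: 1.0, medium: 0.7, low: 0.3, none: 0.0 };

function contextRelevanceScore(
  contexts: { relevance: Relevance; wasUsed: boolean }[],
  missingContextCount: number,
  penalties = {
    unusedHighRelevanceContext: 0.1,
    missingContextPerItem: 0.15,
    maxMissingContextPenalty: 0.5,
  },
  scale = 1,
): number {
  // Base Score = Σ(relevance_weights) / (num_contexts × 1.0)
  const baseScore =
    contexts.length === 0
      ? 0
      : contexts.reduce((sum, c) => sum + WEIGHTS[c.relevance], 0) / contexts.length;
  // Usage Penalty = count(unused high-relevance) × unusedHighRelevanceContext
  const unusedHigh = contexts.filter((c) => c.relevance === "high" && !c.wasUsed).length;
  const usagePenalty = unusedHigh * penalties.unusedHighRelevanceContext;
  // Missing Penalty is capped at maxMissingContextPenalty
  const missingPenalty = Math.min(
    missingContextCount * penalties.missingContextPerItem,
    penalties.maxMissingContextPenalty,
  );
  return Math.max(0, baseScore - usagePenalty - missingPenalty) * scale;
}

// Two high-relevance contexts (one unused) and one missing item:
// max(0, (1.0 + 1.0) / 2 - 0.1 - 0.15) × 1 = 0.75
console.log(contextRelevanceScore(
  [
    { relevance: "high", wasUsed: true },
    { relevance: "high", wasUsed: false },
  ],
  1,
)); // 0.75
```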
@@ -21,6 +21,22 @@ These scorers evaluate how correct, truthful, and complete your agent's answers
  - [`textual-difference`](/reference/scorers/textual-difference): Measures textual differences between strings (`0-1`, higher means more similar)
  - [`tool-call-accuracy`](/reference/scorers/tool-call-accuracy): Evaluates whether the LLM selects the correct tool from available options (`0-1`, higher is better)
  
+ ### Context Quality
+
+ These scorers evaluate the quality and relevance of context used in generating responses:
+
+ - [`context-precision`](/reference/scorers/context-precision): Evaluates context relevance and ranking using Mean Average Precision, rewarding early placement of relevant context (`0-1`, higher is better)
+ - [`context-relevance`](/reference/scorers/context-relevance): Measures context utility with nuanced relevance levels, usage tracking, and missing context detection (`0-1`, higher is better)
+
+ :::tip Context Scorer Selection
+ - Use **Context Precision** when context ordering matters and you need standard IR metrics (ideal for RAG ranking evaluation)
+ - Use **Context Relevance** when you need detailed relevance assessment and want to track context usage and identify gaps
+
+ Both context scorers support:
+ - **Static context**: Pre-defined context arrays
+ - **Dynamic context extraction**: Extract context from runs using custom functions (ideal for RAG systems, vector databases, etc.)
+ :::
+
  ### Output Quality
  
  These scorers evaluate adherence to format, style, and safety requirements:
@@ -28,4 +44,4 @@ These scorers evaluate adherence to format, style, and safety requirements:
  - [`tone-consistency`](/reference/scorers/tone-consistency): Measures consistency in formality, complexity, and style (`0-1`, higher is better)
  - [`toxicity`](/reference/scorers/toxicity): Detects harmful or inappropriate content (`0-1`, lower is better)
  - [`bias`](/reference/scorers/bias): Detects potential biases in the output (`0-1`, lower is better)
- - [`keyword-coverage`](/reference/scorers/keyword-coverage): Assesses technical terminology usage (`0-1`, higher is better)
+ - [`keyword-coverage`](/reference/scorers/keyword-coverage): Assesses technical terminology usage (`0-1`, higher is better)
@@ -184,6 +184,25 @@ export const mastra = new Mastra({
  });
  ```
  
+ ## Bundler options
+
+ Use `transpilePackages` to compile TypeScript packages or libraries. Use `externals` to exclude dependencies resolved at runtime, and `sourcemap` to emit readable stack traces.
+
+ ```typescript filename="src/mastra/index.ts" showLineNumbers copy
+ import { Mastra } from "@mastra/core/mastra";
+
+ export const mastra = new Mastra({
+   // ...
+   bundler: {
+     transpilePackages: ["utils"],
+     externals: ["ui"],
+     sourcemap: true
+   }
+ });
+ ```
+
+ > See [Mastra Class](../../reference/core/mastra-class.mdx) for more configuration options.
+
  
  ## Next steps