@mastra/mcp-docs-server 0.13.16 → 0.13.17-alpha.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. package/.docs/organized/changelogs/%40internal%2Fstorage-test-utils.md +8 -8
  2. package/.docs/organized/changelogs/%40internal%2Ftypes-builder.md +2 -0
  3. package/.docs/organized/changelogs/%40mastra%2Fastra.md +10 -10
  4. package/.docs/organized/changelogs/%40mastra%2Fauth.md +6 -0
  5. package/.docs/organized/changelogs/%40mastra%2Fchroma.md +10 -10
  6. package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +10 -10
  7. package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +26 -26
  8. package/.docs/organized/changelogs/%40mastra%2Fcloud.md +10 -10
  9. package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +10 -10
  10. package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +10 -10
  11. package/.docs/organized/changelogs/%40mastra%2Fcore.md +35 -35
  12. package/.docs/organized/changelogs/%40mastra%2Fcouchbase.md +11 -11
  13. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +27 -0
  14. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +19 -19
  15. package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +19 -19
  16. package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +20 -20
  17. package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +31 -31
  18. package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +11 -11
  19. package/.docs/organized/changelogs/%40mastra%2Fevals.md +10 -10
  20. package/.docs/organized/changelogs/%40mastra%2Ffastembed.md +6 -0
  21. package/.docs/organized/changelogs/%40mastra%2Ffirecrawl.md +19 -19
  22. package/.docs/organized/changelogs/%40mastra%2Fgithub.md +10 -10
  23. package/.docs/organized/changelogs/%40mastra%2Flance.md +10 -10
  24. package/.docs/organized/changelogs/%40mastra%2Flibsql.md +10 -10
  25. package/.docs/organized/changelogs/%40mastra%2Floggers.md +10 -10
  26. package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +26 -26
  27. package/.docs/organized/changelogs/%40mastra%2Fmcp-registry-registry.md +11 -11
  28. package/.docs/organized/changelogs/%40mastra%2Fmcp.md +19 -19
  29. package/.docs/organized/changelogs/%40mastra%2Fmem0.md +10 -10
  30. package/.docs/organized/changelogs/%40mastra%2Fmemory.md +19 -19
  31. package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +10 -10
  32. package/.docs/organized/changelogs/%40mastra%2Fmssql.md +9 -0
  33. package/.docs/organized/changelogs/%40mastra%2Fopensearch.md +10 -10
  34. package/.docs/organized/changelogs/%40mastra%2Fpg.md +10 -10
  35. package/.docs/organized/changelogs/%40mastra%2Fpinecone.md +10 -10
  36. package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +25 -25
  37. package/.docs/organized/changelogs/%40mastra%2Fqdrant.md +11 -11
  38. package/.docs/organized/changelogs/%40mastra%2Frag.md +23 -23
  39. package/.docs/organized/changelogs/%40mastra%2Fragie.md +10 -10
  40. package/.docs/organized/changelogs/%40mastra%2Fschema-compat.md +12 -0
  41. package/.docs/organized/changelogs/%40mastra%2Fserver.md +26 -26
  42. package/.docs/organized/changelogs/%40mastra%2Fturbopuffer.md +10 -10
  43. package/.docs/organized/changelogs/%40mastra%2Fupstash.md +10 -10
  44. package/.docs/organized/changelogs/%40mastra%2Fvectorize.md +10 -10
  45. package/.docs/organized/changelogs/%40mastra%2Fvoice-azure.md +10 -10
  46. package/.docs/organized/changelogs/%40mastra%2Fvoice-cloudflare.md +10 -10
  47. package/.docs/organized/changelogs/%40mastra%2Fvoice-deepgram.md +10 -10
  48. package/.docs/organized/changelogs/%40mastra%2Fvoice-elevenlabs.md +10 -10
  49. package/.docs/organized/changelogs/%40mastra%2Fvoice-gladia.md +9 -0
  50. package/.docs/organized/changelogs/%40mastra%2Fvoice-google-gemini-live.md +9 -0
  51. package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +10 -10
  52. package/.docs/organized/changelogs/%40mastra%2Fvoice-murf.md +10 -10
  53. package/.docs/organized/changelogs/%40mastra%2Fvoice-openai-realtime.md +11 -11
  54. package/.docs/organized/changelogs/%40mastra%2Fvoice-openai.md +11 -11
  55. package/.docs/organized/changelogs/%40mastra%2Fvoice-playai.md +10 -10
  56. package/.docs/organized/changelogs/%40mastra%2Fvoice-sarvam.md +10 -10
  57. package/.docs/organized/changelogs/%40mastra%2Fvoice-speechify.md +10 -10
  58. package/.docs/organized/changelogs/create-mastra.md +21 -21
  59. package/.docs/organized/changelogs/mastra.md +32 -32
  60. package/.docs/raw/agents/overview.mdx +29 -47
  61. package/.docs/raw/deployment/monorepo.mdx +107 -0
  62. package/.docs/raw/frameworks/web-frameworks/astro.mdx +1 -0
  63. package/.docs/raw/frameworks/web-frameworks/next-js.mdx +5 -201
  64. package/.docs/raw/frameworks/web-frameworks/sveltekit.mdx +5 -0
  65. package/.docs/raw/frameworks/web-frameworks/vite-react.mdx +5 -0
  66. package/.docs/raw/reference/core/mastra-class.mdx +5 -3
  67. package/.docs/raw/reference/scorers/context-precision.mdx +130 -0
  68. package/.docs/raw/reference/scorers/context-relevance.mdx +222 -0
  69. package/.docs/raw/reference/tools/graph-rag-tool.mdx +7 -0
  70. package/.docs/raw/reference/tools/vector-query-tool.mdx +7 -0
  71. package/.docs/raw/scorers/off-the-shelf-scorers.mdx +17 -1
  72. package/.docs/raw/server-db/local-dev-playground.mdx +20 -1
  73. package/.docs/raw/workflows/control-flow.mdx +0 -46
  74. package/.docs/raw/workflows/error-handling.mdx +213 -0
  75. package/package.json +4 -4
@@ -0,0 +1,130 @@
+ ---
+ title: "Reference: Context Precision Scorer | Scorers | Mastra Docs"
+ description: Documentation for the Context Precision Scorer in Mastra. Evaluates the relevance and precision of retrieved context for generating expected outputs using Mean Average Precision.
+ ---
+
+ import { PropertiesTable } from "@/components/properties-table";
+
+ # Context Precision Scorer
+
+ The `createContextPrecisionScorer()` function creates a scorer that evaluates how relevant and well-positioned retrieved context pieces are for generating expected outputs. It uses **Mean Average Precision (MAP)** to reward systems that place relevant context earlier in the sequence.
+
+ ## Parameters
+
+ <PropertiesTable
+ content={[
+ {
+ name: "model",
+ type: "MastraLanguageModel",
+ description: "The language model to use for evaluating context relevance",
+ required: true,
+ },
+ {
+ name: "options",
+ type: "ContextPrecisionMetricOptions",
+ description: "Configuration options for the scorer",
+ required: true,
+ children: [
+ {
+ name: "context",
+ type: "string[]",
+ description: "Array of context pieces to evaluate for relevance",
+ required: false,
+ },
+ {
+ name: "contextExtractor",
+ type: "(input, output) => string[]",
+ description: "Function to dynamically extract context from the run input and output",
+ required: false,
+ },
+ {
+ name: "scale",
+ type: "number",
+ description: "Scale factor to multiply the final score (default: 1)",
+ required: false,
+ },
+ ],
+ },
+ ]}
+ />
+
+ :::note
+ Either `context` or `contextExtractor` must be provided. If both are provided, `contextExtractor` takes precedence.
+ :::
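+
+ ## Usage Example
+
+ A minimal configuration sketch, mirroring the pattern used by the Context Relevance scorer later in this diff (the `openai('gpt-4o')` model choice is illustrative, not a requirement):
+
+ ```typescript
+ const scorer = createContextPrecisionScorer({
+   model: openai('gpt-4o'),
+   options: {
+     context: [
+       'Einstein won the Nobel Prize for the photoelectric effect',
+       'He developed the theory of relativity',
+     ],
+     scale: 1,
+   },
+ });
+ ```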
+
+ ## .run() Returns
+
+ <PropertiesTable
+ content={[
+ {
+ name: "score",
+ type: "number",
+ description: "Mean Average Precision score between 0 and scale (default 0-1)",
+ },
+ {
+ name: "reason",
+ type: "string",
+ description: "Human-readable explanation of the context precision evaluation",
+ },
+ ]}
+ />
+
+ ## Scoring Details
+
+ ### Mean Average Precision (MAP)
+
+ Context Precision uses **Mean Average Precision** to evaluate both relevance and positioning:
+
+ 1. **Context Evaluation**: Each context piece is classified as relevant or irrelevant for generating the expected output
+ 2. **Precision Calculation**: For each relevant context at position `i`, precision = `relevant_items_so_far / (i + 1)`
+ 3. **Average Precision**: Sum all precision values and divide by total relevant items
+ 4. **Final Score**: Multiply by scale factor and round to 2 decimals
+
+ ### Scoring Formula
+
+ ```
+ MAP = (Σ Precision@k) / R
+
+ Where:
+ - Precision@k = (relevant items in positions 1...k) / k
+ - R = total number of relevant items
+ - Only calculated at positions where relevant items appear
+ ```
+
+ ### Score Interpretation
+
+ - **1.0** = Perfect precision (all relevant context appears first)
+ - **0.5-0.9** = Good precision with some relevant context well-positioned
+ - **0.1-0.4** = Poor precision with relevant context buried or scattered
+ - **0.0** = No relevant context found
+
+ ### Example Calculation
+
+ Given context: `[relevant, irrelevant, relevant, irrelevant]`
+
+ - Position 0: Relevant → Precision = 1/1 = 1.0
+ - Position 1: Skip (irrelevant)
+ - Position 2: Relevant → Precision = 2/3 ≈ 0.67
+ - Position 3: Skip (irrelevant)
+
+ MAP = (1.0 + 0.67) / 2 ≈ **0.83**
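+
+ The same calculation, sketched in TypeScript (illustrative only; the scorer obtains the relevance judgments from the model rather than taking boolean flags as input):
+
+ ```typescript
+ // MAP over binary relevance flags, one per context position
+ function meanAveragePrecision(relevant: boolean[]): number {
+   let hits = 0;
+   let sum = 0;
+   relevant.forEach((isRelevant, i) => {
+     if (isRelevant) {
+       hits += 1;
+       sum += hits / (i + 1); // Precision@k at this relevant position
+     }
+   });
+   return hits === 0 ? 0 : sum / hits;
+ }
+
+ meanAveragePrecision([true, false, true, false]); // (1/1 + 2/3) / 2 ≈ 0.83
+ ```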
+
+ ## Usage Patterns
+
+ ### RAG System Evaluation
+ Ideal for evaluating retrieved context in RAG pipelines where:
+ - Context ordering matters for model performance
+ - You need to measure retrieval quality beyond simple relevance
+ - Early relevant context is more valuable than later relevant context
+
+ ### Context Window Optimization
+ Use when optimizing context selection for:
+ - Limited context windows
+ - Token budget constraints
+ - Multi-step reasoning tasks
+
+ ## Related
+
+ - [Answer Relevancy Scorer](/reference/scorers/answer-relevancy) - Evaluates if answers address the question
+ - [Faithfulness Scorer](/reference/scorers/faithfulness) - Measures answer groundedness in context
+ - [Custom Scorers](/docs/scorers/custom-scorers) - Creating your own evaluation metrics
@@ -0,0 +1,222 @@
+ ---
+ title: "Reference: Context Relevance Scorer | Scorers | Mastra Docs"
+ description: Documentation for the Context Relevance Scorer in Mastra. Evaluates the relevance and utility of provided context for generating agent responses using weighted relevance scoring.
+ ---
+
+ import { PropertiesTable } from "@/components/properties-table";
+
+ # Context Relevance Scorer
+
+ The `createContextRelevanceScorerLLM()` function creates a scorer that evaluates how relevant and useful provided context was for generating agent responses. It uses weighted relevance levels and applies penalties for unused high-relevance context and missing information.
+
+ ## Parameters
+
+ <PropertiesTable
+ content={[
+ {
+ name: "model",
+ type: "MastraLanguageModel",
+ description: "The language model to use for evaluating context relevance",
+ required: true,
+ },
+ {
+ name: "options",
+ type: "ContextRelevanceOptions",
+ description: "Configuration options for the scorer",
+ required: true,
+ children: [
+ {
+ name: "context",
+ type: "string[]",
+ description: "Array of context pieces to evaluate for relevance",
+ required: false,
+ },
+ {
+ name: "contextExtractor",
+ type: "(input, output) => string[]",
+ description: "Function to dynamically extract context from the run input and output",
+ required: false,
+ },
+ {
+ name: "scale",
+ type: "number",
+ description: "Scale factor to multiply the final score (default: 1)",
+ required: false,
+ },
+ {
+ name: "penalties",
+ type: "object",
+ description: "Configurable penalty settings for scoring",
+ required: false,
+ children: [
+ {
+ name: "unusedHighRelevanceContext",
+ type: "number",
+ description: "Penalty per unused high-relevance context (default: 0.1)",
+ required: false,
+ },
+ {
+ name: "missingContextPerItem",
+ type: "number",
+ description: "Penalty per missing context item (default: 0.15)",
+ required: false,
+ },
+ {
+ name: "maxMissingContextPenalty",
+ type: "number",
+ description: "Maximum total missing context penalty (default: 0.5)",
+ required: false,
+ },
+ ],
+ },
+ ],
+ },
+ ]}
+ />
+
+ :::note
+ Either `context` or `contextExtractor` must be provided. If both are provided, `contextExtractor` takes precedence.
+ :::
+
+ ## .run() Returns
+
+ <PropertiesTable
+ content={[
+ {
+ name: "score",
+ type: "number",
+ description: "Weighted relevance score between 0 and scale (default 0-1)",
+ },
+ {
+ name: "reason",
+ type: "string",
+ description: "Human-readable explanation of the context relevance evaluation",
+ },
+ ]}
+ />
+
+ ## Scoring Details
+
+ ### Weighted Relevance Scoring
+
+ Context Relevance uses a weighted scoring algorithm that considers:
+
+ 1. **Relevance Levels**: Each context piece is classified with weighted values:
+ - `high` = 1.0 (directly addresses the query)
+ - `medium` = 0.7 (supporting information)
+ - `low` = 0.3 (tangentially related)
+ - `none` = 0.0 (completely irrelevant)
+
+ 2. **Usage Detection**: Tracks whether relevant context was actually used in the response
+
+ 3. **Penalties Applied** (configurable via `penalties` options):
+ - **Unused High-Relevance**: `unusedHighRelevanceContext` penalty per unused high-relevance context (default: 0.1)
+ - **Missing Context**: Up to `maxMissingContextPenalty` for identified missing information (default: 0.5)
+
+ ### Scoring Formula
+
+ ```
+ Base Score = Σ(relevance_weights) / (num_contexts × 1.0)
+ Usage Penalty = count(unused_high_relevance) × unusedHighRelevanceContext
+ Missing Penalty = min(count(missing_context) × missingContextPerItem, maxMissingContextPenalty)
+
+ Final Score = max(0, Base Score - Usage Penalty - Missing Penalty) × scale
+ ```
+
+ **Default Values**:
+ - `unusedHighRelevanceContext` = 0.1 (10% penalty per unused high-relevance context)
+ - `missingContextPerItem` = 0.15 (15% penalty per missing context item)
+ - `maxMissingContextPenalty` = 0.5 (maximum 50% penalty for missing context)
+ - `scale` = 1
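+
+ The formula above, sketched in TypeScript with the default penalty values (illustrative only; in the scorer itself the relevance levels, usage flags, and missing-context count come from the judge model):
+
+ ```typescript
+ const WEIGHTS = { high: 1.0, medium: 0.7, low: 0.3, none: 0.0 };
+
+ type JudgedContext = { level: keyof typeof WEIGHTS; wasUsed: boolean };
+
+ function contextRelevanceScore(
+   judged: JudgedContext[],
+   missingContextCount: number,
+   scale = 1,
+ ): number {
+   // Base Score = Σ(relevance_weights) / (num_contexts × 1.0); guard against empty context
+   const base =
+     judged.reduce((sum, c) => sum + WEIGHTS[c.level], 0) / Math.max(judged.length, 1);
+   // 0.1 penalty per unused high-relevance context
+   const usagePenalty = judged.filter((c) => c.level === 'high' && !c.wasUsed).length * 0.1;
+   // 0.15 per missing context item, capped at 0.5
+   const missingPenalty = Math.min(missingContextCount * 0.15, 0.5);
+   return Math.max(0, base - usagePenalty - missingPenalty) * scale;
+ }
+ ```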
+
+ ### Score Interpretation
+
+ - **0.9-1.0** = Excellent relevance with minimal gaps
+ - **0.7-0.8** = Good relevance with some unused or missing context
+ - **0.4-0.6** = Mixed relevance with significant gaps
+ - **0.0-0.3** = Poor relevance or mostly irrelevant context
+
+ ### Difference from Context Precision
+
+ | Aspect | Context Relevance | Context Precision |
+ |--------|-------------------|-------------------|
+ | **Algorithm** | Weighted levels with penalties | Mean Average Precision (MAP) |
+ | **Relevance** | Multiple levels (high/medium/low/none) | Binary (yes/no) |
+ | **Position** | Not considered | Critical (rewards early placement) |
+ | **Usage** | Tracks and penalizes unused context | Not considered |
+ | **Missing** | Identifies and penalizes gaps | Not evaluated |
+
+ ## Usage Examples
+
+ ### Basic Configuration
+
+ ```typescript
+ const scorer = createContextRelevanceScorerLLM({
+   model: openai('gpt-4o'),
+   options: {
+     context: ['Einstein won the Nobel Prize for his work on the photoelectric effect'],
+     scale: 1,
+   },
+ });
+ ```
+
+ ### Custom Penalty Configuration
+
+ ```typescript
+ const scorer = createContextRelevanceScorerLLM({
+   model: openai('gpt-4o'),
+   options: {
+     context: ['Context information...'],
+     penalties: {
+       unusedHighRelevanceContext: 0.05, // Lower penalty for unused context
+       missingContextPerItem: 0.2, // Higher penalty per missing item
+       maxMissingContextPenalty: 0.4, // Lower maximum penalty cap
+     },
+     scale: 2, // Double the final score
+   },
+ });
+ ```
+
+ ### Dynamic Context Extraction
+
+ ```typescript
+ const scorer = createContextRelevanceScorerLLM({
+   model: openai('gpt-4o'),
+   options: {
+     contextExtractor: (input, output) => {
+       // Extract context based on the query
+       const userQuery = input?.inputMessages?.[0]?.content || '';
+       if (userQuery.includes('Einstein')) {
+         return [
+           'Einstein won the Nobel Prize for the photoelectric effect',
+           'He developed the theory of relativity'
+         ];
+       }
+       return ['General physics information'];
+     },
+     penalties: {
+       unusedHighRelevanceContext: 0.15,
+     },
+   },
+ });
+ ```
+
+ ## Usage Patterns
+
+ ### Content Generation Evaluation
+ Best for evaluating context quality in:
+ - Chat systems where context usage matters
+ - RAG pipelines needing nuanced relevance assessment
+ - Systems where missing context affects quality
+
+ ### Context Selection Optimization
+ Use when optimizing for:
+ - Comprehensive context coverage
+ - Effective context utilization
+ - Identifying context gaps
+
+ ## Related
+
+ - [Context Precision Scorer](/reference/scorers/context-precision) - Evaluates context ranking using MAP
+ - [Faithfulness Scorer](/reference/scorers/faithfulness) - Measures answer groundedness in context
+ - [Custom Scorers](/docs/scorers/custom-scorers) - Creating your own evaluation metrics
@@ -98,6 +98,13 @@ const graphTool = createGraphRAGTool({
  isOptional: true,
  defaultValue: "Default graph options",
  },
+ {
+ name: "providerOptions",
+ type: "Record<string, Record<string, any>>",
+ description:
+ "Provider-specific options for the embedding model (e.g., outputDimensionality). **Important**: Only works with AI SDK EmbeddingModelV2 models. For V1 models, configure options when creating the model itself.",
+ isOptional: true,
+ },
  ]}
  />
@@ -108,6 +108,13 @@ const queryTool = createVectorQueryTool({
  "Database-specific configuration options for optimizing queries. (Can be set at creation or overridden at runtime.)",
  isOptional: true,
  },
+ {
+ name: "providerOptions",
+ type: "Record<string, Record<string, any>>",
+ description:
+ "Provider-specific options for the embedding model (e.g., outputDimensionality). **Important**: Only works with AI SDK EmbeddingModelV2 models. For V1 models, configure options when creating the model itself.",
+ isOptional: true,
+ },
  ]}
  />
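For illustration, the new `providerOptions` parameter (added to both tools above) would be passed alongside the tool's existing configuration. A minimal sketch; the store name, index name, embedding model, and the `google`/`outputDimensionality` entries are assumptions, and the options only take effect with AI SDK EmbeddingModelV2 models:

```typescript
const queryTool = createVectorQueryTool({
  vectorStoreName: "pgVector", // assumed store name
  indexName: "embeddings", // assumed index name
  model: google.textEmbedding("gemini-embedding-001"), // assumed V2 embedding model
  providerOptions: {
    // provider-specific options, e.g. a smaller output dimension
    google: { outputDimensionality: 768 },
  },
});
```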
@@ -21,6 +21,22 @@ These scorers evaluate how correct, truthful, and complete your agent's answers
  - [`textual-difference`](/reference/scorers/textual-difference): Measures textual differences between strings (`0-1`, higher means more similar)
  - [`tool-call-accuracy`](/reference/scorers/tool-call-accuracy): Evaluates whether the LLM selects the correct tool from available options (`0-1`, higher is better)

+ ### Context Quality
+
+ These scorers evaluate the quality and relevance of context used in generating responses:
+
+ - [`context-precision`](/reference/scorers/context-precision): Evaluates context relevance and ranking using Mean Average Precision, rewarding early placement of relevant context (`0-1`, higher is better)
+ - [`context-relevance`](/reference/scorers/context-relevance): Measures context utility with nuanced relevance levels, usage tracking, and missing context detection (`0-1`, higher is better)
+
+ :::tip Context Scorer Selection
+ - Use **Context Precision** when context ordering matters and you need standard IR metrics (ideal for RAG ranking evaluation)
+ - Use **Context Relevance** when you need detailed relevance assessment and want to track context usage and identify gaps
+
+ Both context scorers support:
+ - **Static context**: Pre-defined context arrays
+ - **Dynamic context extraction**: Extract context from runs using custom functions (ideal for RAG systems, vector databases, etc.)
+ :::
+
  ### Output Quality

  These scorers evaluate adherence to format, style, and safety requirements:
@@ -28,4 +44,4 @@ These scorers evaluate adherence to format, style, and safety requirements:
  - [`tone-consistency`](/reference/scorers/tone-consistency): Measures consistency in formality, complexity, and style (`0-1`, higher is better)
  - [`toxicity`](/reference/scorers/toxicity): Detects harmful or inappropriate content (`0-1`, lower is better)
  - [`bias`](/reference/scorers/bias): Detects potential biases in the output (`0-1`, lower is better)
- - [`keyword-coverage`](/reference/scorers/keyword-coverage): Assesses technical terminology usage (`0-1`, higher is better)
+ - [`keyword-coverage`](/reference/scorers/keyword-coverage): Assesses technical terminology usage (`0-1`, higher is better)
@@ -116,7 +116,7 @@ Key features:

  ## REST API Endpoints

- The local development server exposes a set of REST API routes via the [Mastra Server](/docs/deployment/server), allowing you to test and interact with your agents and workflows before deployment.
+ The local development server exposes a set of REST API routes via the [Mastra Server](/docs/deployment/server-deployment), allowing you to test and interact with your agents and workflows before deployment.

  For a full overview of available API routes, including agents, tools, and workflows, see the [Routes reference](/reference/cli/dev#routes).

@@ -184,6 +184,25 @@ export const mastra = new Mastra({
  });
  ```

+ ## Bundler options
+
+ Use `transpilePackages` to compile TypeScript packages or libraries, `externals` to exclude dependencies that should be resolved at runtime, and `sourcemap` to emit source maps for readable stack traces.
+
+ ```typescript filename="src/mastra/index.ts" showLineNumbers copy
+ import { Mastra } from "@mastra/core/mastra";
+
+ export const mastra = new Mastra({
+   // ...
+   bundler: {
+     transpilePackages: ["utils"],
+     externals: ["ui"],
+     sourcemap: true
+   }
+ });
+ ```
+
+ > See [Mastra Class](../../reference/core/mastra-class.mdx) for more configuration options.
+

  ## Next steps

@@ -190,52 +190,6 @@ export const testWorkflow = createWorkflow({...})
  .commit();
  ```

- ## Exiting early with `bail()`
-
- Use `bail()` in a step to exit early with a successful result. This returns the provided payload as the step output and ends workflow execution.
-
- ```typescript {7} filename="src/mastra/workflows/test-workflow.ts" showLineNumbers copy
- import { createWorkflow, createStep } from "@mastra/core/workflows";
- import { z } from "zod";
-
- const step1 = createStep({
-   id: 'step1',
-   execute: async ({ bail }) => {
-     return bail({ result: 'bailed' });
-   },
-   inputSchema: z.object({ value: z.string() }),
-   outputSchema: z.object({ result: z.string() }),
- });
-
- export const testWorkflow = createWorkflow({...})
-   .then(step1)
-   .commit();
- ```
-
- ## Exiting early with `Error()`
-
- Use `throw new Error()` in a step to exit with an error.
-
- ```typescript {7} filename="src/mastra/workflows/test-workflow.ts" showLineNumbers copy
- import { createWorkflow, createStep } from "@mastra/core/workflows";
- import { z } from "zod";
-
- const step1 = createStep({
-   id: 'step1',
-   execute: async () => {
-     throw new Error('bailed');
-   },
-   inputSchema: z.object({ value: z.string() }),
-   outputSchema: z.object({ result: z.string() }),
- });
-
- export const testWorkflow = createWorkflow({...})
-   .then(step1)
-   .commit();
- ```
-
- This throws an error from the step and stops workflow execution, returning the error as the result.
-
  ## Example Run Instance

  The following example demonstrates how to start a run with multiple inputs. Each input will pass through the `mapStep` sequentially.