@mastra/mcp-docs-server 0.13.4 → 0.13.5-alpha.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45) hide show
  1. package/.docs/organized/changelogs/%40internal%2Fstorage-test-utils.md +8 -8
  2. package/.docs/organized/changelogs/%40mastra%2Fagui.md +12 -12
  3. package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +38 -38
  4. package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +11 -11
  5. package/.docs/organized/changelogs/%40mastra%2Fcore.md +33 -33
  6. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +42 -42
  7. package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +24 -24
  8. package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +24 -24
  9. package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +52 -52
  10. package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +31 -31
  11. package/.docs/organized/changelogs/%40mastra%2Ffastembed.md +7 -0
  12. package/.docs/organized/changelogs/%40mastra%2Floggers.md +9 -9
  13. package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +21 -21
  14. package/.docs/organized/changelogs/%40mastra%2Fmcp.md +12 -12
  15. package/.docs/organized/changelogs/%40mastra%2Fmemory.md +25 -25
  16. package/.docs/organized/changelogs/%40mastra%2Fpg.md +14 -14
  17. package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +55 -55
  18. package/.docs/organized/changelogs/%40mastra%2Fserver.md +37 -37
  19. package/.docs/organized/changelogs/%40mastra%2Fupstash.md +13 -13
  20. package/.docs/organized/changelogs/create-mastra.md +27 -27
  21. package/.docs/organized/changelogs/mastra.md +63 -63
  22. package/.docs/organized/code-examples/bird-checker-with-express.md +1 -1
  23. package/.docs/organized/code-examples/crypto-chatbot.md +9 -9
  24. package/.docs/organized/code-examples/fireworks-r1.md +1 -1
  25. package/.docs/organized/code-examples/memory-per-resource-example.md +1 -1
  26. package/.docs/organized/code-examples/memory-with-pg.md +1 -1
  27. package/.docs/organized/code-examples/memory-with-upstash.md +1 -1
  28. package/.docs/organized/code-examples/openapi-spec-writer.md +4 -4
  29. package/.docs/raw/client-js/overview.mdx +16 -0
  30. package/.docs/raw/deployment/cloud-providers/aws-lambda.mdx +279 -0
  31. package/.docs/raw/deployment/serverless-platforms/index.mdx +0 -1
  32. package/.docs/raw/frameworks/agentic-uis/ai-sdk.mdx +97 -0
  33. package/.docs/raw/frameworks/agentic-uis/assistant-ui.mdx +34 -0
  34. package/.docs/raw/local-dev/mastra-dev.mdx +10 -3
  35. package/.docs/raw/memory/overview.mdx +2 -1
  36. package/.docs/raw/reference/agents/generate.mdx +3 -2
  37. package/.docs/raw/reference/agents/stream.mdx +3 -2
  38. package/.docs/raw/reference/cli/dev.mdx +12 -0
  39. package/.docs/raw/reference/legacyWorkflows/createRun.mdx +0 -4
  40. package/.docs/raw/reference/memory/Memory.mdx +12 -6
  41. package/.docs/raw/reference/workflows/create-run.mdx +4 -4
  42. package/.docs/raw/reference/workflows/sendEvent.mdx +49 -0
  43. package/.docs/raw/workflows/overview.mdx +1 -1
  44. package/.docs/raw/workflows/pausing-execution.mdx +74 -37
  45. package/package.json +4 -4
@@ -3,7 +3,7 @@
3
3
  {
4
4
  "name": "memory-per-resource-example",
5
5
  "dependencies": {
6
- "@ai-sdk/openai": "^0.0.68",
6
+ "@ai-sdk/openai": "^1.3.22",
7
7
  "@mastra/core": "latest",
8
8
  "@mastra/memory": "latest",
9
9
  "@mastra/libsql": "latest",
@@ -9,7 +9,7 @@
9
9
  "@mastra/pg": "latest"
10
10
  },
11
11
  "devDependencies": {
12
- "dotenv": "^16.5.0",
12
+ "dotenv": "^17.0.0",
13
13
  "tsx": "^4.19.3"
14
14
  }
15
15
  }
@@ -10,7 +10,7 @@
10
10
  "@mastra/upstash": "latest"
11
11
  },
12
12
  "devDependencies": {
13
- "dotenv": "^16.5.0",
13
+ "dotenv": "^17.0.0",
14
14
  "tsx": "^4.19.3"
15
15
  }
16
16
  }
@@ -9,10 +9,10 @@
9
9
  "@mastra/github": "latest",
10
10
  "@mastra/loggers": "latest",
11
11
  "@mastra/rag": "latest",
12
- "@radix-ui/react-accordion": "^1.2.3",
13
- "@radix-ui/react-dialog": "^1.1.6",
14
- "@radix-ui/react-select": "^2.1.6",
15
- "@radix-ui/react-slot": "^1.1.2",
12
+ "@radix-ui/react-accordion": "^1.2.11",
13
+ "@radix-ui/react-dialog": "^1.1.14",
14
+ "@radix-ui/react-select": "^2.2.5",
15
+ "@radix-ui/react-slot": "^1.2.3",
16
16
  "class-variance-authority": "^0.7.1",
17
17
  "clsx": "^2.1.1",
18
18
  "lucide-react": "^0.454.0",
@@ -78,6 +78,22 @@ const client = new MastraClient({
78
78
  });
79
79
  ```
80
80
 
81
+ ## AbortSignal
82
+
83
+ The Mastra Client SDK supports request cancellation using the standard Web API `AbortSignal`. Pass an `AbortSignal` to the client constructor to enable cancellation for all requests:
84
+
85
+ ```typescript
86
+ const controller = new AbortController();
87
+
88
+ const client = new MastraClient({
89
+ baseUrl: "http://localhost:4111",
90
+ abortSignal: controller.signal,
91
+ });
92
+
93
+ // Cancel all requests from this client
94
+ controller.abort();
95
+ ```
96
+
81
97
  ## Example
82
98
 
83
99
  Once your MastraClient is initialized you can start making client calls via the type-safe
@@ -0,0 +1,279 @@
1
+ ---
2
+ title: "AWS Lambda"
3
+ description: "Deploy your Mastra applications to AWS Lambda using Docker containers and the AWS Lambda Web Adapter."
4
+ ---
5
+
6
+ import { Callout, Steps } from "nextra/components";
7
+
8
+ ## AWS Lambda
9
+
10
+ Deploy your Mastra applications to AWS Lambda using Docker containers and the AWS Lambda Web Adapter.
11
+ This approach allows you to run your Mastra server as a containerized Lambda function with automatic scaling.
12
+
13
+ <Callout>
14
+ This guide assumes your Mastra application has been created using the default
15
+ `npx create-mastra@latest` command.
16
+ For more information on how to create a new Mastra application,
17
+ refer to our [getting started guide](/docs/getting-started/installation).
18
+ </Callout>
19
+
20
+ ### Prerequisites
21
+
22
+ Before deploying to AWS Lambda, ensure you have:
23
+
24
+ - [AWS CLI](https://aws.amazon.com/cli/) installed and configured
25
+ - [Docker](https://www.docker.com/) installed and running
26
+ - An AWS account with appropriate permissions for Lambda, ECR, and IAM
27
+ - Your Mastra application configured with appropriate memory storage
28
+
29
+ ### Memory Configuration
30
+
31
+ <Callout>
32
+ AWS Lambda uses an ephemeral file system,
33
+ meaning that any files written to the file system are short-lived and may be lost.
34
+ Avoid using a Mastra storage provider that uses the file system,
35
+ such as `LibSQLStore` with a file URL.
36
+ </Callout>
37
+
38
+ Lambda functions have limitations with file system storage. Configure your Mastra application to use either in-memory or external storage providers:
39
+
40
+ #### Option 1: In-Memory (Simplest)
41
+
42
+ ```typescript filename="src/mastra/index.ts" copy showLineNumbers
43
+ import { LibSQLStore } from "@mastra/libsql";
44
+
45
+ const storage = new LibSQLStore({
46
+ url: ":memory:", // in-memory storage
47
+ });
48
+ ```
49
+
50
+ #### Option 2: External Storage Providers
51
+
52
+ For persistent memory across Lambda invocations, use external storage providers like `LibSQLStore` with Turso, or other storage providers such as `PostgresStore`:
53
+
54
+ ```typescript filename="src/mastra/index.ts" copy showLineNumbers
55
+ import { LibSQLStore } from "@mastra/libsql";
56
+
57
+ const storage = new LibSQLStore({
58
+ url: "libsql://your-database.turso.io", // External Turso database
59
+ authToken: process.env.TURSO_AUTH_TOKEN,
60
+ });
61
+ ```
62
+
63
+ For more memory configuration options, see the [Memory documentation](/docs/memory/overview).
64
+
65
+ ### Creating a Dockerfile
66
+
67
+ <Steps>
68
+
69
+ #### Create a Dockerfile in your project root
70
+
71
+ Create a `Dockerfile` in your Mastra project root directory:
72
+
73
+ ```dockerfile filename="Dockerfile" copy showLineNumbers
74
+ FROM node:22-alpine
75
+
76
+ WORKDIR /app
77
+ COPY package*.json ./
78
+ RUN npm ci
79
+ COPY src ./src
80
+ RUN npx mastra build
81
+ RUN apk add --no-cache gcompat
82
+
83
+ COPY --from=public.ecr.aws/awsguru/aws-lambda-adapter:0.9.0 /lambda-adapter /opt/extensions/lambda-adapter
84
+ RUN addgroup -g 1001 -S nodejs && \
85
+ adduser -S mastra -u 1001 && \
86
+ chown -R mastra:nodejs /app
87
+
88
+ USER mastra
89
+
90
+ ENV PORT=8080
91
+ ENV NODE_ENV=production
92
+ ENV READINESS_CHECK_PATH="/api"
93
+
94
+ EXPOSE 8080
95
+
96
+ CMD ["node", "--import=./.mastra/output/instrumentation.mjs", ".mastra/output/index.mjs"]
97
+ ```
98
+
99
+ </Steps>
100
+
101
+ ### Building and Deploying
102
+
103
+ <Steps>
104
+
105
+ #### Set up environment variables
106
+
107
+ Set up your environment variables for the deployment process:
108
+
109
+ ```bash copy
110
+ export PROJECT_NAME="your-mastra-app"
111
+ export AWS_REGION="us-east-1"
112
+ export AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)
113
+ ```
114
+
115
+ #### Build the Docker image
116
+
117
+ Build your Docker image locally:
118
+
119
+ ```bash copy
120
+ docker build -t "$PROJECT_NAME" .
121
+ ```
122
+
123
+ #### Create an ECR repository
124
+
125
+ Create an Amazon ECR repository to store your Docker image:
126
+
127
+ ```bash copy
128
+ aws ecr create-repository --repository-name "$PROJECT_NAME" --region "$AWS_REGION"
129
+ ```
130
+
131
+ #### Authenticate Docker with ECR
132
+
133
+ Log in to Amazon ECR:
134
+
135
+ ```bash copy
136
+ aws ecr get-login-password --region "$AWS_REGION" | docker login --username AWS --password-stdin "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com"
137
+ ```
138
+
139
+ #### Tag and push the image
140
+
141
+ Tag your image with the ECR repository URI and push it:
142
+
143
+ ```bash copy
144
+ docker tag "$PROJECT_NAME":latest "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$PROJECT_NAME":latest
145
+ docker push "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$PROJECT_NAME":latest
146
+ ```
147
+
148
+ #### Create the Lambda function
149
+
150
+ Create a Lambda function using the AWS Console:
151
+
152
+ 1. Navigate to the [AWS Lambda Console](https://console.aws.amazon.com/lambda/)
153
+ 2. Click **Create function**
154
+ 3. Select **Container image**
155
+ 4. Configure the function:
156
+ - **Function name**: Your function name (e.g., `mastra-app`)
157
+ - **Container image URI**: Click **Browse images** and select your ECR repository, then choose the `latest` tag
158
+ - **Architecture**: Select the architecture that matches your Docker build (typically `x86_64`)
159
+
160
+ #### Configure Function URL
161
+
162
+ Enable Function URL for external access:
163
+
164
+ 1. In the Lambda function configuration, go to **Configuration** > **Function URL**
165
+ 2. Click **Create function URL**
166
+ 3. Set **Auth type** to **NONE** (for public access)
167
+ 4. Configure **CORS** settings:
168
+ - **Allow-Origin**: `*` (restrict to your domain in production)
169
+ - **Allow-Headers**: `content-type`
170
+ - **Allow-Methods**: `*` (audit and restrict in production)
171
+ 5. Click **Save**
172
+
173
+ #### Configure environment variables
174
+
175
+ Add your environment variables in the Lambda function configuration:
176
+
177
+ 1. Go to **Configuration** > **Environment variables**
178
+ 2. Add the required variables for your Mastra application:
179
+ - `OPENAI_API_KEY`: Your OpenAI API key (if using OpenAI)
180
+ - `ANTHROPIC_API_KEY`: Your Anthropic API key (if using Anthropic)
181
+ - `TURSO_AUTH_TOKEN`: Your Turso auth token (if using LibSQL with Turso)
182
+ - Other provider-specific API keys as needed
183
+
184
+ #### Adjust function settings
185
+
186
+ Configure the function's memory and timeout settings:
187
+
188
+ 1. Go to **Configuration** > **General configuration**
189
+ 2. Set the following recommended values:
190
+ - **Memory**: 512 MB (adjust based on your application needs)
191
+ - **Timeout**: 30 seconds (adjust based on your application needs)
192
+ - **Ephemeral storage**: 512 MB (optional, for temporary files)
193
+
194
+ </Steps>
195
+
196
+ ### Testing your deployment
197
+
198
+ Once deployed, test your Lambda function:
199
+
200
+ 1. Copy the **Function URL** from the Lambda console
201
+ 2. Visit the URL in your browser to see your Mastra server's home screen
202
+ 3. Test your agents and workflows using the generated API endpoints
203
+
204
+ For more information about available API endpoints, see the [Server documentation](/docs/deployment/server).
205
+
206
+ ### Connecting your client
207
+
208
+ Update your client application to use the Lambda function URL:
209
+
210
+ ```typescript filename="src/client.ts" copy showLineNumbers
211
+ import { MastraClient } from "@mastra/client-js";
212
+
213
+ const mastraClient = new MastraClient({
214
+ baseUrl: "https://your-function-url.lambda-url.us-east-1.on.aws",
215
+ });
216
+ ```
217
+
218
+ ### Troubleshooting
219
+
220
+ #### Function timeout errors
221
+
222
+ If your Lambda function times out:
223
+
224
+ - Increase the timeout value in **Configuration** > **General configuration**
225
+ - Optimize your Mastra application for faster cold starts
226
+ - Consider using provisioned concurrency for consistent performance
227
+
228
+ #### Memory issues
229
+
230
+ If you encounter memory-related errors:
231
+
232
+ - Increase the memory allocation in **Configuration** > **General configuration**
233
+ - Monitor memory usage in CloudWatch Logs
234
+ - Optimize your application's memory usage
235
+
236
+ #### CORS issues
237
+
238
+ If you encounter CORS errors when accessing endpoints but not the home page:
239
+
240
+ - Verify CORS headers are properly set in your Mastra server configuration
241
+ - Check the Lambda Function URL CORS configuration
242
+ - Ensure your client is making requests to the correct URL
243
+
244
+ #### Container image issues
245
+
246
+ If the Lambda function fails to start:
247
+
248
+ - Verify the Docker image builds successfully locally
249
+ - Check that the `CMD` instruction in your Dockerfile is correct
250
+ - Review CloudWatch Logs for container startup errors
251
+ - Ensure the Lambda Web Adapter is properly installed in the container
252
+
253
+ ### Production considerations
254
+
255
+ For production deployments:
256
+
257
+ #### Security
258
+
259
+ - Restrict CORS origins to your trusted domains
260
+ - Use AWS IAM roles for secure access to other AWS services
261
+ - Store sensitive environment variables in AWS Secrets Manager or Parameter Store
262
+
263
+ #### Monitoring
264
+
265
+ - Enable CloudWatch monitoring for your Lambda function
266
+ - Set up CloudWatch alarms for errors and performance metrics
267
+ - Use AWS X-Ray for distributed tracing
268
+
269
+ #### Scaling
270
+
271
+ - Configure provisioned concurrency for predictable performance
272
+ - Monitor concurrent executions and adjust limits as needed
273
+ - Consider using Application Load Balancer for more complex routing needs
274
+
275
+ ## Next steps
276
+
277
+ - [Mastra Client SDK](/docs/client-js/overview)
278
+ - [AWS Lambda documentation](https://docs.aws.amazon.com/lambda/)
279
+ - [AWS Lambda Web Adapter](https://github.com/awslabs/aws-lambda-web-adapter)
@@ -30,7 +30,6 @@ Before you begin, ensure you have:
30
30
 
31
31
  Specifically, ensure you've removed it from both `src/mastra/index.ts` and `src/mastra/agents/weather-agent.ts`:
32
32
 
33
-
34
33
  ```diff filename="src/mastra/index.ts" showLineNumbers
35
34
  export const mastra = new Mastra({
36
35
  // ...
@@ -189,6 +189,103 @@ export default function Page() {
189
189
  }
190
190
  ```
191
191
 
192
+ ### With additional data / RuntimeContext
193
+
194
+ Using the `sendExtraMessageFields` option, you can send additional data via the UI hooks; Mastra can then consume this data as `RuntimeContext`.
195
+
196
+ #### Frontend: Using sendExtraMessageFields
197
+
198
+ ```typescript
199
+ import { useChat } from '@ai-sdk/react';
200
+
201
+ export function ChatComponent() {
202
+ const { messages, input, handleInputChange, handleSubmit } = useChat({
203
+ api: '/api/chat',
204
+ sendExtraMessageFields: true, // Enable sending extra fields
205
+ });
206
+
207
+ const handleFormSubmit = (e: React.FormEvent) => {
208
+ e.preventDefault();
209
+ handleSubmit(e,{
210
+ // Add context data to the message
211
+ data: {
212
+ userId: 'user123',
213
+ preferences: { language: 'en', temperature: 'celsius' },
214
+ },
215
+ });
216
+ };
217
+
218
+ return (
219
+ <form onSubmit={handleFormSubmit}>
220
+ <input value={input} onChange={handleInputChange} />
221
+ </form>
222
+ );
223
+ }
224
+ ```
225
+
226
+ #### Backend: Handling in API Route
227
+
228
+ ```typescript filename="app/api/chat/route.ts" copy
229
+ import { mastra } from "@/src/mastra";
230
+ import { RuntimeContext } from "@mastra/core/runtime-context";
231
+
232
+ export async function POST(req: Request) {
233
+ const { messages, data } = await req.json();
234
+ const myAgent = mastra.getAgent("weatherAgent");
235
+
236
+ const runtimeContext = new RuntimeContext();
237
+
238
+ if (data) {
239
+ Object.entries(data).forEach(([key, value]) => {
240
+ runtimeContext.set(key, value);
241
+ });
242
+ }
243
+
244
+ const stream = await myAgent.stream(messages, { runtimeContext });
245
+ return stream.toDataStreamResponse();
246
+ }
247
+ ```
248
+
249
+ #### Alternative: Server Middleware
250
+
251
+ You can also handle this at the server middleware level:
252
+
253
+ ```typescript filename="src/mastra/index.ts" copy
254
+ import { Mastra } from "@mastra/core";
255
+
256
+ export const mastra = new Mastra({
257
+ agents: { weatherAgent },
258
+ server: {
259
+ middleware: [
260
+ async (c, next) => {
261
+ const runtimeContext = c.get("runtimeContext");
262
+
263
+ if (c.req.method === 'POST') {
264
+ try {
265
+ // Clone the request since reading the body can only be done once
266
+ const clonedReq = c.req.raw.clone();
267
+ const body = await clonedReq.json();
268
+
269
+
270
+ if (body?.data) {
271
+ Object.entries(body.data).forEach(([key, value]) => {
272
+ runtimeContext.set(key, value);
273
+ });
274
+ }
275
+ } catch {
276
+ // Continue without additional data
277
+ }
278
+ }
279
+
280
+ await next();
281
+ },
282
+ ],
283
+ },
284
+ });
285
+ ```
286
+
287
+ You can then access this data in your tools via the `runtimeContext` parameter. See the [Runtime Context documentation](/docs/agents/runtime-variables) for more details.
288
+
192
289
  ## Tool Calling
193
290
 
194
291
  ### AI SDK Tool Format
@@ -69,6 +69,40 @@ You now have a basic Mastra server project ready. You should have the following
69
69
  Ensure that you have set the appropriate environment variables for your LLM provider in the `.env` file.
70
70
  </Callout>
71
71
 
72
+ ### Compatibility Fix
73
+
74
+ Currently, to ensure proper compatibility between Mastra and Assistant UI, you need to set up server middleware. Update your `src/mastra/index.ts` file with the following configuration:
75
+
76
+ ```typescript showLineNumbers copy filename="src/mastra/index.ts"
77
+ export const mastra = new Mastra({
78
+ //mastra server middleware
79
+ server:{
80
+ middleware: [{
81
+ path: '/api/agents/*/stream',
82
+ handler: async (c,next)=>{
83
+
84
+ const body = await c.req.json();
85
+
86
+ if ('state' in body && body.state == null) {
87
+ delete body.state;
88
+ delete body.tools;
89
+ }
90
+
91
+ c.req.json = async() => body;
92
+
93
+ return next()
94
+ }
95
+ }]
96
+ },
97
+ });
98
+ ```
99
+
100
+ This middleware ensures that when Assistant UI sends a request with `state: null` and `tools: {}` in the request body, those properties are removed so the request works properly with Mastra.
101
+
102
+ <Callout type="info">
103
+ The `state: null` property can cause errors like `Cannot use 'in' operator to search for 'input' in null` in Mastra. Additionally, passing `tools: {}` overrides Mastra's built-in tools. Mastra only supports `clientTools` via the Mastra client SDK from the client side. For more information about client tools, see the [Client Tools documentation](/reference/client-js/agents#client-tools).
104
+ </Callout>
105
+
72
106
  ### Run the Mastra Server
73
107
 
74
108
  Run the Mastra server using the following command:
@@ -4,6 +4,7 @@ description: Documentation for the Mastra local development environment for Mast
4
4
  ---
5
5
 
6
6
  import YouTube from "@/components/youtube";
7
+ import { VideoPlayer } from "@/components/video-player"
7
8
  import { Tabs, Tab } from "@/components/tabs";
8
9
 
9
10
  # Playground
@@ -42,7 +43,9 @@ The Playground lets you interact with your agents, workflows, and tools. It prov
42
43
 
43
44
  Quickly test and debug your agents during development using the interactive chat interface in the Agent Playground.
44
45
 
45
- ![Agents Playground](/image/local-dev/local-dev-agents-playground.jpg)
46
+ <VideoPlayer
47
+ src="https://res.cloudinary.com/dygi6femd/video/upload/v1751406022/local-dev-agents-playground_100_m3begx.mp4"
48
+ />
46
49
 
47
50
  Key features:
48
51
 
@@ -56,7 +59,9 @@ Key features:
56
59
 
57
60
  Validate workflows by supplying defined inputs and visualizing each step within the Workflow Playground.
58
61
 
59
- ![Workflows Playground](/image/local-dev/local-dev-workflow-playground.jpg)
62
+ <VideoPlayer
63
+ src="https://res.cloudinary.com/dygi6femd/video/upload/v1751406027/local-dev-workflows-playground_100_rbc466.mp4"
64
+ />
60
65
 
61
66
  Key features:
62
67
 
@@ -70,7 +75,9 @@ Key features:
70
75
 
71
76
  Quickly test and debug custom tools in isolation using the Tools Playground, without running a full agent or workflow.
72
77
 
73
- ![Tools Playground](/image/local-dev/local-dev-tools-playground.jpg)
78
+ <VideoPlayer
79
+ src="https://res.cloudinary.com/dygi6femd/video/upload/v1751406316/local-dev-agents-tools_100_fe1jdt.mp4"
80
+ />
74
81
 
75
82
  Key features:
76
83
 
@@ -96,7 +96,7 @@ const memory = new Memory({
96
96
  });
97
97
  ```
98
98
 
99
- By default, title generation uses the same model as your agent. For cost optimization, you can specify a cheaper model specifically for title generation:
99
+ By default, title generation uses the same model and default instructions as your agent. For customization or cost optimization, you can specify a different model or provide custom instructions specifically for title generation:
100
100
 
101
101
  ```typescript {5-7}
102
102
  const memory = new Memory({
@@ -104,6 +104,7 @@ const memory = new Memory({
104
104
  threads: {
105
105
  generateTitle: {
106
106
  model: openai("gpt-4.1-nano"), // Use cheaper model for titles
107
+ instructions: "Generate a concise title for this conversation based on the first user message.",
107
108
  },
108
109
  },
109
110
  },
@@ -265,10 +265,11 @@ Configuration options for memory management:
265
265
  parameters: [
266
266
  {
267
267
  name: "generateTitle",
268
- type: "boolean | { model: LanguageModelV1 | ((ctx: RuntimeContext) => LanguageModelV1 | Promise<LanguageModelV1>) }",
268
+ type: "boolean | { model: LanguageModelV1 | ((ctx: RuntimeContext) => LanguageModelV1 | Promise<LanguageModelV1>), instructions: string | ((ctx: RuntimeContext) => string | Promise<string>) }",
269
269
  isOptional: true,
270
270
  description:
271
- "Controls automatic thread title generation from the user's first message. Can be a boolean to enable/disable using the agent's model, or an object with a custom model for title generation (useful for cost optimization). Example: { model: openai('gpt-4.1-nano') }",
271
+ `Controls automatic thread title generation from the user's first message. Can be a boolean to enable/disable using the agent's model, or an object specifying a custom model and/or custom instructions for title generation (useful for cost optimization or title customization).
272
+ Example: { model: openai('gpt-4.1-nano'), instructions: 'Generate a concise title based on the initial user message.' }`,
272
273
  },
273
274
  ],
274
275
  },
@@ -271,10 +271,11 @@ Configuration options for memory management:
271
271
  parameters: [
272
272
  {
273
273
  name: "generateTitle",
274
- type: "boolean | { model: LanguageModelV1 | ((ctx: RuntimeContext) => LanguageModelV1 | Promise<LanguageModelV1>) }",
274
+ type: "boolean | { model: LanguageModelV1 | ((ctx: RuntimeContext) => LanguageModelV1 | Promise<LanguageModelV1>), instructions: string | ((ctx: RuntimeContext) => string | Promise<string>) }",
275
275
  isOptional: true,
276
276
  description:
277
- "Controls automatic thread title generation from the user's first message. Can be a boolean to enable/disable using the agent's model, or an object with a custom model for title generation (useful for cost optimization). Example: { model: openai('gpt-4.1-nano') }",
277
+ `Controls automatic thread title generation from the user's first message. Can be a boolean to enable/disable using the agent's model, or an object specifying a custom model and/or custom instructions for title generation (useful for cost optimization or title customization).
278
+ Example: { model: openai('gpt-4.1-nano'), instructions: 'Generate a concise title based on the initial user message.' }`,
278
279
  },
279
280
  ],
280
281
  },
@@ -48,6 +48,18 @@ mastra dev [options]
48
48
  description: "Path to custom environment file",
49
49
  isOptional: true,
50
50
  },
51
+ {
52
+ name: "--inspect",
53
+ type: "boolean",
54
+ description: "Start the dev server in inspect mode for debugging (cannot be used with --inspect-brk)",
55
+ isOptional: true,
56
+ },
57
+ {
58
+ name: "--inspect-brk",
59
+ type: "boolean",
60
+ description: "Start the dev server in inspect mode and break at the beginning of the script (cannot be used with --inspect)",
61
+ isOptional: true,
62
+ },
51
63
  {
52
64
  name: "--help",
53
65
  type: "boolean",
@@ -74,7 +74,3 @@ try {
74
74
  - [Workflow Class Reference](./workflow.mdx)
75
75
  - [Step Class Reference](./step-class.mdx)
76
76
  - See the [Creating a Workflow](../../examples/workflows_legacy/creating-a-workflow.mdx) example for complete usage
77
-
78
- ```
79
-
80
- ```