@mastra/mcp-docs-server 0.13.4-alpha.0 → 0.13.5-alpha.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. package/.docs/organized/changelogs/%40internal%2Fstorage-test-utils.md +8 -8
  2. package/.docs/organized/changelogs/%40mastra%2Fagui.md +12 -12
  3. package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +46 -46
  4. package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +11 -11
  5. package/.docs/organized/changelogs/%40mastra%2Fcore.md +35 -35
  6. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +42 -42
  7. package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +24 -24
  8. package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +24 -24
  9. package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +61 -61
  10. package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +31 -31
  11. package/.docs/organized/changelogs/%40mastra%2Ffastembed.md +7 -0
  12. package/.docs/organized/changelogs/%40mastra%2Floggers.md +15 -15
  13. package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +21 -21
  14. package/.docs/organized/changelogs/%40mastra%2Fmcp.md +12 -12
  15. package/.docs/organized/changelogs/%40mastra%2Fmemory.md +25 -25
  16. package/.docs/organized/changelogs/%40mastra%2Fpg.md +14 -14
  17. package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +65 -65
  18. package/.docs/organized/changelogs/%40mastra%2Fserver.md +44 -44
  19. package/.docs/organized/changelogs/%40mastra%2Fupstash.md +13 -13
  20. package/.docs/organized/changelogs/create-mastra.md +33 -33
  21. package/.docs/organized/changelogs/mastra.md +79 -79
  22. package/.docs/organized/code-examples/bird-checker-with-express.md +1 -1
  23. package/.docs/organized/code-examples/crypto-chatbot.md +9 -9
  24. package/.docs/organized/code-examples/fireworks-r1.md +1 -1
  25. package/.docs/organized/code-examples/memory-per-resource-example.md +1 -1
  26. package/.docs/organized/code-examples/memory-with-pg.md +1 -1
  27. package/.docs/organized/code-examples/memory-with-upstash.md +1 -1
  28. package/.docs/organized/code-examples/openapi-spec-writer.md +4 -4
  29. package/.docs/raw/client-js/overview.mdx +16 -0
  30. package/.docs/raw/deployment/cloud-providers/aws-lambda.mdx +279 -0
  31. package/.docs/raw/deployment/serverless-platforms/index.mdx +0 -1
  32. package/.docs/raw/frameworks/agentic-uis/ai-sdk.mdx +97 -0
  33. package/.docs/raw/frameworks/agentic-uis/assistant-ui.mdx +34 -0
  34. package/.docs/raw/local-dev/mastra-dev.mdx +10 -3
  35. package/.docs/raw/memory/overview.mdx +2 -1
  36. package/.docs/raw/reference/agents/generate.mdx +3 -2
  37. package/.docs/raw/reference/agents/stream.mdx +3 -2
  38. package/.docs/raw/reference/cli/dev.mdx +12 -0
  39. package/.docs/raw/reference/legacyWorkflows/createRun.mdx +0 -4
  40. package/.docs/raw/reference/memory/Memory.mdx +12 -6
  41. package/.docs/raw/reference/workflows/create-run.mdx +4 -4
  42. package/.docs/raw/reference/workflows/sendEvent.mdx +49 -0
  43. package/.docs/raw/workflows/overview.mdx +1 -1
  44. package/.docs/raw/workflows/pausing-execution.mdx +74 -37
  45. package/package.json +5 -5
package/.docs/raw/frameworks/agentic-uis/assistant-ui.mdx CHANGED
@@ -69,6 +69,40 @@ You now have a basic Mastra server project ready. You should have the following
  Ensure that you have set the appropriate environment variables for your LLM provider in the `.env` file.
  </Callout>
 
+ ### Compatibility Fix
+
+ Currently, to ensure proper compatibility between Mastra and Assistant UI, you need to set up server middleware. Update your `/mastra/index.ts` file with the following configuration:
+
+ ```typescript showLineNumbers copy filename="src/mastra/index.ts"
+ export const mastra = new Mastra({
+   // mastra server middleware
+   server: {
+     middleware: [{
+       path: '/api/agents/*/stream',
+       handler: async (c, next) => {
+
+         const body = await c.req.json();
+
+         if ('state' in body && body.state == null) {
+           delete body.state;
+           delete body.tools;
+         }
+
+         c.req.json = async () => body;
+
+         return next();
+       }
+     }]
+   },
+ });
+ ```
+
+ This middleware ensures that when Assistant UI sends a request with `state: null` and `tools: {}` in the request body, we remove those properties to make the request work properly with Mastra.
+
+ <Callout type="info">
+ The `state: null` property can cause errors like `Cannot use 'in' operator to search for 'input' in null` in Mastra. Additionally, passing `tools: {}` overrides Mastra's built-in tools. Mastra only supports `clientTools` via the Mastra client SDK from the client side. For more information about client tools, see the [Client Tools documentation](/reference/client-js/agents#client-tools).
+ </Callout>
+
  ### Run the Mastra Server
 
  Run the Mastra server using the following command:
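
The middleware added above boils down to a single body-sanitizing check. As a minimal sketch of that logic in isolation (the `sanitizeAssistantUiBody` name and `AssistantUiBody` type are illustrative, not part of Mastra's API), the same check could be written as a plain function and exercised without running a server:

```typescript
// Illustrative only: the body-sanitizing logic from the middleware above,
// extracted so it can be tested outside of Mastra's server middleware.
type AssistantUiBody = { state?: unknown; tools?: unknown; [key: string]: unknown };

export function sanitizeAssistantUiBody(body: AssistantUiBody): AssistantUiBody {
  // Assistant UI may send `state: null` and `tools: {}`; dropping both avoids the
  // "Cannot use 'in' operator" error and keeps Mastra's built-in tools intact.
  if ("state" in body && body.state == null) {
    const { state: _state, tools: _tools, ...rest } = body;
    return rest;
  }
  return body;
}

// Example: sanitizeAssistantUiBody({ state: null, tools: {}, messages: [] }) -> { messages: [] }
```
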
package/.docs/raw/local-dev/mastra-dev.mdx CHANGED
@@ -4,6 +4,7 @@ description: Documentation for the Mastra local development environment for Mast
  ---
 
  import YouTube from "@/components/youtube";
+ import { VideoPlayer } from "@/components/video-player"
  import { Tabs, Tab } from "@/components/tabs";
 
  # Playground
@@ -42,7 +43,9 @@ The Playground lets you interact with your agents, workflows, and tools. It prov
 
  Quickly test and debug your agents during development using the interactive chat interface in the Agent Playground.
 
- ![Agents Playground](/image/local-dev/local-dev-agents-playground.jpg)
+ <VideoPlayer
+   src="https://res.cloudinary.com/dygi6femd/video/upload/v1751406022/local-dev-agents-playground_100_m3begx.mp4"
+ />
 
  Key features:
 
@@ -56,7 +59,9 @@ Key features:
 
  Validate workflows by supplying defined inputs and visualizing each step within the Workflow Playground.
 
- ![Workflows Playground](/image/local-dev/local-dev-workflow-playground.jpg)
+ <VideoPlayer
+   src="https://res.cloudinary.com/dygi6femd/video/upload/v1751406027/local-dev-workflows-playground_100_rbc466.mp4"
+ />
 
  Key features:
 
@@ -70,7 +75,9 @@ Key features:
 
  Quickly test and debug custom tools in isolation using the Tools Playground, without running a full agent or workflow.
 
- ![Tools Playground](/image/local-dev/local-dev-tools-playground.jpg)
+ <VideoPlayer
+   src="https://res.cloudinary.com/dygi6femd/video/upload/v1751406316/local-dev-agents-tools_100_fe1jdt.mp4"
+ />
 
  Key features:
 
package/.docs/raw/memory/overview.mdx CHANGED
@@ -96,7 +96,7 @@ const memory = new Memory({
  });
  ```
 
- By default, title generation uses the same model as your agent. For cost optimization, you can specify a cheaper model specifically for title generation:
+ By default, title generation uses the same model and default instructions as your agent. For customization or cost optimization, you can specify a different model or provide custom instructions specifically for title generation:
 
  ```typescript {5-7}
  const memory = new Memory({
@@ -104,6 +104,7 @@ const memory = new Memory({
    threads: {
      generateTitle: {
        model: openai("gpt-4.1-nano"), // Use cheaper model for titles
+       instructions: "Generate a concise title for this conversation based on the first user message.",
      },
    },
  },
package/.docs/raw/reference/agents/generate.mdx CHANGED
@@ -265,10 +265,11 @@ Configuration options for memory management:
    parameters: [
      {
        name: "generateTitle",
-       type: "boolean | { model: LanguageModelV1 | ((ctx: RuntimeContext) => LanguageModelV1 | Promise<LanguageModelV1>) }",
+       type: "boolean | { model: LanguageModelV1 | ((ctx: RuntimeContext) => LanguageModelV1 | Promise<LanguageModelV1>), instructions: string | ((ctx: RuntimeContext) => string | Promise<string>) }",
        isOptional: true,
        description:
-         "Controls automatic thread title generation from the user's first message. Can be a boolean to enable/disable using the agent's model, or an object with a custom model for title generation (useful for cost optimization). Example: { model: openai('gpt-4.1-nano') }",
+         `Controls automatic thread title generation from the user's first message. Can be a boolean to enable/disable using the agent's model, or an object specifying a custom model and/or custom instructions for title generation (useful for cost optimization or title customization).
+         Example: { model: openai('gpt-4.1-nano'), instructions: 'Generate a concise title based on the initial user message.' }`,
      },
    ],
  },
package/.docs/raw/reference/agents/stream.mdx CHANGED
@@ -271,10 +271,11 @@ Configuration options for memory management:
    parameters: [
      {
        name: "generateTitle",
-       type: "boolean | { model: LanguageModelV1 | ((ctx: RuntimeContext) => LanguageModelV1 | Promise<LanguageModelV1>) }",
+       type: "boolean | { model: LanguageModelV1 | ((ctx: RuntimeContext) => LanguageModelV1 | Promise<LanguageModelV1>), instructions: string | ((ctx: RuntimeContext) => string | Promise<string>) }",
        isOptional: true,
        description:
-         "Controls automatic thread title generation from the user's first message. Can be a boolean to enable/disable using the agent's model, or an object with a custom model for title generation (useful for cost optimization). Example: { model: openai('gpt-4.1-nano') }",
+         `Controls automatic thread title generation from the user's first message. Can be a boolean to enable/disable using the agent's model, or an object specifying a custom model and/or custom instructions for title generation (useful for cost optimization or title customization).
+         Example: { model: openai('gpt-4.1-nano'), instructions: 'Generate a concise title based on the initial user message.' }`,
      },
    ],
  },
package/.docs/raw/reference/cli/dev.mdx CHANGED
@@ -48,6 +48,18 @@ mastra dev [options]
      description: "Path to custom environment file",
      isOptional: true,
    },
+   {
+     name: "--inspect",
+     type: "boolean",
+     description: "Start the dev server in inspect mode for debugging (cannot be used with --inspect-brk)",
+     isOptional: true,
+   },
+   {
+     name: "--inspect-brk",
+     type: "boolean",
+     description: "Start the dev server in inspect mode and break at the beginning of the script (cannot be used with --inspect)",
+     isOptional: true,
+   },
    {
      name: "--help",
      type: "boolean",
package/.docs/raw/reference/legacyWorkflows/createRun.mdx CHANGED
@@ -74,7 +74,3 @@ try {
  - [Workflow Class Reference](./workflow.mdx)
  - [Step Class Reference](./step-class.mdx)
  - See the [Creating a Workflow](../../examples/workflows_legacy/creating-a-workflow.mdx) example for complete usage
-
- ```
-
- ```
package/.docs/raw/reference/memory/Memory.mdx CHANGED
@@ -63,6 +63,7 @@ const memory = new Memory({
      // Or use a different model for title generation
      // generateTitle: {
      //   model: openai("gpt-4.1-nano"), // Use cheaper model for titles
+     //   instructions: "Generate a concise title based on the initial user message.", // Custom instructions for title
      // },
    },
  },
@@ -109,9 +110,9 @@ const memory = new Memory({
  });
  ```
 
- #### Cost Optimization with Custom Models
+ #### Cost Optimization with Custom Models and Instructions
 
- You can specify a different (typically cheaper) model for title generation while using a high-quality model for the main conversation:
+ You can specify a different (typically cheaper) model and custom instructions for title generation while using a high-quality model for the main conversation:
 
  ```typescript copy showLineNumbers
  import { openai } from "@ai-sdk/openai";
@@ -121,6 +122,7 @@ const memory = new Memory({
    threads: {
      generateTitle: {
        model: openai("gpt-4.1-nano"), // Cheaper model for titles
+       instructions: "Generate a concise, friendly title based on the initial user message.", // Custom title instructions
      },
    },
  },
@@ -132,9 +134,9 @@ const agent = new Agent({
  });
  ```
 
- #### Dynamic Model Selection
+ #### Dynamic Model Selection and Instructions
 
- You can also use a function to dynamically determine the model based on runtime context:
+ You can also use a function to dynamically determine the model and instructions based on runtime context:
 
  ```typescript copy showLineNumbers
  const memory = new Memory({
@@ -148,6 +150,10 @@ const memory = new Memory({
          ? openai("gpt-4.1")
          : openai("gpt-4.1-nano");
      },
+     instructions: (ctx: RuntimeContext) => {
+       const language = ctx.get("userLanguage") || "English";
+       return `Generate a concise, engaging title in ${language} based on the user's first message.`;
+     },
    },
  },
},
@@ -285,9 +291,9 @@ Mastra supports many embedding models through the [Vercel AI SDK](https://sdk.ve
  },
  {
    name: "threads",
-   type: "{ generateTitle?: boolean | { model: LanguageModelV1 | ((ctx: RuntimeContext) => LanguageModelV1 | Promise<LanguageModelV1>) } }",
+   type: "{ generateTitle?: boolean | { model: LanguageModelV1 | ((ctx: RuntimeContext) => LanguageModelV1 | Promise<LanguageModelV1>), instructions?: string | ((ctx: RuntimeContext) => string | Promise<string>) } }",
    description:
-     "Settings related to memory thread creation. `generateTitle` controls automatic thread title generation from the user's first message. Can be a boolean to enable/disable using the agent's model, or an object with a custom model for title generation (useful for cost optimization). Example: { generateTitle: { model: openai('gpt-4.1-nano') } }",
+     "Settings related to memory thread creation. `generateTitle` controls automatic thread title generation from the user's first message. Can be a boolean to enable/disable using the agent's model, or an object specifying a custom model or custom instructions for title generation (useful for cost optimization or title customization). Example: { generateTitle: { model: openai('gpt-4.1-nano'), instructions: 'Concise title based on the initial user message.' } }",
    isOptional: true,
    defaultValue: "{ generateTitle: false }",
  },
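
Taken together, the fragments above all target one nested `generateTitle` block. A rough consolidated sketch follows; it assumes the `options.threads` shape visible in the surrounding context lines, that `RuntimeContext` is exported from `@mastra/core/runtime-context` as elsewhere in the Mastra docs, and that the `userTier`/`userLanguage` context keys are examples only:

```typescript
import { Memory } from "@mastra/memory";
import { openai } from "@ai-sdk/openai";
import { RuntimeContext } from "@mastra/core/runtime-context";

const memory = new Memory({
  options: {
    threads: {
      generateTitle: {
        // Pick a cheaper title model unless the caller is on a premium tier (assumed key).
        model: (ctx: RuntimeContext) =>
          ctx.get("userTier") === "premium" ? openai("gpt-4.1") : openai("gpt-4.1-nano"),
        // Resolve title instructions per request from runtime context.
        instructions: (ctx: RuntimeContext) => {
          const language = ctx.get("userLanguage") || "English";
          return `Generate a concise title in ${language} based on the user's first message.`;
        },
      },
    },
  },
});
```

With this configuration the agent keeps its own model for the conversation, while thread titles come from the cheaper, per-request title setup.
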
package/.docs/raw/reference/workflows/create-run.mdx CHANGED
@@ -1,11 +1,11 @@
  ---
- title: "Reference: Workflow.createRun() | Building Workflows | Mastra Docs"
- description: Documentation for the `.createRun()` method in workflows, which creates a new workflow run instance.
+ title: "Reference: Workflow.createRunAsync() | Building Workflows | Mastra Docs"
+ description: Documentation for the `.createRunAsync()` method in workflows, which creates a new workflow run instance.
  ---
 
- # Workflow.createRun()
+ # Workflow.createRunAsync()
 
- The `.createRun()` method creates a new workflow run instance, allowing you to execute the workflow with specific input data.
+ The `.createRunAsync()` method creates a new workflow run instance, allowing you to execute the workflow with specific input data.
 
  ## Usage
 
package/.docs/raw/reference/workflows/sendEvent.mdx CHANGED
@@ -0,0 +1,49 @@
+ ---
+ title: "Reference: Workflow.sendEvent() | Building Workflows | Mastra Docs"
+ description: Documentation for the `.sendEvent()` method in workflows, which resumes execution when an event is sent.
+ ---
+
+ # Workflow.sendEvent()
+
+ The `.sendEvent()` method resumes execution when an event is sent.
+
+ ## Usage
+
+ ```typescript
+ workflow.sendEvent('my-event-name', step1);
+ ```
+
+ ## Parameters
+
+ <PropertiesTable
+   content={[
+     {
+       name: "eventName",
+       type: "string",
+       description: "The name of the event to send",
+       isOptional: false,
+     },
+     {
+       name: "step",
+       type: "Step",
+       description: "The step to resume after the event is sent",
+       isOptional: false,
+     },
+   ]}
+ />
+
+ ## Returns
+
+ <PropertiesTable
+   content={[
+     {
+       name: "workflow",
+       type: "Workflow",
+       description: "The workflow instance for method chaining",
+     },
+   ]}
+ />
+
+ ## Related
+
+ - [Sleep & Events](../../docs/workflows/pausing-execution.mdx)
package/.docs/raw/workflows/overview.mdx CHANGED
@@ -176,7 +176,7 @@ console.log(JSON.stringify(result, null, 2));
 
  To trigger this workflow, run the following:
 
- ```bash
+ ```bash copy
  npx tsx src/test-workflow.ts
  ```
 
package/.docs/raw/workflows/pausing-execution.mdx CHANGED
@@ -5,56 +5,93 @@ description: "Pausing execution in Mastra workflows allows you to pause executio
 
  # Sleep & Events
 
- Pausing execution in Mastra workflows allows you to pause execution while waiting for external input or resources via `sleep()`, `sleepUntil()` and `waitForEvent()`.
+ Mastra lets you pause workflow execution when waiting for external input or timing conditions. This can be useful for things like polling, delayed retries, or waiting on user actions.
 
- This sets the workflow status to `waiting`.
+ You can pause execution using:
 
- ## sleep()
+ - `sleep()`: Pause for a set number of milliseconds
+ - `sleepUntil()`: Pause until a specific timestamp
+ - `waitForEvent()`: Pause until an external event is received
+ - `sendEvent()`: Send an event to resume a waiting workflow
 
- `sleep()` pauses execution for a specified number of milliseconds.
+ When using any of these methods, the workflow status is set to `waiting` until execution resumes.
 
- ```typescript
- workflow
-   .then(step1)
-   .sleep(1000)
-   .then(step2)
-   .commit();
+ ## sleep
+
+ The `sleep()` method pauses execution between steps for a specified number of milliseconds.
+
+ ```typescript {9} filename="src/mastra/workflows/test-workflow.ts" showLineNumbers copy
+ import { createWorkflow, createStep } from "@mastra/core/workflows";
+ import { z } from "zod";
+
+ const step1 = createStep({...});
+ const step2 = createStep({...});
+
+ export const testWorkflow = createWorkflow({...})
+   .then(step1)
+   .sleep(1000)
+   .then(step2)
+   .commit();
  ```
 
- ## sleepUntil()
+ ## sleepUntil
 
- `sleepUntil()` pauses execution until a specified date.
+ The `sleepUntil()` method pauses execution between steps until a specified date.
 
- ```typescript
- workflow
-   .then(step1)
-   .sleepUntil(new Date(Date.now() + 1000))
-   .then(step2)
-   .commit();
+ ```typescript {9} filename="src/mastra/workflows/test-workflow.ts" showLineNumbers copy
+ import { createWorkflow, createStep } from "@mastra/core/workflows";
+ import { z } from "zod";
+
+ const step1 = createStep({...});
+ const step2 = createStep({...});
+
+ export const testWorkflow = createWorkflow({...})
+   .then(step1)
+   .sleepUntil(new Date(Date.now() + 5000))
+   .then(step2)
+   .commit();
  ```
 
- ## waitForEvent()
+ > `Date.now()` is evaluated when the workflow starts, not at the moment the `sleepUntil()` method is called.
 
- `waitForEvent()` pauses execution until an event is received. Events can be sent to the workflow using `run.sendEvent()`. The event name and the step to resume after the event is received are provided as arguments to `waitForEvent()`.
+ ## waitForEvent
 
- `.sendEvent()` takes as arguments the event name and the event data. The event data is optional and can be any JSON-serializable value.
+ The `waitForEvent()` method pauses execution until a specific event is received. Use `run.sendEvent()` to send the event. You must provide both the event name and the step to resume.
 
- ```typescript
- workflow
-   .then(step1)
-   .waitForEvent('my-event-name', step2)
-   .then(step3)
-   .commit();
+ ```typescript {10} filename="src/mastra/workflows/test-workflow.ts" showLineNumbers copy
+ import { createWorkflow, createStep } from "@mastra/core/workflows";
+ import { z } from "zod";
 
- const run = await workflow.createRunAsync()
- run.start({})
+ const step1 = createStep({...});
+ const step2 = createStep({...});
+ const step3 = createStep({...});
+
+ export const testWorkflow = createWorkflow({...})
+   .then(step1)
+   .waitForEvent("my-event-name", step2)
+   .then(step3)
+   .commit();
+ ```
+ ## sendEvent
+
+ The `.sendEvent()` method sends an event to the workflow. It accepts the event name and optional event data, which can be any JSON-serializable value.
+
+ ```typescript {5,12,15} filename="src/test-workflow.ts" showLineNumbers copy
+ import { mastra } from "./mastra";
+
+ const run = await mastra.getWorkflow("testWorkflow").createRunAsync();
+
+ const result = run.start({
+   inputData: {
+     value: "hello"
+   }
+ });
 
  setTimeout(() => {
-   run.sendEvent('my-event-name', {
-     data1: 'hello',
-     data2: {
-       anyData: 12
-     }
-   })
- }, 2e3)
- ```
+   run.sendEvent("my-event-name", { value: "from event" });
+ }, 3000);
+
+ console.log(JSON.stringify(await result, null, 2));
+ ```
+
+ > In this example, avoid using `await run.start()` directly, as it would block sending the event before the workflow reaches its waiting state.
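
Because the new examples elide the step definitions with `createStep({...})`, the sketch below fills them in with placeholder ids, schemas, and execute functions (all illustrative, not taken from the package) to show how the `waitForEvent` chain fits together end to end:

```typescript
import { createWorkflow, createStep } from "@mastra/core/workflows";
import { z } from "zod";

// Placeholder steps standing in for the elided `createStep({...})` calls above.
const step1 = createStep({
  id: "step-1",
  inputSchema: z.object({ value: z.string() }),
  outputSchema: z.object({ value: z.string() }),
  execute: async ({ inputData }) => ({ value: inputData.value }),
});

const step2 = createStep({
  id: "step-2",
  inputSchema: z.object({ value: z.string() }),
  outputSchema: z.object({ value: z.string() }),
  execute: async ({ inputData }) => ({ value: `resumed: ${inputData.value}` }),
});

const step3 = createStep({
  id: "step-3",
  inputSchema: z.object({ value: z.string() }),
  outputSchema: z.object({ value: z.string() }),
  execute: async ({ inputData }) => ({ value: inputData.value }),
});

export const testWorkflow = createWorkflow({
  id: "test-workflow",
  inputSchema: z.object({ value: z.string() }),
  outputSchema: z.object({ value: z.string() }),
})
  .then(step1)
  .waitForEvent("my-event-name", step2)
  .then(step3)
  .commit();
```

The `run.start()` / `run.sendEvent()` snippet from the hunk above can drive this workflow unchanged; until the event arrives the run reports a `waiting` status, and once `my-event-name` is sent, `step2` resumes and `step3` completes the run.
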
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@mastra/mcp-docs-server",
-   "version": "0.13.4-alpha.0",
+   "version": "0.13.5-alpha.0",
    "description": "MCP server for accessing Mastra.ai documentation, changelogs, and news.",
    "type": "module",
    "main": "dist/index.js",
@@ -32,7 +32,7 @@
    "uuid": "^11.1.0",
    "zod": "^3.25.67",
    "zod-to-json-schema": "^3.24.5",
-   "@mastra/mcp": "^0.10.5"
+   "@mastra/mcp": "^0.10.6-alpha.0"
  },
  "devDependencies": {
    "@hono/node-server": "^1.14.4",
@@ -42,13 +42,13 @@
    "@wong2/mcp-cli": "^1.10.0",
    "cross-env": "^7.0.3",
    "eslint": "^9.29.0",
-   "hono": "^4.8.3",
+   "hono": "^4.8.4",
    "tsup": "^8.5.0",
    "tsx": "^4.19.4",
    "typescript": "^5.8.3",
    "vitest": "^3.2.4",
-   "@internal/lint": "0.0.16",
-   "@mastra/core": "0.10.10-alpha.0"
+   "@internal/lint": "0.0.17",
+   "@mastra/core": "0.10.11-alpha.2"
  },
  "peerDependencies": {
    "@mastra/core": "^0.10.0-alpha.0"