@mastra/mcp-docs-server 0.13.21 → 0.13.22-alpha.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. package/.docs/organized/changelogs/%40internal%2Fstorage-test-utils.md +8 -8
  2. package/.docs/organized/changelogs/%40internal%2Ftypes-builder.md +2 -0
  3. package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +8 -0
  4. package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +10 -10
  5. package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +36 -36
  6. package/.docs/organized/changelogs/%40mastra%2Fcloud.md +12 -12
  7. package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +10 -10
  8. package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +10 -10
  9. package/.docs/organized/changelogs/%40mastra%2Fcore.md +87 -87
  10. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +29 -29
  11. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +17 -17
  12. package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +18 -18
  13. package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +17 -17
  14. package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +62 -62
  15. package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +10 -10
  16. package/.docs/organized/changelogs/%40mastra%2Fevals.md +12 -12
  17. package/.docs/organized/changelogs/%40mastra%2Ffirecrawl.md +9 -9
  18. package/.docs/organized/changelogs/%40mastra%2Flance.md +10 -10
  19. package/.docs/organized/changelogs/%40mastra%2Flibsql.md +28 -28
  20. package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +24 -24
  21. package/.docs/organized/changelogs/%40mastra%2Fmem0.md +10 -10
  22. package/.docs/organized/changelogs/%40mastra%2Fmemory.md +9 -9
  23. package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +10 -10
  24. package/.docs/organized/changelogs/%40mastra%2Fmssql.md +11 -11
  25. package/.docs/organized/changelogs/%40mastra%2Fpg.md +19 -19
  26. package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +29 -29
  27. package/.docs/organized/changelogs/%40mastra%2Frag.md +10 -10
  28. package/.docs/organized/changelogs/%40mastra%2Fschema-compat.md +6 -0
  29. package/.docs/organized/changelogs/%40mastra%2Fserver.md +32 -32
  30. package/.docs/organized/changelogs/%40mastra%2Fupstash.md +11 -11
  31. package/.docs/organized/changelogs/create-mastra.md +17 -17
  32. package/.docs/organized/changelogs/mastra.md +29 -29
  33. package/.docs/organized/code-examples/agent.md +36 -0
  34. package/.docs/organized/code-examples/heads-up-game.md +4 -4
  35. package/.docs/raw/memory/memory-processors.mdx +5 -0
  36. package/.docs/raw/memory/overview.mdx +159 -104
  37. package/.docs/raw/memory/semantic-recall.mdx +5 -0
  38. package/.docs/raw/memory/working-memory.mdx +5 -0
  39. package/.docs/raw/reference/agents/agent.mdx +6 -0
  40. package/.docs/raw/reference/agents/generateVNext.mdx +2 -1
  41. package/.docs/raw/reference/agents/listAgents.mdx +68 -0
  42. package/.docs/raw/reference/agents/streamVNext.mdx +1 -1
  43. package/.docs/raw/reference/client-js/agents.mdx +54 -1
  44. package/.docs/raw/reference/client-js/workflows.mdx +36 -17
  45. package/.docs/raw/reference/core/mastra-class.mdx +2 -2
  46. package/.docs/raw/reference/memory/Memory.mdx +91 -239
  47. package/.docs/raw/reference/memory/createThread.mdx +36 -16
  48. package/.docs/raw/reference/memory/deleteMessages.mdx +39 -74
  49. package/.docs/raw/reference/memory/getThreadById.mdx +11 -11
  50. package/.docs/raw/reference/memory/getThreadsByResourceId.mdx +26 -29
  51. package/.docs/raw/reference/memory/getThreadsByResourceIdPaginated.mdx +46 -124
  52. package/.docs/raw/reference/memory/query.mdx +76 -90
  53. package/CHANGELOG.md +14 -0
  54. package/README.md +8 -0
  55. package/package.json +4 -4
package/.docs/raw/memory/overview.mdx
@@ -1,170 +1,218 @@
- import { Steps } from "nextra/components";
+ ---
+ title: "Memory Overview | Memory | Mastra Docs"
+ description: "Learn how Mastra's memory system works with working memory, conversation history, and semantic recall."
+ ---
+
+ import { Steps, Callout } from "nextra/components";

  # Memory overview

- Memory is how agents manage the context that's available to them, it's a condensation of all chat messages into their context window.
+ Memory in Mastra helps agents manage context across conversations by condensing relevant information into the language model’s context window.
+
+ Mastra supports three complementary memory systems: [working memory](./working-memory.mdx), [conversation history](#conversation-history), and [semantic recall](./semantic-recall.mdx). Together, they allow agents to track preferences, maintain conversational flow, and retrieve relevant historical messages.
+
+ To persist and recall information between conversations, memory requires a storage adapter.
+
+ Supported options include:
+
+ - [Memory with LibSQL](/examples/memory/memory-with-libsql)
+ - [Memory with Postgres](/examples/memory/memory-with-pg)
+ - [Memory with Upstash](/examples/memory/memory-with-upstash)
+
+ ## Types of memory

- ## The Context Window
+ All memory types are [thread-scoped](./working-memory.mdx#thread-scoped-memory-default) by default, meaning they apply only to a single conversation. [Resource-scoped](./working-memory.mdx#resource-scoped-memory) configuration allows working memory and semantic recall to persist across all threads that use the same user or entity.

- The context window is the total information visible to the language model at any given time.

- In Mastra, context is broken up into three parts: system instructions and information about the user ([working memory](./working-memory.mdx)), recent messages ([message history](#conversation-history)), and older messages that are relevant to the user’s query ([semantic recall](./semantic-recall.mdx)).
+ ### Working memory

- Working memory can persist at different scopes - either per conversation thread (default) or across all threads for the same user (resource-scoped), enabling persistent user profiles that remember context across conversations.
+ Stores persistent user-specific details such as names, preferences, goals, and other structured data. Uses [Markdown templates](./working-memory-template.mdx) or [Zod schemas](./working-memory-schema.mdx) to define structure.

- In addition, we provide [memory processors](./memory-processors.mdx) to trim context or remove information if the context is too long.
+ ### Conversation history

- ## Quick Start
+ Captures recent messages from the current conversation, providing short-term continuity and maintaining dialogue flow.

- The fastest way to see memory in action is using the built-in development playground.
+ ### Semantic recall

- If you haven't already, create a new Mastra project following the main [Getting Started guide](/docs/getting-started/installation).
+ Retrieves older messages from past conversations based on semantic relevance. Matches are retrieved using vector search and can include surrounding context for better comprehension.

- <Steps>
+ ## How memory works together

- ### Install the memory package
+ Mastra combines all memory types into a single context window. If the total exceeds the model’s token limit, use [memory processors](./memory-processors.mdx) to trim or filter messages before sending them to the model.

- ```bash npm2yarn copy
- npm install @mastra/memory@latest
+ ## Getting started
+
+ To use memory, install the required dependencies:
+
+ ```bash copy
+ npm install @mastra/core @mastra/memory @mastra/libsql
  ```

- ### Create an agent and attach a `Memory` instance
+ ### Shared storage

- ```typescript filename="src/mastra/agents/index.ts" {6-18}
- import { Agent } from "@mastra/core/agent";
- import { Memory } from "@mastra/memory";
- import { openai } from "@ai-sdk/openai";
+ To share memory across agents, add a storage adapter to the main Mastra instance. Any agent with memory enabled will use this shared storage to store and recall interactions.
+
+ ```typescript {6-8} filename="src/mastra/index.ts" showLineNumbers copy
+ import { Mastra } from "@mastra/core/mastra";
  import { LibSQLStore } from "@mastra/libsql";

- // Initialize memory with LibSQLStore for persistence
- const memory = new Memory({
+ export const mastra = new Mastra({
+ // ...
  storage: new LibSQLStore({
- url: "file:../mastra.db", // Or your database URL
- }),
- });
-
- export const myMemoryAgent = new Agent({
- name: "MemoryAgent",
- instructions: "...",
- model: openai("gpt-4o"),
- memory,
+ url: ":memory:"
+ })
  });
  ```

- ### Start the Development Server
-
- ```bash npm2yarn copy
- npm run dev
- ```
+ ### Adding working memory to agents

- ### Open the playground and select your `MemoryAgent`
+ Enable working memory by passing a `Memory` instance to the agent's `memory` parameter and setting `workingMemory.enabled` to `true`:

- Open the playground at [http://localhost:4111](http://localhost:4111). Send a few messages and notice that it remembers information across turns:
+ ```typescript {1,6-12} filename="src/mastra/agents/test-agent.ts" showLineNumbers copy
+ import { Memory } from "@mastra/memory";
+ import { Agent } from "@mastra/core/agent";

- ```
- ➡️ You: My favorite color is blue.
- ⬅️ Agent: Got it! I'll remember that your favorite color is blue.
- ➡️ You: What is my favorite color?
- ⬅️ Agent: Your favorite color is blue.
+ export const testAgent = new Agent({
+ // ..
+ memory: new Memory({
+ options: {
+ workingMemory: {
+ enabled: true
+ }
+ }
+ })
+ })
  ```

- </Steps>
+ ## Dedicated storage

- ## Memory Threads
+ Agents can be configured with their own dedicated storage, keeping tasks, conversations, and recalled information separate across agents.

- Mastra organizes memory into threads, which are records that identify specific conversation histories, using two identifiers:
+ ### Adding storage to agents

- 1. **`threadId`**: A globally unique conversation id (e.g., `support_123`). Thread IDs must be unique across all resources.
- 2. **`resourceId`**: The user or entity id that owns each thread (e.g., `user_123`, `org_456`).
+ To assign dedicated storage to an agent, install and import the required dependency and pass a `storage` instance to the `Memory` constructor:

- The `resourceId` is particularly important for [resource-scoped working memory](./working-memory.mdx#resource-scoped-memory), which allows memory to persist across all conversation threads for the same user.
+ ```typescript {3, 9-11} filename="src/mastra/agents/test-agent.ts" showLineNumbers copy
+ import { Memory } from "@mastra/memory";
+ import { Agent } from "@mastra/core/agent";
+ import { LibSQLStore } from "@mastra/libsql";

- ```typescript {2,3}
- const response = await myMemoryAgent.stream("Hello, my name is Alice.", {
- resourceId: "user_alice",
- threadId: "conversation_123",
+ export const testAgent = new Agent({
+ // ...
+ memory: new Memory({
+ // ...
+ storage: new LibSQLStore({
+ url: "file:agent-memory.db"
+ })
+ // ...
+ })
  });
  ```

- **Important:** without these ID's your agent will not use memory, even if memory is properly configured. The playground handles this for you, but you need to add ID's yourself when using memory in your application.
+ ## Memory threads

- > **Thread ID Uniqueness:** Each thread ID must be globally unique across all resources. A thread is permanently associated with the resource that created it. If you need to have similar thread names for different resources (e.g., a "general" thread for multiple users), include the resource ID in the thread ID: `${resourceId}-general` or `user_alice_general`.
+ Mastra organizes memory into threads, which are records that group related interactions, using two identifiers:

- ### Thread Title Generation
+ 1. **`thread`**: A globally unique ID representing the conversation (e.g., `support_123`). Must be unique across all resources.
+ 2. **`resource`**: The user or entity that owns the thread (e.g., `user_123`, `org_456`).

- Mastra can automatically generate meaningful titles for conversation threads based on the user's first message. This helps organize and identify conversations in your application UI.
+ The `resource` is especially important for [resource-scoped memory](./working-memory.mdx#resource-scoped-memory), which allows memory to persist across all threads associated with the same user or entity.

- ```typescript {3-7}
- const memory = new Memory({
- options: {
- threads: {
- generateTitle: true, // Enable automatic title generation
- },
- },
+ ```typescript {4} showLineNumbers
+ const stream = await agent.stream("message for agent", {
+ memory: {
+ thread: "user-123",
+ resource: "test-123"
+ }
  });
  ```

- By default, title generation uses the same model and default instructions as your agent. For customization or cost optimization, you can specify a different model or provide custom instructions specifically for title generation:
+ <Callout type="warning">
+ Even with memory configured, agents won’t store or recall information unless both `thread` and `resource` are provided.
+ </Callout>

- ```typescript {5-7}
- const memory = new Memory({
- options: {
- threads: {
- generateTitle: {
- model: openai("gpt-4.1-nano"), // Use cheaper model for titles
- instructions: "Generate a concise title for this conversation based on the first user message.",
- },
+ > Mastra Playground sets `thread` and `resource` IDs automatically. In your own application, you must provide them manually as part of each `.generate()` or `.stream()` call.
+
+ ### Thread title generation
+
+ Mastra can automatically generate descriptive thread titles based on the user's first message. Enable this by setting `generateTitle` to `true`. This improves organization and makes it easier to display conversations in your UI.
+
+ ```typescript {3-7} showLineNumbers
+ export const testAgent = new Agent({
+ memory: new Memory({
+ options: {
+ threads: {
+ generateTitle: true,
+ }
  },
- },
+ })
  });
  ```

- Title generation happens asynchronously after the agent responds, so it doesn't impact response time. See the [full configuration reference](../../reference/memory/Memory.mdx#thread-title-generation) for more details and examples.
+ > Title generation runs asynchronously after the agent responds and does not affect response time. See the [full configuration reference](../../reference/memory/Memory.mdx#thread-title-generation) for details and examples.

- ## Conversation History
+ #### Optimizing title generation

- By default, the `Memory` instance includes the [last 10 messages](../../reference/memory/Memory.mdx) from the current Memory thread in each new request. This provides the agent with immediate conversational context.
+ Titles are generated using your agent's model by default. To optimize cost or behavior, provide a smaller `model` and custom `instructions`. This keeps title generation separate from main conversation logic.

- ```ts {3}
- const memory = new Memory({
- options: {
- lastMessages: 10,
- },
+ ```typescript {5-9} showLineNumbers
+ export const testAgent = new Agent({
+ // ...
+ memory: new Memory({
+ options: {
+ threads: {
+ generateTitle: {
+ model: openai("gpt-4.1-nano"),
+ instructions: "Generate a concise title based on the user's first message",
+ },
+ },
+ }
+ })
  });
  ```

- **Important:** Only send the newest user message in each agent call. Mastra handles retrieving and injecting the necessary history. Sending the full history yourself will cause duplication. See the [AI SDK Memory Example](../../docs/frameworks/agentic-uis/ai-sdk.mdx) for how to handle this with when using the `useChat` frontend hooks.
+ #### Dynamic model selection and instructions

- ### Storage Configuration
+ You can configure thread title generation dynamically by passing functions to `model` and `instructions`. These functions receive the `runtimeContext` object, allowing you to adapt title generation based on user-specific values.

- Conversation history relies on a [storage adapter](/reference/memory/Memory#parameters) to store messages.
- By default it uses the same storage provided to the [main Mastra instance](https://mastra.ai/reference/core/mastra-class#initialization)
+ ```typescript {7-16} showLineNumbers
+ export const testAgent = new Agent({
+ // ...
+ memory: new Memory({
+ options: {
+ threads: {
+ generateTitle: {
+ model: ({ runtimeContext }) => {
+ const userTier = runtimeContext.get("userTier");
+ return userTier === "premium" ? openai("gpt-4.1") : openai("gpt-4.1-nano");
+ },
+ instructions: ({ runtimeContext }) => {
+ const language = runtimeContext.get("userLanguage") || "English";
+ return `Generate a concise, engaging title in ${language} based on the user's first message.`;
+ }
+ }
+ }
+ }
+ })
+ });
+ ```

- If neither the `Memory` instance nor the `Mastra` object specify a storage provider, Mastra will not persist memory data across application restarts or deployments. For any deployment beyond local testing you should provide your own storage configuration either on `Mastra` or directly within `new Memory()`.
+ ## Increasing conversation history

- When `storage` **is** given on the `Mastra` instance it will automatically be used by every `Memory` attached to agents. In that case you do not need to pass `storage` to `new Memory()` unless you want a per-agent override.
+ By default, each request includes the last 10 messages from the current memory thread, giving the agent short-term conversational context. This limit can be increased using the `lastMessages` parameter.

- ```ts {7-9}
- import { Memory } from "@mastra/memory";
- import { Agent } from "@mastra/core/agent";
- import { LibSQLStore } from "@mastra/libsql";
-
- const agent = new Agent({
+ ```typescript {3-7} showLineNumbers
+ export const testAgent = new Agent({
+ // ...
  memory: new Memory({
- storage: new LibSQLStore({
- url: "file:./local.db",
- }),
- }),
+ options: {
+ lastMessages: 100
+ },
+ })
  });
  ```

- **Storage code Examples**:
-
- - [LibSQL](/examples/memory/memory-with-libsql)
- - [Postgres](/examples/memory/memory-with-pg)
- - [Upstash](/examples/memory/memory-with-upstash)
-
- ## Viewing Retrieved Messages
+ ## Viewing retrieved messages

  If tracing is enabled in your Mastra deployment and memory is configured either with `lastMessages` and/or `semanticRecall`, the agent’s trace output will show all messages retrieved for context—including both recent conversation history and messages recalled via semantic recall.

@@ -172,6 +220,13 @@ This is helpful for debugging, understanding agent decisions, and verifying that

  For more details on enabling and configuring tracing, see [Tracing](../observability/tracing).

+ ## Local development with LibSQL
+
+ For local development with `LibSQLStore`, you can inspect stored memory using the [SQLite Viewer](https://marketplace.visualstudio.com/items?itemName=qwtel.sqlite-viewer) extension in VS Code.
+
+ ![SQLite Viewer](/image/memory/memory-sqlite-viewer.jpg)
+
+
  ## Next Steps

  Now that you understand the core concepts, continue to [semantic recall](./semantic-recall.mdx) to learn how to add RAG memory to your Mastra agents.
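The reworked overview above points readers to memory processors when the combined context grows too large, but does not show one inline. As a rough sketch only (not taken from this diff, and assuming `TokenLimiter` is still exported from `@mastra/memory/processors` as in earlier Mastra docs), a processor can sit alongside the other memory options:

```typescript
import { Memory } from "@mastra/memory";
// Assumption: TokenLimiter ships with @mastra/memory under the processors entry point.
import { TokenLimiter } from "@mastra/memory/processors";

export const memory = new Memory({
  options: {
    lastMessages: 100,
  },
  // Trim recent and recalled messages so the combined context stays under ~127k tokens.
  processors: [new TokenLimiter(127000)],
});
```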
package/.docs/raw/memory/semantic-recall.mdx
@@ -1,3 +1,8 @@
+ ---
+ title: "Semantic Recall | Memory | Mastra Docs"
+ description: "Learn how to use semantic recall in Mastra to retrieve relevant messages from past conversations using vector search and embeddings."
+ ---
+
  # Semantic Recall

  If you ask your friend what they did last weekend, they will search in their memory for events associated with "last weekend" and then tell you what they did. That's sort of like how semantic recall works in Mastra.
package/.docs/raw/memory/working-memory.mdx
@@ -1,3 +1,8 @@
+ ---
+ title: "Working Memory | Memory | Mastra Docs"
+ description: "Learn how to configure working memory in Mastra to store persistent user data, preferences."
+ ---
+
  import YouTube from "@/components/youtube";

  # Working Memory
package/.docs/raw/reference/agents/agent.mdx
@@ -54,6 +54,12 @@ export const agent = new Agent({
  isOptional: false,
  description: "The language model used by the agent. Can be provided statically or resolved at runtime.",
  },
+ {
+ name: "agents",
+ type: "Record<string, Agent> | ({ runtimeContext: RuntimeContext }) => Record<string, Agent> | Promise<Record<string, Agent>>",
+ isOptional: true,
+ description: "Sub-Agents that the agent can access. Can be provided statically or resolved dynamically.",
+ },
  {
  name: "tools",
  type: "ToolsInput | ({ runtimeContext: RuntimeContext }) => ToolsInput | Promise<ToolsInput>",
package/.docs/raw/reference/agents/generateVNext.mdx
@@ -224,7 +224,8 @@ const aiSdkResult = await agent.generateVNext("message for agent", {
  name: "output",
  type: "Zod schema | JsonSchema7",
  isOptional: true,
- description: "Schema for structured output generation (Zod schema or JSON Schema).",
+ description:
+ "**Deprecated.** Use structuredOutput with maxSteps:1 to achieve the same thing. Defines the expected structure of the output. Can be a JSON Schema object or a Zod schema.",
  },
  {
  name: "memory",
package/.docs/raw/reference/agents/listAgents.mdx
@@ -0,0 +1,68 @@
+ ---
+ title: "Reference: Agent.listAgents() | Agents | Mastra Docs"
+ description: "Documentation for the `Agent.listAgents()` method in Mastra agents, which retrieves the sub-agents that the agent can access."
+ ---
+
+ # Agent.listAgents()
+
+ The `.listAgents()` method retrieves the sub-agents configured for an agent, resolving them if they're a function. These sub-agents enable the agent to access other agents and perform complex actions.
+
+ ## Usage example
+
+ ```typescript copy
+ await agent.listAgents();
+ ```
+
+ ## Parameters
+
+ <PropertiesTable
+ content={[
+ {
+ name: "options",
+ type: "{ runtimeContext?: RuntimeContext }",
+ isOptional: true,
+ defaultValue: "{}",
+ description: "Optional configuration object containing runtime context.",
+ },
+ ]}
+ />
+
+ ## Returns
+
+ <PropertiesTable
+ content={[
+ {
+ name: "agents",
+ type: "Promise<Record<string, Agent>>",
+ description: "A promise that resolves to a record of agent names to their corresponding Agent instances.",
+ },
+ ]}
+ />
+
+ ## Extended usage example
+
+ ```typescript copy
+ import { RuntimeContext } from "@mastra/core/runtime-context";
+
+ await agent.listAgents({
+ runtimeContext: new RuntimeContext()
+ });
+ ```
+
+ ### Options parameters
+
+ <PropertiesTable
+ content={[
+ {
+ name: "runtimeContext",
+ type: "RuntimeContext",
+ isOptional: true,
+ defaultValue: "new RuntimeContext()",
+ description: "Runtime context for dependency injection and contextual information.",
+ },
+ ]}
+ />
+
+ ## Related
+
+ - [Agents overview](../../docs/agents/overview.mdx)
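Since `.listAgents()` resolves to a plain record keyed by agent name, callers can enumerate or look up sub-agents directly. A small sketch; the `summarizer` key below is hypothetical:

```typescript
const subAgents = await agent.listAgents();

// Enumerate the sub-agents this agent can delegate to.
for (const name of Object.keys(subAgents)) {
  console.log("sub-agent:", name);
}

// Or look one up by its configured key.
const summarizer = subAgents["summarizer"];
```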
package/.docs/raw/reference/agents/streamVNext.mdx
@@ -226,7 +226,7 @@ const aiSdkStream = await agent.streamVNext("message for agent", {
  type: "Zod schema | JsonSchema7",
  isOptional: true,
  description:
- "Defines the expected structure of the output. Can be a JSON Schema object or a Zod schema.",
+ "**Deprecated.** Use structuredOutput with maxSteps:1 to achieve the same thing. Defines the expected structure of the output. Can be a JSON Schema object or a Zod schema.",
  },
  {
  name: "memory",
package/.docs/raw/reference/client-js/agents.mdx
@@ -2,6 +2,7 @@
  title: Mastra Client Agents API
  description: Learn how to interact with Mastra AI agents, including generating responses, streaming interactions, and managing agent tools using the client-js SDK.
  ---
+ import { StreamVNextCallout } from "@/components/streamVNext-callout.tsx"

  # Agents API

@@ -105,7 +106,7 @@ Client-side tools allow you to execute custom functions on the client side when
  #### Basic Usage

  ```typescript
- import { createTool } from '@mastra/core/tools';
+ import { createTool } from '@mastra/client-js';
  import { z } from 'zod';

  const colorChangeTool = createTool({
@@ -158,3 +159,55 @@ const evals = await agent.evals();
  // Get live evaluations
  const liveEvals = await agent.liveEvals();
  ```
+
+
+ ### Stream VNext (Experimental)
+
+ <StreamVNextCallout />
+
+ Stream responses using the enhanced VNext API with improved method signatures. This method provides enhanced capabilities and format flexibility, with support for both Mastra's native format and AI SDK v5 compatibility:
+
+ ```typescript
+ const response = await agent.streamVNext(
+ "Tell me a story",
+ {
+ format: 'mastra', // Default: Mastra's native format
+ threadId: "thread-1",
+ clientTools: { colorChangeTool },
+ }
+ );
+
+ // AI SDK v5 compatible format
+ const response = await agent.streamVNext(
+ "Tell me a story",
+ {
+ format: 'aisdk', // Enable AI SDK v5 compatibility
+ threadId: "thread-1",
+ }
+ );
+
+ // Process the stream
+ response.processDataStream({
+ onChunk: (chunk) => {
+ console.log(chunk);
+ },
+ });
+ ```
+
+ The `format` parameter determines the output stream format:
+ - `'mastra'` (default): Returns Mastra's native format
+ - `'aisdk'`: Returns AI SDK v5 compatible format for frontend integration
+
+ ### Generate VNext (Experimental)
+
+ Generate a response using the enhanced VNext API with improved method signatures and AI SDK v5 compatibility:
+
+ ```typescript
+ const response = await agent.generateVNext(
+ "Hello, how are you?",
+ {
+ threadId: "thread-1",
+ resourceid: "resource-1",
+ }
+ );
+ ```
package/.docs/raw/reference/client-js/workflows.mdx
@@ -44,10 +44,9 @@ const details = await workflow.details();
  Start a workflow run with inputData and await full run results:

  ```typescript
- const run = await workflow.createRun();
+ const run = await workflow.createRunAsync();

- const result = await workflow.startAsync({
- runId: run.runId,
+ const result = await run.startAsync({
  inputData: {
  city: "New York",
  },
@@ -59,10 +58,9 @@ const result = await workflow.startAsync({
  Resume a suspended workflow step and await full run result:

  ```typescript
- const run = await workflow.createRun();
+ const run = await workflow.createRunAsync();

- const result = await workflow.resumeAsync({
- runId: run.runId,
+ const result = await run.resumeAsync({
  step: "step-id",
  resumeData: { key: "value" },
  });
@@ -76,14 +74,13 @@ Watch workflow transitions:
  try {
  const workflow = mastraClient.getWorkflow("testWorkflow");

- const run = await workflow.createRun();
+ const run = await workflow.createRunAsync();

- workflow.watch({ runId: run.runId }, (record) => {
+ run.watch((record) => {
  console.log(record);
  });

- const result = await workflow.start({
- runId: run.runId,
+ const result = await run.start({
  inputData: {
  city: "New York",
  },
@@ -101,14 +98,13 @@ Resume workflow run and watch workflow step transitions:
  try {
  const workflow = mastraClient.getWorkflow("testWorkflow");

- const run = await workflow.createRun({ runId: prevRunId });
+ const run = await workflow.createRunAsync({ runId: prevRunId });

- workflow.watch({ runId: run.runId }, (record) => {
+ run.watch((record) => {
  console.log(record);
  });

- workflow.resume({
- runId: run.runId,
+ run.resume({
  step: "step-id",
  resumeData: { key: "value" },
  });
@@ -117,6 +113,30 @@ try {
  }
  ```

+ ### Stream Workflow
+
+ Stream workflow execution for real-time updates:
+
+ ```typescript
+ try {
+ const workflow = mastraClient.getWorkflow("testWorkflow");
+
+ const run = await workflow.createRunAsync();
+
+ const stream = await run.stream({
+ inputData: {
+ city: 'New York',
+ },
+ });
+
+ for await (const chunk of stream) {
+ console.log(JSON.stringify(chunk, null, 2));
+ }
+ } catch (e) {
+ console.error('Workflow error:', e);
+ }
+ ```
+
  ### Get Workflow Run result

  Get the result of a workflow run:
@@ -125,11 +145,10 @@ Get the result of a workflow run:
  try {
  const workflow = mastraClient.getWorkflow("testWorkflow");

- const run = await workflow.createRun();
+ const run = await workflow.createRunAsync();

  // start the workflow run
- const startResult = await workflow.start({
- runId: run.runId,
+ const startResult = await run.start({
  inputData: {
  city: "New York",
  },
package/.docs/raw/reference/core/mastra-class.mdx
@@ -1,6 +1,6 @@
  ---
- title: "Mastra Core"
- description: Documentation for the Mastra Class, the core entry point for managing agents, workflows, MCP servers, and server endpoints.
+ title: "Reference: Mastra Class | Core | Mastra Docs"
+ description: "Documentation for the `Mastra` class in Mastra, the core entry point for managing agents, workflows, MCP servers, and server endpoints."
  ---

  # Mastra Class