@mastra/mcp-docs-server 0.13.21 → 0.13.22-alpha.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46) hide show
  1. package/.docs/organized/changelogs/%40internal%2Fstorage-test-utils.md +8 -8
  2. package/.docs/organized/changelogs/%40internal%2Ftypes-builder.md +2 -0
  3. package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +8 -0
  4. package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +29 -29
  5. package/.docs/organized/changelogs/%40mastra%2Fcloud.md +12 -12
  6. package/.docs/organized/changelogs/%40mastra%2Fcore.md +81 -81
  7. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +21 -21
  8. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +17 -17
  9. package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +18 -18
  10. package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +17 -17
  11. package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +54 -54
  12. package/.docs/organized/changelogs/%40mastra%2Fevals.md +12 -12
  13. package/.docs/organized/changelogs/%40mastra%2Ffirecrawl.md +9 -9
  14. package/.docs/organized/changelogs/%40mastra%2Flibsql.md +19 -19
  15. package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +17 -17
  16. package/.docs/organized/changelogs/%40mastra%2Fmem0.md +10 -10
  17. package/.docs/organized/changelogs/%40mastra%2Fmemory.md +9 -9
  18. package/.docs/organized/changelogs/%40mastra%2Fpg.md +10 -10
  19. package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +29 -29
  20. package/.docs/organized/changelogs/%40mastra%2Frag.md +10 -10
  21. package/.docs/organized/changelogs/%40mastra%2Fschema-compat.md +6 -0
  22. package/.docs/organized/changelogs/%40mastra%2Fserver.md +25 -25
  23. package/.docs/organized/changelogs/create-mastra.md +17 -17
  24. package/.docs/organized/changelogs/mastra.md +29 -29
  25. package/.docs/organized/code-examples/heads-up-game.md +4 -4
  26. package/.docs/raw/memory/memory-processors.mdx +5 -0
  27. package/.docs/raw/memory/overview.mdx +159 -104
  28. package/.docs/raw/memory/semantic-recall.mdx +5 -0
  29. package/.docs/raw/memory/working-memory.mdx +5 -0
  30. package/.docs/raw/reference/agents/agent.mdx +6 -0
  31. package/.docs/raw/reference/agents/generateVNext.mdx +2 -1
  32. package/.docs/raw/reference/agents/listAgents.mdx +68 -0
  33. package/.docs/raw/reference/agents/streamVNext.mdx +1 -1
  34. package/.docs/raw/reference/client-js/agents.mdx +54 -1
  35. package/.docs/raw/reference/client-js/workflows.mdx +36 -17
  36. package/.docs/raw/reference/core/mastra-class.mdx +2 -2
  37. package/.docs/raw/reference/memory/Memory.mdx +91 -239
  38. package/.docs/raw/reference/memory/createThread.mdx +36 -16
  39. package/.docs/raw/reference/memory/deleteMessages.mdx +39 -74
  40. package/.docs/raw/reference/memory/getThreadById.mdx +11 -11
  41. package/.docs/raw/reference/memory/getThreadsByResourceId.mdx +26 -29
  42. package/.docs/raw/reference/memory/getThreadsByResourceIdPaginated.mdx +46 -124
  43. package/.docs/raw/reference/memory/query.mdx +76 -90
  44. package/CHANGELOG.md +7 -0
  45. package/README.md +8 -0
  46. package/package.json +4 -4
@@ -44,10 +44,9 @@ const details = await workflow.details();
44
44
  Start a workflow run with inputData and await full run results:
45
45
 
46
46
  ```typescript
47
- const run = await workflow.createRun();
47
+ const run = await workflow.createRunAsync();
48
48
 
49
- const result = await workflow.startAsync({
50
- runId: run.runId,
49
+ const result = await run.startAsync({
51
50
  inputData: {
52
51
  city: "New York",
53
52
  },
@@ -59,10 +58,9 @@ const result = await workflow.startAsync({
59
58
  Resume a suspended workflow step and await full run result:
60
59
 
61
60
  ```typescript
62
- const run = await workflow.createRun();
61
+ const run = await workflow.createRunAsync();
63
62
 
64
- const result = await workflow.resumeAsync({
65
- runId: run.runId,
63
+ const result = await run.resumeAsync({
66
64
  step: "step-id",
67
65
  resumeData: { key: "value" },
68
66
  });
@@ -76,14 +74,13 @@ Watch workflow transitions:
76
74
  try {
77
75
  const workflow = mastraClient.getWorkflow("testWorkflow");
78
76
 
79
- const run = await workflow.createRun();
77
+ const run = await workflow.createRunAsync();
80
78
 
81
- workflow.watch({ runId: run.runId }, (record) => {
79
+ run.watch((record) => {
82
80
  console.log(record);
83
81
  });
84
82
 
85
- const result = await workflow.start({
86
- runId: run.runId,
83
+ const result = await run.start({
87
84
  inputData: {
88
85
  city: "New York",
89
86
  },
@@ -101,14 +98,13 @@ Resume workflow run and watch workflow step transitions:
101
98
  try {
102
99
  const workflow = mastraClient.getWorkflow("testWorkflow");
103
100
 
104
- const run = await workflow.createRun({ runId: prevRunId });
101
+ const run = await workflow.createRunAsync({ runId: prevRunId });
105
102
 
106
- workflow.watch({ runId: run.runId }, (record) => {
103
+ run.watch((record) => {
107
104
  console.log(record);
108
105
  });
109
106
 
110
- workflow.resume({
111
- runId: run.runId,
107
+ run.resume({
112
108
  step: "step-id",
113
109
  resumeData: { key: "value" },
114
110
  });
@@ -117,6 +113,30 @@ try {
117
113
  }
118
114
  ```
119
115
 
116
+ ### Stream Workflow
117
+
118
+ Stream workflow execution for real-time updates:
119
+
120
+ ```typescript
121
+ try {
122
+ const workflow = mastraClient.getWorkflow("testWorkflow");
123
+
124
+ const run = await workflow.createRunAsync();
125
+
126
+ const stream = await run.stream({
127
+ inputData: {
128
+ city: 'New York',
129
+ },
130
+ });
131
+
132
+ for await (const chunk of stream) {
133
+ console.log(JSON.stringify(chunk, null, 2));
134
+ }
135
+ } catch (e) {
136
+ console.error('Workflow error:', e);
137
+ }
138
+ ```
139
+
120
140
  ### Get Workflow Run result
121
141
 
122
142
  Get the result of a workflow run:
@@ -125,11 +145,10 @@ Get the result of a workflow run:
125
145
  try {
126
146
  const workflow = mastraClient.getWorkflow("testWorkflow");
127
147
 
128
- const run = await workflow.createRun();
148
+ const run = await workflow.createRunAsync();
129
149
 
130
150
  // start the workflow run
131
- const startResult = await workflow.start({
132
- runId: run.runId,
151
+ const startResult = await run.start({
133
152
  inputData: {
134
153
  city: "New York",
135
154
  },
@@ -1,6 +1,6 @@
1
1
  ---
2
- title: "Mastra Core"
3
- description: Documentation for the Mastra Class, the core entry point for managing agents, workflows, MCP servers, and server endpoints.
2
+ title: "Reference: Mastra Class | Core | Mastra Docs"
3
+ description: "Documentation for the `Mastra` class in Mastra, the core entry point for managing agents, workflows, MCP servers, and server endpoints."
4
4
  ---
5
5
 
6
6
  # Mastra Class
@@ -1,305 +1,157 @@
1
- # Memory Class Reference
1
+ ---
2
+ title: "Reference: Memory Class | Memory | Mastra Docs"
3
+ description: "Documentation for the `Memory` class in Mastra, which provides a robust system for managing conversation history and thread-based message storage."
4
+ ---
2
5
 
3
- The `Memory` class provides a robust system for managing conversation history and thread-based message storage in Mastra. It enables persistent storage of conversations, semantic search capabilities, and efficient message retrieval. You must configure a storage provider for conversation history, and if you enable semantic recall you will also need to provide a vector store and embedder.
4
-
5
- ## Basic Usage
6
-
7
- ```typescript copy showLineNumbers
8
- import { Memory } from "@mastra/memory";
9
- import { Agent } from "@mastra/core/agent";
10
-
11
- const agent = new Agent({
12
- memory: new Memory(),
13
- ...otherOptions,
14
- });
15
- ```
16
-
17
- ## Custom Configuration
18
-
19
- ```typescript copy showLineNumbers
20
- import { Memory } from "@mastra/memory";
21
- import { LibSQLStore, LibSQLVector } from "@mastra/libsql";
22
- import { Agent } from "@mastra/core/agent";
23
-
24
- const memory = new Memory({
25
- // Optional storage configuration - libsql will be used by default
26
- storage: new LibSQLStore({
27
- url: "file:./memory.db",
28
- }),
29
-
30
- // Optional vector database for semantic search
31
- vector: new LibSQLVector({
32
- url: "file:./vector.db",
33
- }),
34
-
35
- // Memory configuration options
36
- options: {
37
- // Number of recent messages to include
38
- lastMessages: 20,
39
-
40
- // Semantic search configuration
41
- semanticRecall: {
42
- topK: 3, // Number of similar messages to retrieve
43
- messageRange: {
44
- // Messages to include around each result
45
- before: 2,
46
- after: 1,
47
- },
48
- },
49
-
50
- // Working memory configuration
51
- workingMemory: {
52
- enabled: true,
53
- template: `
54
- # User
55
- - First Name:
56
- - Last Name:
57
- `,
58
- },
59
-
60
- // Thread configuration
61
- threads: {
62
- generateTitle: true, // Enable title generation using agent's model
63
- // Or use a different model for title generation
64
- // generateTitle: {
65
- // model: openai("gpt-4.1-nano"), // Use cheaper model for titles
66
- // instructions: "Generate a concise title based on the initial user message.", // Custom instructions for title
67
- // },
68
- },
69
- },
70
- });
71
-
72
- const agent = new Agent({
73
- memory,
74
- ...otherOptions,
75
- });
76
- ```
77
-
78
- ### Working Memory
79
-
80
- The working memory feature allows agents to maintain persistent information across conversations. When enabled, the Memory class automatically manages working memory updates using a dedicated tool call.
81
-
82
- Example configuration:
83
-
84
- ```typescript copy showLineNumbers
85
- const memory = new Memory({
86
- options: {
87
- workingMemory: {
88
- enabled: true,
89
- template: "# User\n- **First Name**:\n- **Last Name**:",
90
- },
91
- },
92
- });
93
- ```
94
-
95
- If no template is provided, the Memory class uses a default template that includes fields for user details, preferences, goals, and other contextual information in Markdown format. See the [Working Memory guide](/docs/memory/working-memory.mdx#designing-effective-templates) for detailed usage examples and best practices.
96
-
97
- ### Thread Title Generation
98
-
99
- The `generateTitle` feature automatically creates meaningful titles for conversation threads based on the user's first message. This helps organize and identify conversations in your application.
100
-
101
- #### Basic Usage
102
-
103
- ```typescript copy showLineNumbers
104
- const memory = new Memory({
105
- options: {
106
- threads: {
107
- generateTitle: true, // Use the agent's model for title generation
108
- },
109
- },
110
- });
111
- ```
112
-
113
- #### Cost Optimization with Custom Models and Instructions
114
-
115
- You can specify a different (typically cheaper) model and custom instructions for title generation while using a high-quality model for the main conversation:
116
-
117
- ```typescript copy showLineNumbers
118
- import { openai } from "@ai-sdk/openai";
119
-
120
- const memory = new Memory({
121
- options: {
122
- threads: {
123
- generateTitle: {
124
- model: openai("gpt-4.1-nano"), // Cheaper model for titles
125
- instructions: "Generate a concise, friendly title based on the initial user message.", // Custom title instructions
126
- },
127
- },
128
- },
129
- });
130
-
131
- const agent = new Agent({
132
- model: openai("gpt-4o"), // High-quality model for main conversation
133
- memory,
134
- });
135
- ```
136
-
137
- #### Dynamic Model Selection and Instructions
6
+ # Memory Class
138
7
 
139
- You can also use a function to dynamically determine the model and instructions based on runtime context:
140
-
141
- ```typescript copy showLineNumbers
142
- const memory = new Memory({
143
- options: {
144
- threads: {
145
- generateTitle: {
146
- model: (ctx: RuntimeContext) => {
147
- // Use different models based on context
148
- const userTier = ctx.get("userTier");
149
- return userTier === "premium"
150
- ? openai("gpt-4.1")
151
- : openai("gpt-4.1-nano");
152
- },
153
- instructions: (ctx: RuntimeContext) => {
154
- const language = ctx.get("userLanguage") || "English";
155
- return `Generate a concise, engaging title in ${language} based on the user's first message.`;
156
- },
157
- },
158
- },
159
- },
160
- });
161
- ```
162
- ### embedder
163
-
164
- An embedding model is required if `semanticRecall` is enabled.
165
-
166
- One option is to use `@mastra/fastembed`, which provides an on-device/local embedding model using [FastEmbed](https://github.com/Anush008/fastembed-js). This model runs locally and does not require API keys or network requests.
167
-
168
- To use it, first install the package:
169
-
170
- ```bash npm2yarn copy
171
- npm install @mastra/fastembed
172
- ```
8
+ The `Memory` class provides a robust system for managing conversation history and thread-based message storage in Mastra. It enables persistent storage of conversations, semantic search capabilities, and efficient message retrieval. You must configure a storage provider for conversation history, and if you enable semantic recall you will also need to provide a vector store and embedder.
173
9
 
174
- Then, configure it in your `Memory` instance:
10
+ ## Usage example
175
11
 
176
- ```typescript {2,7}
12
+ ```typescript filename="src/mastra/agents/test-agent.ts" showLineNumbers copy
177
13
  import { Memory } from "@mastra/memory";
178
- import { fastembed } from "@mastra/fastembed";
179
14
  import { Agent } from "@mastra/core/agent";
180
-
181
- const agent = new Agent({
182
- memory: new Memory({
183
- embedder: fastembed,
184
- // ... other memory config
185
- }),
186
- });
187
- ```
188
-
189
- Note that, depending on where you're deploying your project, your project may not deploy due to FastEmbeds large internal dependencies.
190
-
191
- Alternatively, you can use an API-based embedder like OpenAI (which doesn't have this problem):
192
-
193
- ```typescript {2,7}
194
- import { Memory } from "@mastra/memory";
195
15
  import { openai } from "@ai-sdk/openai";
196
- import { Agent } from "@mastra/core/agent";
197
16
 
198
- const agent = new Agent({
17
+ export const agent = new Agent({
18
+ name: "test-agent",
19
+ instructions: "You are an agent with memory.",
20
+ model: openai("gpt-4o"),
199
21
  memory: new Memory({
200
- embedder: openai.embedding("text-embedding-3-small"),
201
- }),
22
+ options: {
23
+ workingMemory: {
24
+ enabled: true
25
+ }
26
+ }
27
+ })
202
28
  });
203
29
  ```
204
30
 
205
- Mastra supports many embedding models through the [Vercel AI SDK](https://sdk.vercel.ai/docs/ai-sdk-core/embeddings), including options from OpenAI, Google, Mistral, and Cohere.
31
+ > To enable `workingMemory` on an agent, you’ll need a storage provider configured on your main Mastra instance. See [Mastra class](../core/mastra-class.mdx) for more information.
206
32
 
207
- ## Parameters
33
+ ## Constructor parameters
208
34
 
209
35
  <PropertiesTable
210
36
  content={[
211
37
  {
212
38
  name: "storage",
213
39
  type: "MastraStorage",
214
- description: "Storage implementation for persisting memory data",
40
+ description: "Storage implementation for persisting memory data. Defaults to `new DefaultStorage({ config: { url: \"file:memory.db\" } })` if not provided.",
215
41
  isOptional: true,
216
42
  },
217
43
  {
218
44
  name: "vector",
219
- type: "MastraVector",
220
- description: "Vector store for semantic search capabilities",
45
+ type: "MastraVector | false",
46
+ description: "Vector store for semantic search capabilities. Set to `false` to disable vector operations.",
221
47
  isOptional: true,
222
48
  },
223
49
  {
224
50
  name: "embedder",
225
- type: "EmbeddingModel",
226
- description:
227
- "Embedder instance for vector embeddings. Required when semantic recall is enabled",
51
+ type: "EmbeddingModel<string> | EmbeddingModelV2<string>",
52
+ description: "Embedder instance for vector embeddings. Required when semantic recall is enabled.",
228
53
  isOptional: true,
229
54
  },
230
55
  {
231
56
  name: "options",
232
57
  type: "MemoryConfig",
233
- description: "General memory configuration options",
58
+ description: "Memory configuration options.",
59
+ isOptional: true,
60
+ },
61
+ {
62
+ name: "processors",
63
+ type: "MemoryProcessor[]",
64
+ description: "Array of memory processors that can filter or transform messages before they're sent to the LLM.",
234
65
  isOptional: true,
235
66
  },
236
67
  ]}
237
68
  />
238
69
 
239
- ### options
70
+ ### Options parameters
240
71
 
241
72
  <PropertiesTable
242
73
  content={[
243
74
  {
244
75
  name: "lastMessages",
245
76
  type: "number | false",
246
- description:
247
- "Number of most recent messages to retrieve. Set to false to disable.",
77
+ description: "Number of most recent messages to retrieve. Set to false to disable.",
248
78
  isOptional: true,
249
79
  defaultValue: "10",
250
80
  },
251
81
  {
252
82
  name: "semanticRecall",
253
- type: "boolean | SemanticRecallConfig",
254
- description:
255
- "Enable semantic search in message history. Automatically enabled when vector store is provided.",
83
+ type: "boolean | { topK: number; messageRange: number | { before: number; after: number }; scope?: 'thread' | 'resource' }",
84
+ description: "Enable semantic search in message history. Can be a boolean or an object with configuration options. When enabled, requires both vector store and embedder to be configured.",
256
85
  isOptional: true,
257
86
  defaultValue: "false",
258
87
  },
259
- {
260
- name: "topK",
261
- type: "number",
262
- description:
263
- "Number of similar messages to retrieve when using semantic search",
264
- isOptional: true,
265
- defaultValue: "2",
266
- },
267
- {
268
- name: "messageRange",
269
- type: "number | { before: number; after: number }",
270
- description:
271
- "Range of messages to include around semantic search results",
272
- isOptional: true,
273
- defaultValue: "2",
274
- },
275
- {
276
- name: "scope",
277
- type: "'thread' | 'resource'",
278
- description:
279
- "Scope for semantic search. 'thread' searches within the current thread only (default). 'resource' searches across all threads for a given resourceId, allowing agents to recall information from any of the user's past conversations. The 'resource' scope is currently supported by LibSQL, Postgres, and Upstash storage adapters.",
280
- isOptional: true,
281
- defaultValue: "'thread'",
282
- },
283
88
  {
284
89
  name: "workingMemory",
285
- type: "{ enabled: boolean; template?: string }",
286
- description:
287
- "Configuration for working memory feature that allows persistent storage of user information across conversations. Working memory uses Markdown format to structure and store continuously relevant information.",
90
+ type: "WorkingMemory",
91
+ description: "Configuration for working memory feature. Can be `{ enabled: boolean; template?: string; schema?: ZodObject<any> | JSONSchema7; scope?: 'thread' | 'resource' }`, with `{ enabled: false }` to disable."
288
92
  isOptional: true,
289
- defaultValue:
290
- "{ enabled: false, template: '# User Information\\n- **First Name**:\\n- **Last Name**:\\n...' }",
93
+ defaultValue: "{ enabled: false, template: '# User Information\\n- **First Name**:\\n- **Last Name**:\\n...' }",
291
94
  },
292
95
  {
293
96
  name: "threads",
294
- type: "{ generateTitle?: boolean | { model: MastraLanguageModel | ((ctx: RuntimeContext) => MastraLanguageModel | Promise<MastraLanguageModel>), instructions?: string | ((ctx: RuntimeContext) => string | Promise<string>) } }",
295
- description:
296
- "Settings related to memory thread creation. `generateTitle` controls automatic thread title generation from the user's first message. Can be a boolean to enable/disable using the agent's model, or an object specifying a custom model or custom instructions for title generation (useful for cost optimization or title customization). Example: { generateTitle: { model: openai('gpt-4.1-nano'), instructions: 'Concise title based on the initial user message.' } }",
97
+ type: "{ generateTitle?: boolean | { model: DynamicArgument<MastraLanguageModel>; instructions?: DynamicArgument<string> } }",
98
+ description: "Settings related to memory thread creation. `generateTitle` controls automatic thread title generation from the user's first message. Can be a boolean or an object with custom model and instructions.",
297
99
  isOptional: true,
298
100
  defaultValue: "{ generateTitle: false }",
299
101
  },
300
102
  ]}
301
103
  />
302
104
 
105
+ ## Returns
106
+
107
+ <PropertiesTable
108
+ content={[
109
+ {
110
+ name: "memory",
111
+ type: "Memory",
112
+ description: "A new Memory instance with the specified configuration.",
113
+ },
114
+ ]}
115
+ />
116
+
117
+
118
+ ## Extended usage example
119
+
120
+ ```typescript filename="src/mastra/agents/test-agent.ts" showLineNumbers copy
121
+ import { Memory } from "@mastra/memory";
122
+ import { Agent } from "@mastra/core/agent";
123
+ import { openai } from "@ai-sdk/openai";
124
+ import { LibSQLStore, LibSQLVector } from "@mastra/libsql";
125
+
126
+ export const agent = new Agent({
127
+ name: "test-agent",
128
+ instructions: "You are an agent with memory.",
129
+ model: openai("gpt-4o"),
130
+ memory: new Memory({
131
+ storage: new LibSQLStore({
132
+ url: "file:./working-memory.db"
133
+ }),
134
+ vector: new LibSQLVector({
135
+ connectionUrl: "file:./vector-memory.db"
136
+ }),
137
+ options: {
138
+ lastMessages: 10,
139
+ semanticRecall: {
140
+ topK: 3,
141
+ messageRange: 2,
142
+ scope: 'resource'
143
+ },
144
+ workingMemory: {
145
+ enabled: true
146
+ },
147
+ threads: {
148
+ generateTitle: true
149
+ }
150
+ }
151
+ })
152
+ });
153
+ ```
154
+
303
155
  ### Related
304
156
 
305
157
  - [Getting Started with Memory](/docs/memory/overview.mdx)
@@ -1,23 +1,16 @@
1
- # createThread
1
+ ---
2
+ title: "Reference: Memory.createThread() | Memory | Mastra Docs"
3
+ description: "Documentation for the `Memory.createThread()` method in Mastra, which creates a new conversation thread in the memory system."
4
+ ---
2
5
 
3
- Creates a new conversation thread in the memory system. Each thread represents a distinct conversation or context and can contain multiple messages.
6
+ # Memory.createThread()
4
7
 
5
- ## Usage Example
8
+ The `.createThread()` method creates a new conversation thread in the memory system. Each thread represents a distinct conversation or context and can contain multiple messages.
6
9
 
7
- ```typescript
8
- import { Memory } from "@mastra/memory";
10
+ ## Usage Example
9
11
 
10
- const memory = new Memory({
11
- /* config */
12
- });
13
- const thread = await memory.createThread({
14
- resourceId: "user-123",
15
- title: "Support Conversation",
16
- metadata: {
17
- category: "support",
18
- priority: "high",
19
- },
20
- });
12
+ ```typescript copy
13
+ await memory?.createThread({ resourceId: "user-123" });
21
14
  ```
22
15
 
23
16
  ## Parameters
@@ -90,6 +83,33 @@ const thread = await memory.createThread({
90
83
  ]}
91
84
  />
92
85
 
86
+ ## Extended usage example
87
+
88
+ ```typescript filename="src/test-memory.ts" showLineNumbers copy
89
+ import { mastra } from "./mastra";
90
+
91
+ const agent = mastra.getAgent("agent");
92
+ const memory = await agent.getMemory();
93
+
94
+ const thread = await memory?.createThread({
95
+ resourceId: "user-123",
96
+ title: "Memory Test Thread",
97
+ metadata: {
98
+ source: "test-script",
99
+ purpose: "memory-testing"
100
+ }
101
+ });
102
+
103
+ const response = await agent.generate("message for agent", {
104
+ memory: {
105
+ thread: thread!.id,
106
+ resource: thread!.resourceId
107
+ }
108
+ });
109
+
110
+ console.log(response.text);
111
+ ```
112
+
93
113
  ### Related
94
114
 
95
115
  - [Memory Class Reference](/reference/memory/Memory.mdx)