@mastra/mcp-docs-server 0.13.21 → 0.13.22-alpha.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/.docs/organized/changelogs/%40internal%2Fstorage-test-utils.md +8 -8
  2. package/.docs/organized/changelogs/%40internal%2Ftypes-builder.md +2 -0
  3. package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +8 -0
  4. package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +29 -29
  5. package/.docs/organized/changelogs/%40mastra%2Fcloud.md +12 -12
  6. package/.docs/organized/changelogs/%40mastra%2Fcore.md +81 -81
  7. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +21 -21
  8. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +17 -17
  9. package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +18 -18
  10. package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +17 -17
  11. package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +54 -54
  12. package/.docs/organized/changelogs/%40mastra%2Fevals.md +12 -12
  13. package/.docs/organized/changelogs/%40mastra%2Ffirecrawl.md +9 -9
  14. package/.docs/organized/changelogs/%40mastra%2Flibsql.md +19 -19
  15. package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +17 -17
  16. package/.docs/organized/changelogs/%40mastra%2Fmem0.md +10 -10
  17. package/.docs/organized/changelogs/%40mastra%2Fmemory.md +9 -9
  18. package/.docs/organized/changelogs/%40mastra%2Fpg.md +10 -10
  19. package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +29 -29
  20. package/.docs/organized/changelogs/%40mastra%2Frag.md +10 -10
  21. package/.docs/organized/changelogs/%40mastra%2Fschema-compat.md +6 -0
  22. package/.docs/organized/changelogs/%40mastra%2Fserver.md +25 -25
  23. package/.docs/organized/changelogs/create-mastra.md +17 -17
  24. package/.docs/organized/changelogs/mastra.md +29 -29
  25. package/.docs/organized/code-examples/heads-up-game.md +4 -4
  26. package/.docs/raw/memory/memory-processors.mdx +5 -0
  27. package/.docs/raw/memory/overview.mdx +159 -104
  28. package/.docs/raw/memory/semantic-recall.mdx +5 -0
  29. package/.docs/raw/memory/working-memory.mdx +5 -0
  30. package/.docs/raw/reference/agents/agent.mdx +6 -0
  31. package/.docs/raw/reference/agents/generateVNext.mdx +2 -1
  32. package/.docs/raw/reference/agents/listAgents.mdx +68 -0
  33. package/.docs/raw/reference/agents/streamVNext.mdx +1 -1
  34. package/.docs/raw/reference/client-js/agents.mdx +54 -1
  35. package/.docs/raw/reference/client-js/workflows.mdx +36 -17
  36. package/.docs/raw/reference/core/mastra-class.mdx +2 -2
  37. package/.docs/raw/reference/memory/Memory.mdx +91 -239
  38. package/.docs/raw/reference/memory/createThread.mdx +36 -16
  39. package/.docs/raw/reference/memory/deleteMessages.mdx +39 -74
  40. package/.docs/raw/reference/memory/getThreadById.mdx +11 -11
  41. package/.docs/raw/reference/memory/getThreadsByResourceId.mdx +26 -29
  42. package/.docs/raw/reference/memory/getThreadsByResourceIdPaginated.mdx +46 -124
  43. package/.docs/raw/reference/memory/query.mdx +76 -90
  44. package/CHANGELOG.md +7 -0
  45. package/README.md +8 -0
  46. package/package.json +4 -4
@@ -1,5 +1,33 @@
1
1
  # mastra
2
2
 
3
+ ## 0.12.4-alpha.0
4
+
5
+ ### Patch Changes
6
+
7
+ - fix minor playground stuff for observability ([#7765](https://github.com/mastra-ai/mastra/pull/7765))
8
+
9
+ - Handle zod intersections in dynamic form ([#7768](https://github.com/mastra-ai/mastra/pull/7768))
10
+
11
+ - Playground UI - pass runtimeContext to client SDK get methods ([#7767](https://github.com/mastra-ai/mastra/pull/7767))
12
+
13
+ - Updated dependencies [[`5802bf5`](https://github.com/mastra-ai/mastra/commit/5802bf57f6182e4b67c28d7d91abed349a8d14f3), [`5bda53a`](https://github.com/mastra-ai/mastra/commit/5bda53a9747bfa7d876d754fc92c83a06e503f62), [`f26a8fd`](https://github.com/mastra-ai/mastra/commit/f26a8fd99fcb0497a5d86c28324430d7f6a5fb83), [`f0ab020`](https://github.com/mastra-ai/mastra/commit/f0ab02034532a4afb71a1ef4fe243f9a8dffde84), [`1a1fbe6`](https://github.com/mastra-ai/mastra/commit/1a1fbe66efb7d94abc373ed0dd9676adb8122454), [`36f39c0`](https://github.com/mastra-ai/mastra/commit/36f39c00dc794952dc3c11aab91c2fa8bca74b11)]:
14
+ - @mastra/core@0.16.4-alpha.0
15
+ - @mastra/deployer@0.16.4-alpha.0
16
+
17
+ ## 0.12.3
18
+
19
+ ### Patch Changes
20
+
21
+ - Client SDK Agents, Mastra server - support runtimeContext with GET requests ([#7734](https://github.com/mastra-ai/mastra/pull/7734))
22
+
23
+ - Add new scorers to list ([#7614](https://github.com/mastra-ai/mastra/pull/7614))
24
+
25
+ - fix playground UI issue about dynamic workflow exec in agent thread ([#7665](https://github.com/mastra-ai/mastra/pull/7665))
26
+
27
+ - Updated dependencies [[`b4379f7`](https://github.com/mastra-ai/mastra/commit/b4379f703fd74474f253420e8c3a684f2c4b2f8e), [`b4379f7`](https://github.com/mastra-ai/mastra/commit/b4379f703fd74474f253420e8c3a684f2c4b2f8e), [`2a6585f`](https://github.com/mastra-ai/mastra/commit/2a6585f7cb71f023f805d521d1c3c95fb9a3aa59), [`3d26e83`](https://github.com/mastra-ai/mastra/commit/3d26e8353a945719028f087cc6ac4b06f0ce27d2), [`dd9119b`](https://github.com/mastra-ai/mastra/commit/dd9119b175a8f389082f75c12750e51f96d65dca), [`d34aaa1`](https://github.com/mastra-ai/mastra/commit/d34aaa1da5d3c5f991740f59e2fe6d28d3e2dd91), [`56e55d1`](https://github.com/mastra-ai/mastra/commit/56e55d1e9eb63e7d9e41aa46e012aae471256812), [`ce1e580`](https://github.com/mastra-ai/mastra/commit/ce1e580f6391e94a0c6816a9c5db0a21566a262f), [`b2babfa`](https://github.com/mastra-ai/mastra/commit/b2babfa9e75b22f2759179e71d8473f6dc5421ed), [`d8c3ba5`](https://github.com/mastra-ai/mastra/commit/d8c3ba516f4173282d293f7e64769cfc8738d360), [`a566c4e`](https://github.com/mastra-ai/mastra/commit/a566c4e92d86c1671707c54359b1d33934f7cc13), [`0666082`](https://github.com/mastra-ai/mastra/commit/06660820230dcb1fa7c1d51c8254107afd68cd67), [`af333aa`](https://github.com/mastra-ai/mastra/commit/af333aa30fe6d1b127024b03a64736c46eddeca2), [`4c81b65`](https://github.com/mastra-ai/mastra/commit/4c81b65a28d128560bdf63bc9b8a1bddd4884812), [`3863c52`](https://github.com/mastra-ai/mastra/commit/3863c52d44b4e5779968b802d977e87adf939d8e), [`6424c7e`](https://github.com/mastra-ai/mastra/commit/6424c7ec38b6921d66212431db1e0958f441b2a7), [`db94750`](https://github.com/mastra-ai/mastra/commit/db94750a41fd29b43eb1f7ce8e97ba8b9978c91b), [`a66a371`](https://github.com/mastra-ai/mastra/commit/a66a3716b00553d7f01842be9deb34f720b10fab), [`69fc3cd`](https://github.com/mastra-ai/mastra/commit/69fc3cd0fd814901785bdcf49bf536ab1e7fd975)]:
28
+ - @mastra/core@0.16.3
29
+ - @mastra/deployer@0.16.3
30
+
3
31
  ## 0.12.3-alpha.1
4
32
 
5
33
  ### Patch Changes
@@ -270,33 +298,5 @@
270
298
 
271
299
  ### Patch Changes
272
300
 
273
- - [#7329](https://github.com/mastra-ai/mastra/pull/7329) [`26b0d7c`](https://github.com/mastra-ai/mastra/commit/26b0d7c7cba46469351d453714e119ac7aae9da2) Thanks [@wardpeet](https://github.com/wardpeet)! - Cleanup pkg output
274
-
275
- - [#7218](https://github.com/mastra-ai/mastra/pull/7218) [`f539199`](https://github.com/mastra-ai/mastra/commit/f53919950a9320b292732e0cfcdf61cdae6c8742) Thanks [@TheIsrael1](https://github.com/TheIsrael1)! - mastra start - load env files, custom env. Deperecate --env flag for mastra build
276
-
277
- - [#5816](https://github.com/mastra-ai/mastra/pull/5816) [`ab48c97`](https://github.com/mastra-ai/mastra/commit/ab48c979098ea571faf998a55d3a00e7acd7a715) Thanks [@dane-ai-mastra](https://github.com/apps/dane-ai-mastra)! - dependencies updates:
278
- - Updated dependency [`zod-to-json-schema@^3.24.6` ↗︎](https://www.npmjs.com/package/zod-to-json-schema/v/3.24.6) (from `^3.24.5`, in `dependencies`)
279
-
280
- - [#6946](https://github.com/mastra-ai/mastra/pull/6946) [`8f22a2c`](https://github.com/mastra-ai/mastra/commit/8f22a2c35a0a9ddd2f34a9c3ebb6ff6668aa9ea9) Thanks [@LekoArts](https://github.com/LekoArts)! - During package installation do not print audit, funding or any non-error logs
281
-
282
- - Updated dependencies [[`ab48c97`](https://github.com/mastra-ai/mastra/commit/ab48c979098ea571faf998a55d3a00e7acd7a715), [`3e0bd2a`](https://github.com/mastra-ai/mastra/commit/3e0bd2aa0a19823939f9a973d44791f4927ff5c3), [`ff89505`](https://github.com/mastra-ai/mastra/commit/ff895057c8c7e91a5535faef46c5e5391085ddfa), [`183dc95`](https://github.com/mastra-ai/mastra/commit/183dc95596f391b977bd1a2c050b8498dac74891), [`a1111e2`](https://github.com/mastra-ai/mastra/commit/a1111e24e705488adfe5e0a6f20c53bddf26cb22), [`61debef`](https://github.com/mastra-ai/mastra/commit/61debefd80ad3a7ed5737e19df6a23d40091689a), [`9beaeff`](https://github.com/mastra-ai/mastra/commit/9beaeffa4a97b1d5fd01a7f8af8708b16067f67c), [`ad78bfc`](https://github.com/mastra-ai/mastra/commit/ad78bfc4ea6a1fff140432bf4f638e01af7af668), [`9eee594`](https://github.com/mastra-ai/mastra/commit/9eee594e35e0ca2a650fcc33fa82009a142b9ed0), [`979912c`](https://github.com/mastra-ai/mastra/commit/979912cfd180aad53287cda08af771df26454e2c), [`7dcf4c0`](https://github.com/mastra-ai/mastra/commit/7dcf4c04f44d9345b1f8bc5d41eae3f11ac61611), [`ad78bfc`](https://github.com/mastra-ai/mastra/commit/ad78bfc4ea6a1fff140432bf4f638e01af7af668), [`48f0742`](https://github.com/mastra-ai/mastra/commit/48f0742662414610dc9a7a99d45902d059ee123d), [`12adcc8`](https://github.com/mastra-ai/mastra/commit/12adcc8929db79b3cf7b83237ebaf6ba2db0181e), [`0ce418a`](https://github.com/mastra-ai/mastra/commit/0ce418a1ccaa5e125d4483a9651b635046152569), [`8387952`](https://github.com/mastra-ai/mastra/commit/838795227b4edf758c84a2adf6f7fba206c27719), [`5eca5d2`](https://github.com/mastra-ai/mastra/commit/5eca5d2655788863ea0442a46c9ef5d3c6dbe0a8), [`8f22a2c`](https://github.com/mastra-ai/mastra/commit/8f22a2c35a0a9ddd2f34a9c3ebb6ff6668aa9ea9)]:
283
- - @mastra/core@0.15.3-alpha.4
284
- - @mastra/deployer@0.15.3-alpha.4
285
- - @mastra/mcp@0.11.3-alpha.1
286
-
287
- ## 0.11.3-alpha.1
288
-
289
- ### Patch Changes
290
-
291
- - [#7090](https://github.com/mastra-ai/mastra/pull/7090) [`e3d8fea`](https://github.com/mastra-ai/mastra/commit/e3d8feaacfb8b5c5c03c13604cc06ea2873d45fe) Thanks [@K-Mistele](https://github.com/K-Mistele)! - Support Inngest flow control features for Mastra Inngest workflows
292
-
293
- - [#7210](https://github.com/mastra-ai/mastra/pull/7210) [`87de958`](https://github.com/mastra-ai/mastra/commit/87de95832a7bdfa9ecb14473c84dc874331f1a7d) Thanks [@mfrachet](https://github.com/mfrachet)! - fix chat outline
294
-
295
- - Updated dependencies [[`aedbbfa`](https://github.com/mastra-ai/mastra/commit/aedbbfa064124ddde039111f12629daebfea7e48), [`71b657b`](https://github.com/mastra-ai/mastra/commit/71b657bffebbdcfdf1ce9c6d72003041bd6e200a), [`f643c65`](https://github.com/mastra-ai/mastra/commit/f643c651bdaf57c2343cf9dbfc499010495701fb), [`fef7375`](https://github.com/mastra-ai/mastra/commit/fef737534574f41b432a7361a285f776c3bac42b), [`6d98856`](https://github.com/mastra-ai/mastra/commit/6d98856ed7cf56cbd6c4e02b3254e3dfb1e455db), [`e3d8fea`](https://github.com/mastra-ai/mastra/commit/e3d8feaacfb8b5c5c03c13604cc06ea2873d45fe), [`3412597`](https://github.com/mastra-ai/mastra/commit/3412597a6644c0b6bf3236d6e319ed1450c5bae8)]:
296
- - @mastra/core@0.15.3-alpha.3
297
- - @mastra/deployer@0.15.3-alpha.3
298
-
299
- ## 0.11.3-alpha.0
300
-
301
301
 
302
- ... 5458 more lines hidden. See full changelog in package directory.
302
+ ... 5486 more lines hidden. See full changelog in package directory.
@@ -7,15 +7,15 @@
7
7
  },
8
8
  "dependencies": {
9
9
  "@ai-sdk/openai": "^1.3.23",
10
- "@mastra/core": "^0.16.2",
11
- "@mastra/libsql": "^0.14.0",
10
+ "@mastra/core": "^0.16.3",
11
+ "@mastra/libsql": "^0.14.1",
12
12
  "@mastra/loggers": "^0.10.11",
13
- "@mastra/memory": "^0.15.0",
13
+ "@mastra/memory": "^0.15.1",
14
14
  "zod": "^3.25.76"
15
15
  },
16
16
  "devDependencies": {
17
17
  "@types/node": "^24.3.0",
18
- "mastra": "^0.12.2",
18
+ "mastra": "^0.12.3",
19
19
  "typescript": "^5.9.2"
20
20
  }
21
21
  }
@@ -1,3 +1,8 @@
1
+ ---
2
+ title: "Memory Processors | Memory | Mastra Docs"
3
+ description: "Learn how to use memory processors in Mastra to filter, trim, and transform messages before they're sent to the language model to manage context window limits."
4
+ ---
5
+
1
6
  # Memory Processors
2
7
 
3
8
  Memory Processors allow you to modify the list of messages retrieved from memory _before_ they are added to the agent's context window and sent to the LLM. This is useful for managing context size, filtering content, and optimizing performance.
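
For orientation, here is a minimal sketch of the idea, assuming the built-in `TokenLimiter` processor exported from `@mastra/memory/processors`:

```typescript
import { Memory } from "@mastra/memory";
import { TokenLimiter } from "@mastra/memory/processors";

// Sketch: trim recalled messages to roughly 127k tokens before they are
// added to the agent's context window and sent to the LLM.
const memory = new Memory({
  processors: [new TokenLimiter(127000)],
});
```
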
@@ -1,170 +1,218 @@
1
- import { Steps } from "nextra/components";
1
+ ---
2
+ title: "Memory Overview | Memory | Mastra Docs"
3
+ description: "Learn how Mastra's memory system works with working memory, conversation history, and semantic recall."
4
+ ---
5
+
6
+ import { Steps, Callout } from "nextra/components";
2
7
 
3
8
  # Memory overview
4
9
 
5
- Memory is how agents manage the context that's available to them, it's a condensation of all chat messages into their context window.
10
+ Memory in Mastra helps agents manage context across conversations by condensing relevant information into the language model’s context window.
11
+
12
+ Mastra supports three complementary memory systems: [working memory](./working-memory.mdx), [conversation history](#conversation-history), and [semantic recall](./semantic-recall.mdx). Together, they allow agents to track preferences, maintain conversational flow, and retrieve relevant historical messages.
13
+
14
+ To persist and recall information between conversations, memory requires a storage adapter.
15
+
16
+ Supported options include:
17
+
18
+ - [Memory with LibSQL](/examples/memory/memory-with-libsql)
19
+ - [Memory with Postgres](/examples/memory/memory-with-pg)
20
+ - [Memory with Upstash](/examples/memory/memory-with-upstash)
21
+
22
+ ## Types of memory
6
23
 
7
- ## The Context Window
24
+ All memory types are [thread-scoped](./working-memory.mdx#thread-scoped-memory-default) by default, meaning they apply only to a single conversation. [Resource-scoped](./working-memory.mdx#resource-scoped-memory) configuration allows working memory and semantic recall to persist across all threads associated with the same user or entity.
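
As a hedged sketch, switching working memory to the resource scope might look like the following, assuming the `scope` option described in the working memory docs:

```typescript
import { Memory } from "@mastra/memory";

// Sketch: persist working memory per user/entity instead of per thread.
const memory = new Memory({
  options: {
    workingMemory: {
      enabled: true,
      scope: "resource", // assumed option name; the default behavior is thread-scoped
    },
  },
});
```
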
8
25
 
9
- The context window is the total information visible to the language model at any given time.
10
26
 
11
- In Mastra, context is broken up into three parts: system instructions and information about the user ([working memory](./working-memory.mdx)), recent messages ([message history](#conversation-history)), and older messages that are relevant to the user’s query ([semantic recall](./semantic-recall.mdx)).
27
+ ### Working memory
12
28
 
13
- Working memory can persist at different scopes - either per conversation thread (default) or across all threads for the same user (resource-scoped), enabling persistent user profiles that remember context across conversations.
29
+ Stores persistent user-specific details such as names, preferences, goals, and other structured data. Uses [Markdown templates](./working-memory-template.mdx) or [Zod schemas](./working-memory-schema.mdx) to define structure.
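
A minimal sketch of a template-based configuration, assuming the `template` option covered in the working memory template docs:

```typescript
import { Memory } from "@mastra/memory";

// Sketch: a Markdown template gives working memory a predictable shape.
const memory = new Memory({
  options: {
    workingMemory: {
      enabled: true,
      template: `# User Profile
- Name:
- Location:
- Preferences:`,
    },
  },
});
```
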
14
30
 
15
- In addition, we provide [memory processors](./memory-processors.mdx) to trim context or remove information if the context is too long.
31
+ ### Conversation history
16
32
 
17
- ## Quick Start
33
+ Captures recent messages from the current conversation, providing short-term continuity and maintaining dialogue flow.
18
34
 
19
- The fastest way to see memory in action is using the built-in development playground.
35
+ ### Semantic recall
20
36
 
21
- If you haven't already, create a new Mastra project following the main [Getting Started guide](/docs/getting-started/installation).
37
+ Retrieves older messages from past conversations based on semantic relevance. Matches are retrieved using vector search and can include surrounding context for better comprehension.
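
A hedged sketch of enabling it, assuming the `topK` and `messageRange` options described in semantic-recall.mdx (a vector store and embedder are also required; see that page):

```typescript
import { Memory } from "@mastra/memory";

// Sketch: recall the 3 most relevant past messages, plus 2 surrounding
// messages of context around each match.
const memory = new Memory({
  options: {
    semanticRecall: {
      topK: 3,
      messageRange: 2,
    },
  },
});
```
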
22
38
 
23
- <Steps>
39
+ ## How memory works together
24
40
 
25
- ### Install the memory package
41
+ Mastra combines all memory types into a single context window. If the total exceeds the model’s token limit, use [memory processors](./memory-processors.mdx) to trim or filter messages before sending them to the model.
26
42
 
27
- ```bash npm2yarn copy
28
- npm install @mastra/memory@latest
43
+ ## Getting started
44
+
45
+ To use memory, install the required dependencies:
46
+
47
+ ```bash copy
48
+ npm install @mastra/core @mastra/memory @mastra/libsql
29
49
  ```
30
50
 
31
- ### Create an agent and attach a `Memory` instance
51
+ ### Shared storage
32
52
 
33
- ```typescript filename="src/mastra/agents/index.ts" {6-18}
34
- import { Agent } from "@mastra/core/agent";
35
- import { Memory } from "@mastra/memory";
36
- import { openai } from "@ai-sdk/openai";
53
+ To share memory across agents, add a storage adapter to the main Mastra instance. Any agent with memory enabled will use this shared storage to store and recall interactions.
54
+
55
+ ```typescript {6-8} filename="src/mastra/index.ts" showLineNumbers copy
56
+ import { Mastra } from "@mastra/core/mastra";
37
57
  import { LibSQLStore } from "@mastra/libsql";
38
58
 
39
- // Initialize memory with LibSQLStore for persistence
40
- const memory = new Memory({
59
+ export const mastra = new Mastra({
60
+ // ...
41
61
  storage: new LibSQLStore({
42
- url: "file:../mastra.db", // Or your database URL
43
- }),
44
- });
45
-
46
- export const myMemoryAgent = new Agent({
47
- name: "MemoryAgent",
48
- instructions: "...",
49
- model: openai("gpt-4o"),
50
- memory,
62
+ url: ":memory:"
63
+ })
51
64
  });
52
65
  ```
53
66
 
54
- ### Start the Development Server
55
-
56
- ```bash npm2yarn copy
57
- npm run dev
58
- ```
67
+ ### Adding working memory to agents
59
68
 
60
- ### Open the playground and select your `MemoryAgent`
69
+ Enable working memory by passing a `Memory` instance to the agent's `memory` parameter and setting `workingMemory.enabled` to `true`:
61
70
 
62
- Open the playground at [http://localhost:4111](http://localhost:4111). Send a few messages and notice that it remembers information across turns:
71
+ ```typescript {1,6-12} filename="src/mastra/agents/test-agent.ts" showLineNumbers copy
72
+ import { Memory } from "@mastra/memory";
73
+ import { Agent } from "@mastra/core/agent";
63
74
 
64
- ```
65
- ➡️ You: My favorite color is blue.
66
- ⬅️ Agent: Got it! I'll remember that your favorite color is blue.
67
- ➡️ You: What is my favorite color?
68
- ⬅️ Agent: Your favorite color is blue.
75
+ export const testAgent = new Agent({
76
+ // ..
77
+ memory: new Memory({
78
+ options: {
79
+ workingMemory: {
80
+ enabled: true
81
+ }
82
+ }
83
+ })
84
+ })
69
85
  ```
70
86
 
71
- </Steps>
87
+ ## Dedicated storage
72
88
 
73
- ## Memory Threads
89
+ Agents can be configured with their own dedicated storage, keeping tasks, conversations, and recalled information separate across agents.
74
90
 
75
- Mastra organizes memory into threads, which are records that identify specific conversation histories, using two identifiers:
91
+ ### Adding storage to agents
76
92
 
77
- 1. **`threadId`**: A globally unique conversation id (e.g., `support_123`). Thread IDs must be unique across all resources.
78
- 2. **`resourceId`**: The user or entity id that owns each thread (e.g., `user_123`, `org_456`).
93
+ To assign dedicated storage to an agent, install and import the required dependency and pass a `storage` instance to the `Memory` constructor:
79
94
 
80
- The `resourceId` is particularly important for [resource-scoped working memory](./working-memory.mdx#resource-scoped-memory), which allows memory to persist across all conversation threads for the same user.
95
+ ```typescript {3, 9-11} filename="src/mastra/agents/test-agent.ts" showLineNumbers copy
96
+ import { Memory } from "@mastra/memory";
97
+ import { Agent } from "@mastra/core/agent";
98
+ import { LibSQLStore } from "@mastra/libsql";
81
99
 
82
- ```typescript {2,3}
83
- const response = await myMemoryAgent.stream("Hello, my name is Alice.", {
84
- resourceId: "user_alice",
85
- threadId: "conversation_123",
100
+ export const testAgent = new Agent({
101
+ // ...
102
+ memory: new Memory({
103
+ // ...
104
+ storage: new LibSQLStore({
105
+ url: "file:agent-memory.db"
106
+ })
107
+ // ...
108
+ })
86
109
  });
87
110
  ```
88
111
 
89
- **Important:** without these ID's your agent will not use memory, even if memory is properly configured. The playground handles this for you, but you need to add ID's yourself when using memory in your application.
112
+ ## Memory threads
90
113
 
91
- > **Thread ID Uniqueness:** Each thread ID must be globally unique across all resources. A thread is permanently associated with the resource that created it. If you need to have similar thread names for different resources (e.g., a "general" thread for multiple users), include the resource ID in the thread ID: `${resourceId}-general` or `user_alice_general`.
114
+ Mastra organizes memory into threads, which are records that group related interactions, using two identifiers:
92
115
 
93
- ### Thread Title Generation
116
+ 1. **`thread`**: A globally unique ID representing the conversation (e.g., `support_123`). Must be unique across all resources.
117
+ 2. **`resource`**: The user or entity that owns the thread (e.g., `user_123`, `org_456`).
94
118
 
95
- Mastra can automatically generate meaningful titles for conversation threads based on the user's first message. This helps organize and identify conversations in your application UI.
119
+ The `resource` is especially important for [resource-scoped memory](./working-memory.mdx#resource-scoped-memory), which allows memory to persist across all threads associated with the same user or entity.
96
120
 
97
- ```typescript {3-7}
98
- const memory = new Memory({
99
- options: {
100
- threads: {
101
- generateTitle: true, // Enable automatic title generation
102
- },
103
- },
121
+ ```typescript {4} showLineNumbers
122
+ const stream = await agent.stream("message for agent", {
123
+ memory: {
124
+ thread: "user-123",
125
+ resource: "test-123"
126
+ }
104
127
  });
105
128
  ```
106
129
 
107
- By default, title generation uses the same model and default instructions as your agent. For customization or cost optimization, you can specify a different model or provide custom instructions specifically for title generation:
130
+ <Callout type="warning">
131
+ Even with memory configured, agents won’t store or recall information unless both `thread` and `resource` are provided.
132
+ </Callout>
108
133
 
109
- ```typescript {5-7}
110
- const memory = new Memory({
111
- options: {
112
- threads: {
113
- generateTitle: {
114
- model: openai("gpt-4.1-nano"), // Use cheaper model for titles
115
- instructions: "Generate a concise title for this conversation based on the first user message.",
116
- },
134
+ > Mastra Playground sets `thread` and `resource` IDs automatically. In your own application, you must provide them manually as part of each `.generate()` or `.stream()` call.
135
+
136
+ ### Thread title generation
137
+
138
+ Mastra can automatically generate descriptive thread titles based on the user's first message. Enable this by setting `generateTitle` to `true`. This improves organization and makes it easier to display conversations in your UI.
139
+
140
+ ```typescript {3-7} showLineNumbers
141
+ export const testAgent = new Agent({
142
+ memory: new Memory({
143
+ options: {
144
+ threads: {
145
+ generateTitle: true,
146
+ }
117
147
  },
118
- },
148
+ })
119
149
  });
120
150
  ```
121
151
 
122
- Title generation happens asynchronously after the agent responds, so it doesn't impact response time. See the [full configuration reference](../../reference/memory/Memory.mdx#thread-title-generation) for more details and examples.
152
+ > Title generation runs asynchronously after the agent responds and does not affect response time. See the [full configuration reference](../../reference/memory/Memory.mdx#thread-title-generation) for details and examples.
123
153
 
124
- ## Conversation History
154
+ #### Optimizing title generation
125
155
 
126
- By default, the `Memory` instance includes the [last 10 messages](../../reference/memory/Memory.mdx) from the current Memory thread in each new request. This provides the agent with immediate conversational context.
156
+ Titles are generated using your agent's model by default. To optimize cost or behavior, provide a smaller `model` and custom `instructions`. This keeps title generation separate from main conversation logic.
127
157
 
128
- ```ts {3}
129
- const memory = new Memory({
130
- options: {
131
- lastMessages: 10,
132
- },
158
+ ```typescript {5-9} showLineNumbers
159
+ export const testAgent = new Agent({
160
+ // ...
161
+ memory: new Memory({
162
+ options: {
163
+ threads: {
164
+ generateTitle: {
165
+ model: openai("gpt-4.1-nano"),
166
+ instructions: "Generate a concise title based on the user's first message",
167
+ },
168
+ },
169
+ }
170
+ })
133
171
  });
134
172
  ```
135
173
 
136
- **Important:** Only send the newest user message in each agent call. Mastra handles retrieving and injecting the necessary history. Sending the full history yourself will cause duplication. See the [AI SDK Memory Example](../../docs/frameworks/agentic-uis/ai-sdk.mdx) for how to handle this with when using the `useChat` frontend hooks.
174
+ #### Dynamic model selection and instructions
137
175
 
138
- ### Storage Configuration
176
+ You can configure thread title generation dynamically by passing functions to `model` and `instructions`. These functions receive the `runtimeContext` object, allowing you to adapt title generation based on user-specific values.
139
177
 
140
- Conversation history relies on a [storage adapter](/reference/memory/Memory#parameters) to store messages.
141
- By default it uses the same storage provided to the [main Mastra instance](https://mastra.ai/reference/core/mastra-class#initialization)
178
+ ```typescript {7-16} showLineNumbers
179
+ export const testAgent = new Agent({
180
+ // ...
181
+ memory: new Memory({
182
+ options: {
183
+ threads: {
184
+ generateTitle: {
185
+ model: ({ runtimeContext }) => {
186
+ const userTier = runtimeContext.get("userTier");
187
+ return userTier === "premium" ? openai("gpt-4.1") : openai("gpt-4.1-nano");
188
+ },
189
+ instructions: ({ runtimeContext }) => {
190
+ const language = runtimeContext.get("userLanguage") || "English";
191
+ return `Generate a concise, engaging title in ${language} based on the user's first message.`;
192
+ }
193
+ }
194
+ }
195
+ }
196
+ })
197
+ });
198
+ ```
142
199
 
143
- If neither the `Memory` instance nor the `Mastra` object specify a storage provider, Mastra will not persist memory data across application restarts or deployments. For any deployment beyond local testing you should provide your own storage configuration either on `Mastra` or directly within `new Memory()`.
200
+ ## Increasing conversation history
144
201
 
145
- When `storage` **is** given on the `Mastra` instance it will automatically be used by every `Memory` attached to agents. In that case you do not need to pass `storage` to `new Memory()` unless you want a per-agent override.
202
+ By default, each request includes the last 10 messages from the current memory thread, giving the agent short-term conversational context. This limit can be increased using the `lastMessages` parameter.
146
203
 
147
- ```ts {7-9}
148
- import { Memory } from "@mastra/memory";
149
- import { Agent } from "@mastra/core/agent";
150
- import { LibSQLStore } from "@mastra/libsql";
151
-
152
- const agent = new Agent({
204
+ ```typescript {3-7} showLineNumbers
205
+ export const testAgent = new Agent({
206
+ // ...
153
207
  memory: new Memory({
154
- storage: new LibSQLStore({
155
- url: "file:./local.db",
156
- }),
157
- }),
208
+ options: {
209
+ lastMessages: 100
210
+ },
211
+ })
158
212
  });
159
213
  ```
160
214
 
161
- **Storage code Examples**:
162
-
163
- - [LibSQL](/examples/memory/memory-with-libsql)
164
- - [Postgres](/examples/memory/memory-with-pg)
165
- - [Upstash](/examples/memory/memory-with-upstash)
166
-
167
- ## Viewing Retrieved Messages
215
+ ## Viewing retrieved messages
168
216
 
169
217
  If tracing is enabled in your Mastra deployment and memory is configured with `lastMessages` and/or `semanticRecall`, the agent’s trace output will show all messages retrieved for context—including both recent conversation history and messages recalled via semantic recall.
170
218
 
@@ -172,6 +220,13 @@ This is helpful for debugging, understanding agent decisions, and verifying that
172
220
 
173
221
  For more details on enabling and configuring tracing, see [Tracing](../observability/tracing).
174
222
 
223
+ ## Local development with LibSQL
224
+
225
+ For local development with `LibSQLStore`, you can inspect stored memory using the [SQLite Viewer](https://marketplace.visualstudio.com/items?itemName=qwtel.sqlite-viewer) extension in VS Code.
226
+
227
+ ![SQLite Viewer](/image/memory/memory-sqlite-viewer.jpg)
228
+
229
+
175
230
  ## Next Steps
176
231
 
177
232
  Now that you understand the core concepts, continue to [semantic recall](./semantic-recall.mdx) to learn how to add RAG memory to your Mastra agents.
@@ -1,3 +1,8 @@
1
+ ---
2
+ title: "Semantic Recall | Memory | Mastra Docs"
3
+ description: "Learn how to use semantic recall in Mastra to retrieve relevant messages from past conversations using vector search and embeddings."
4
+ ---
5
+
1
6
  # Semantic Recall
2
7
 
3
8
  If you ask your friend what they did last weekend, they will search in their memory for events associated with "last weekend" and then tell you what they did. That's sort of like how semantic recall works in Mastra.
@@ -1,3 +1,8 @@
1
+ ---
2
+ title: "Working Memory | Memory | Mastra Docs"
3
+ description: "Learn how to configure working memory in Mastra to store persistent user data and preferences."
4
+ ---
5
+
1
6
  import YouTube from "@/components/youtube";
2
7
 
3
8
  # Working Memory
@@ -54,6 +54,12 @@ export const agent = new Agent({
54
54
  isOptional: false,
55
55
  description: "The language model used by the agent. Can be provided statically or resolved at runtime.",
56
56
  },
57
+ {
58
+ name: "agents",
59
+ type: "Record<string, Agent> | ({ runtimeContext: RuntimeContext }) => Record<string, Agent> | Promise<Record<string, Agent>>",
60
+ isOptional: true,
61
+ description: "Sub-Agents that the agent can access. Can be provided statically or resolved dynamically.",
62
+ },
57
63
  {
58
64
  name: "tools",
59
65
  type: "ToolsInput | ({ runtimeContext: RuntimeContext }) => ToolsInput | Promise<ToolsInput>",
@@ -224,7 +224,8 @@ const aiSdkResult = await agent.generateVNext("message for agent", {
224
224
  name: "output",
225
225
  type: "Zod schema | JsonSchema7",
226
226
  isOptional: true,
227
- description: "Schema for structured output generation (Zod schema or JSON Schema).",
227
+ description:
228
+ "**Deprecated.** Use structuredOutput with maxSteps:1 to achieve the same thing. Defines the expected structure of the output. Can be a JSON Schema object or a Zod schema.",
228
229
  },
229
230
  {
230
231
  name: "memory",
@@ -0,0 +1,68 @@
1
+ ---
2
+ title: "Reference: Agent.listAgents() | Agents | Mastra Docs"
3
+ description: "Documentation for the `Agent.listAgents()` method in Mastra agents, which retrieves the sub-agents that the agent can access."
4
+ ---
5
+
6
+ # Agent.listAgents()
7
+
8
+ The `.listAgents()` method retrieves the sub-agents configured for an agent, resolving them if they are provided as a function. These sub-agents enable the agent to access other agents and perform more complex actions.
9
+
10
+ ## Usage example
11
+
12
+ ```typescript copy
13
+ await agent.listAgents();
14
+ ```
15
+
16
+ ## Parameters
17
+
18
+ <PropertiesTable
19
+ content={[
20
+ {
21
+ name: "options",
22
+ type: "{ runtimeContext?: RuntimeContext }",
23
+ isOptional: true,
24
+ defaultValue: "{}",
25
+ description: "Optional configuration object containing runtime context.",
26
+ },
27
+ ]}
28
+ />
29
+
30
+ ## Returns
31
+
32
+ <PropertiesTable
33
+ content={[
34
+ {
35
+ name: "agents",
36
+ type: "Promise<Record<string, Agent>>",
37
+ description: "A promise that resolves to a record of agent names to their corresponding Agent instances.",
38
+ },
39
+ ]}
40
+ />
41
+
42
+ ## Extended usage example
43
+
44
+ ```typescript copy
45
+ import { RuntimeContext } from "@mastra/core/runtime-context";
46
+
47
+ await agent.listAgents({
48
+ runtimeContext: new RuntimeContext()
49
+ });
50
+ ```
51
+
52
+ ### Options parameters
53
+
54
+ <PropertiesTable
55
+ content={[
56
+ {
57
+ name: "runtimeContext",
58
+ type: "RuntimeContext",
59
+ isOptional: true,
60
+ defaultValue: "new RuntimeContext()",
61
+ description: "Runtime context for dependency injection and contextual information.",
62
+ },
63
+ ]}
64
+ />
65
+
66
+ ## Related
67
+
68
+ - [Agents overview](../../docs/agents/overview.mdx)
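
For context, a hedged sketch of how sub-agents might be registered and then listed, assuming the `agents` constructor option documented in `agent.mdx` in this release:

```typescript
import { Agent } from "@mastra/core/agent";
import { openai } from "@ai-sdk/openai";

// Hypothetical sub-agent, used purely for illustration.
const researchAgent = new Agent({
  name: "research-agent",
  instructions: "You research topics and return short summaries.",
  model: openai("gpt-4o-mini"),
});

export const testAgent = new Agent({
  name: "test-agent",
  instructions: "You coordinate work across sub-agents.",
  model: openai("gpt-4o"),
  // `agents` can also be a function of runtimeContext, per agent.mdx.
  agents: { researchAgent },
});

// Resolves the configured sub-agents (invoking the function form if one was provided).
const subAgents = await testAgent.listAgents();
console.log(Object.keys(subAgents)); // ["researchAgent"]
```
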
@@ -226,7 +226,7 @@ const aiSdkStream = await agent.streamVNext("message for agent", {
226
226
  type: "Zod schema | JsonSchema7",
227
227
  isOptional: true,
228
228
  description:
229
- "Defines the expected structure of the output. Can be a JSON Schema object or a Zod schema.",
229
+ "**Deprecated.** Use structuredOutput with maxSteps:1 to achieve the same thing. Defines the expected structure of the output. Can be a JSON Schema object or a Zod schema.",
230
230
  },
231
231
  {
232
232
  name: "memory",
@@ -2,6 +2,7 @@
2
2
  title: Mastra Client Agents API
3
3
  description: Learn how to interact with Mastra AI agents, including generating responses, streaming interactions, and managing agent tools using the client-js SDK.
4
4
  ---
5
+ import { StreamVNextCallout } from "@/components/streamVNext-callout.tsx"
5
6
 
6
7
  # Agents API
7
8
 
@@ -105,7 +106,7 @@ Client-side tools allow you to execute custom functions on the client side when
105
106
  #### Basic Usage
106
107
 
107
108
  ```typescript
108
- import { createTool } from '@mastra/core/tools';
109
+ import { createTool } from '@mastra/client-js';
109
110
  import { z } from 'zod';
110
111
 
111
112
  const colorChangeTool = createTool({
@@ -158,3 +159,55 @@ const evals = await agent.evals();
158
159
  // Get live evaluations
159
160
  const liveEvals = await agent.liveEvals();
160
161
  ```
162
+
163
+
164
+ ### Stream VNext (Experimental)
165
+
166
+ <StreamVNextCallout />
167
+
168
+ Stream responses using the enhanced VNext API with improved method signatures. It provides greater format flexibility, with support for both Mastra's native format and AI SDK v5 compatibility:
169
+
170
+ ```typescript
171
+ const response = await agent.streamVNext(
172
+ "Tell me a story",
173
+ {
174
+ format: 'mastra', // Default: Mastra's native format
175
+ threadId: "thread-1",
176
+ clientTools: { colorChangeTool },
177
+ }
178
+ );
179
+
180
+ // AI SDK v5 compatible format
181
+ const response = await agent.streamVNext(
182
+ "Tell me a story",
183
+ {
184
+ format: 'aisdk', // Enable AI SDK v5 compatibility
185
+ threadId: "thread-1",
186
+ }
187
+ );
188
+
189
+ // Process the stream
190
+ response.processDataStream({
191
+ onChunk: (chunk) => {
192
+ console.log(chunk);
193
+ },
194
+ });
195
+ ```
196
+
197
+ The `format` parameter determines the output stream format:
198
+ - `'mastra'` (default): Returns Mastra's native format
199
+ - `'aisdk'`: Returns AI SDK v5 compatible format for frontend integration
200
+
201
+ ### Generate VNext (Experimental)
202
+
203
+ Generate a response using the enhanced VNext API with improved method signatures and AI SDK v5 compatibility:
204
+
205
+ ```typescript
206
+ const response = await agent.generateVNext(
207
+ "Hello, how are you?",
208
+ {
209
+ threadId: "thread-1",
210
+ resourceid: "resource-1",
211
+ }
212
+ );
213
+ ```