@mastra/memory 1.0.0-beta.1 → 1.0.0-beta.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. package/CHANGELOG.md +355 -0
  2. package/dist/_types/@internal_ai-sdk-v4/dist/index.d.ts +7549 -0
  3. package/dist/chunk-DGUM43GV.js +10 -0
  4. package/dist/chunk-DGUM43GV.js.map +1 -0
  5. package/dist/chunk-JEQ2X3Z6.cjs +12 -0
  6. package/dist/chunk-JEQ2X3Z6.cjs.map +1 -0
  7. package/dist/chunk-KMQS2YEC.js +79 -0
  8. package/dist/chunk-KMQS2YEC.js.map +1 -0
  9. package/dist/chunk-MMUHFOCG.js +79 -0
  10. package/dist/chunk-MMUHFOCG.js.map +1 -0
  11. package/dist/chunk-QY6BZOPJ.js +250 -0
  12. package/dist/chunk-QY6BZOPJ.js.map +1 -0
  13. package/dist/chunk-SG3GRV3O.cjs +84 -0
  14. package/dist/chunk-SG3GRV3O.cjs.map +1 -0
  15. package/dist/chunk-W72AYUIF.cjs +252 -0
  16. package/dist/chunk-W72AYUIF.cjs.map +1 -0
  17. package/dist/chunk-WC4XBMZT.js +250 -0
  18. package/dist/chunk-WC4XBMZT.js.map +1 -0
  19. package/dist/chunk-YMNW6DEN.cjs +252 -0
  20. package/dist/chunk-YMNW6DEN.cjs.map +1 -0
  21. package/dist/chunk-ZUQPUTTO.cjs +84 -0
  22. package/dist/chunk-ZUQPUTTO.cjs.map +1 -0
  23. package/dist/docs/README.md +36 -0
  24. package/dist/docs/SKILL.md +42 -0
  25. package/dist/docs/SOURCE_MAP.json +31 -0
  26. package/dist/docs/agents/01-agent-memory.md +160 -0
  27. package/dist/docs/agents/02-networks.md +236 -0
  28. package/dist/docs/agents/03-agent-approval.md +317 -0
  29. package/dist/docs/core/01-reference.md +114 -0
  30. package/dist/docs/memory/01-overview.md +76 -0
  31. package/dist/docs/memory/02-storage.md +181 -0
  32. package/dist/docs/memory/03-working-memory.md +386 -0
  33. package/dist/docs/memory/04-semantic-recall.md +235 -0
  34. package/dist/docs/memory/05-memory-processors.md +319 -0
  35. package/dist/docs/memory/06-reference.md +617 -0
  36. package/dist/docs/processors/01-reference.md +81 -0
  37. package/dist/docs/storage/01-reference.md +972 -0
  38. package/dist/docs/vectors/01-reference.md +929 -0
  39. package/dist/index.cjs +14845 -115
  40. package/dist/index.cjs.map +1 -1
  41. package/dist/index.d.ts +145 -5
  42. package/dist/index.d.ts.map +1 -1
  43. package/dist/index.js +14807 -119
  44. package/dist/index.js.map +1 -1
  45. package/dist/token-6GSAFR2W-JV3TZR4M.cjs +63 -0
  46. package/dist/token-6GSAFR2W-JV3TZR4M.cjs.map +1 -0
  47. package/dist/token-6GSAFR2W-K2BTU23I.js +61 -0
  48. package/dist/token-6GSAFR2W-K2BTU23I.js.map +1 -0
  49. package/dist/token-6GSAFR2W-VLY2XUPA.js +61 -0
  50. package/dist/token-6GSAFR2W-VLY2XUPA.js.map +1 -0
  51. package/dist/token-6GSAFR2W-YCB5SK2Z.cjs +63 -0
  52. package/dist/token-6GSAFR2W-YCB5SK2Z.cjs.map +1 -0
  53. package/dist/token-util-NEHG7TUY-7IL6JUVY.cjs +10 -0
  54. package/dist/token-util-NEHG7TUY-7IL6JUVY.cjs.map +1 -0
  55. package/dist/token-util-NEHG7TUY-HF7KBP2H.cjs +10 -0
  56. package/dist/token-util-NEHG7TUY-HF7KBP2H.cjs.map +1 -0
  57. package/dist/token-util-NEHG7TUY-KSXDO2NO.js +8 -0
  58. package/dist/token-util-NEHG7TUY-KSXDO2NO.js.map +1 -0
  59. package/dist/token-util-NEHG7TUY-TIJ3LMSH.js +8 -0
  60. package/dist/token-util-NEHG7TUY-TIJ3LMSH.js.map +1 -0
  61. package/dist/tools/working-memory.d.ts +10 -2
  62. package/dist/tools/working-memory.d.ts.map +1 -1
  63. package/package.json +19 -25
  64. package/dist/processors/index.cjs +0 -165
  65. package/dist/processors/index.cjs.map +0 -1
  66. package/dist/processors/index.d.ts +0 -3
  67. package/dist/processors/index.d.ts.map +0 -1
  68. package/dist/processors/index.js +0 -158
  69. package/dist/processors/index.js.map +0 -1
  70. package/dist/processors/token-limiter.d.ts +0 -32
  71. package/dist/processors/token-limiter.d.ts.map +0 -1
  72. package/dist/processors/tool-call-filter.d.ts +0 -20
  73. package/dist/processors/tool-call-filter.d.ts.map +0 -1
@@ -0,0 +1,76 @@
1
+ > Learn how Mastra memory works.
2
+
3
+ # Memory
4
+
5
+ Memory gives your agent coherence across interactions and allows it to improve over time by retaining relevant information from past conversations.
6
+
7
+ Mastra requires a [storage provider](./storage) to persist memory and supports three types:
8
+
9
+ - [**Message history**](https://mastra.ai/docs/v1/memory/message-history) captures recent messages from the current conversation, providing short-term continuity and maintaining dialogue flow.
10
+ - [**Working memory**](https://mastra.ai/docs/v1/memory/working-memory) stores persistent user-specific details such as names, preferences, goals, and other structured data.
11
+ - [**Semantic recall**](https://mastra.ai/docs/v1/memory/semantic-recall) retrieves older messages from past conversations based on semantic relevance. Matches are retrieved using vector search and can include surrounding context for better comprehension.
12
+
13
+ You can enable any combination of these memory types. Mastra assembles the relevant memories into the model’s context window. If the total exceeds the model's token limit, use [memory processors](https://mastra.ai/docs/v1/memory/memory-processors) to trim or filter messages before sending them to the model.
14
+
15
+ ## Getting started
16
+
17
+ Install Mastra's memory module and the storage adapter for your preferred database (see the storage section below):
18
+
19
+ ```bash
20
+ npm install @mastra/memory@beta @mastra/libsql@beta
21
+ ```
22
+
23
+ Add the storage adapter to the main Mastra instance:
24
+
25
+ ```typescript title="src/mastra/index.ts"
26
+ import { Mastra } from "@mastra/core";
27
+ import { LibSQLStore } from "@mastra/libsql";
28
+
29
+ export const mastra = new Mastra({
30
+ storage: new LibSQLStore({
31
+ id: 'mastra-storage',
32
+ url: ":memory:",
33
+ }),
34
+ });
35
+ ```
36
+
37
+ Enable memory by passing a `Memory` instance to your agent:
38
+
39
+ ```typescript title="src/mastra/agents/test-agent.ts"
40
+ import { Memory } from "@mastra/memory";
41
+ import { Agent } from "@mastra/core/agent";
42
+
43
+ export const testAgent = new Agent({
44
+ id: "test-agent",
45
+ memory: new Memory({
46
+ options: {
47
+ lastMessages: 20,
48
+ },
49
+ }),
50
+ });
51
+ ```
52
+ When you send a new message, the model can now "see" the previous 20 messages, which gives it better context for the conversation and leads to more coherent, accurate replies.
53
+
54
+ This example configures basic [message history](https://mastra.ai/docs/v1/memory/message-history). You can also enable [working memory](https://mastra.ai/docs/v1/memory/working-memory) and [semantic recall](https://mastra.ai/docs/v1/memory/semantic-recall) by passing additional options to `Memory`.
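+
+ As a rough sketch, the options below combine all three memory types on a single `Memory` instance. All option names are taken from the sections linked above; note that semantic recall additionally requires a vector store (see [Storage](https://mastra.ai/docs/v1/memory/storage)):
+
+ ```typescript
+ import { Memory } from "@mastra/memory";
+
+ // Sketch: message history, working memory, and semantic recall together.
+ // Semantic recall also needs a vector database configured (not shown here).
+ const memory = new Memory({
+   options: {
+     lastMessages: 20, // message history
+     workingMemory: {
+       enabled: true, // persistent user details
+     },
+     semanticRecall: {
+       topK: 5, // number of semantically similar messages to retrieve
+       messageRange: 2, // surrounding messages included per match
+     },
+   },
+ });
+ ```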
55
+
56
+ ## Storage
57
+
58
+ Before enabling memory, you must first configure a storage adapter. Mastra supports multiple database providers including PostgreSQL, MongoDB, libSQL, and more.
59
+
60
+ Storage can be configured at the instance level (shared across all agents) or at the agent level (dedicated per agent). You can also use different databases for storage and vector operations.
61
+
62
+ See the [Storage](https://mastra.ai/docs/v1/memory/storage) documentation for configuration options, supported providers, and examples.
63
+
64
+ ## Debugging memory
65
+
66
+ When tracing is enabled, you can inspect exactly which messages the agent uses for context in each request. The trace output shows all memory included in the agent's context window - both recent message history and messages recalled via semantic recall.
67
+
68
+ This visibility helps you understand why an agent made specific decisions and verify that memory retrieval is working as expected.
69
+
70
+ For more details on enabling and configuring tracing, see [Tracing](https://mastra.ai/docs/v1/observability/tracing/overview).
71
+
72
+ ## Next Steps
73
+
74
+ - Learn more about [Storage](https://mastra.ai/docs/v1/memory/storage) providers and configuration options
75
+ - Add [Message History](https://mastra.ai/docs/v1/memory/message-history), [Working Memory](https://mastra.ai/docs/v1/memory/working-memory), or [Semantic Recall](https://mastra.ai/docs/v1/memory/semantic-recall)
76
+ - Visit [Memory configuration reference](https://mastra.ai/reference/v1/memory/memory-class) for all available options
@@ -0,0 +1,181 @@
1
+ > Configure storage for Mastra
2
+
3
+ # Storage
4
+
5
+ For Mastra to remember previous interactions, you must configure a storage adapter. Mastra is designed to work with your preferred database provider - choose from the [supported providers](#supported-providers) and pass it to your Mastra instance.
6
+
7
+ ```typescript
8
+ import { Mastra } from "@mastra/core";
9
+ import { LibSQLStore } from "@mastra/libsql";
10
+
11
+ const mastra = new Mastra({
12
+ storage: new LibSQLStore({
13
+ id: 'mastra-storage',
14
+ url: "file:./mastra.db",
15
+ }),
16
+ });
17
+ ```
18
+ On first interaction, Mastra automatically creates the necessary tables following the [core schema](https://mastra.ai/reference/v1/storage/overview#core-schema). This includes tables for messages, threads, resources, workflows, traces, and evaluation datasets.
19
+
20
+ ## Supported Providers
21
+
22
+ Each provider page includes installation instructions, configuration parameters, and usage examples:
23
+
24
+ - [libSQL Storage](https://mastra.ai/reference/v1/storage/libsql)
25
+ - [PostgreSQL Storage](https://mastra.ai/reference/v1/storage/postgresql)
26
+ - [MongoDB Storage](https://mastra.ai/reference/v1/storage/mongodb)
27
+ - [Upstash Storage](https://mastra.ai/reference/v1/storage/upstash)
28
+ - [Cloudflare D1](https://mastra.ai/reference/v1/storage/cloudflare-d1)
29
+ - [Cloudflare Durable Objects](https://mastra.ai/reference/v1/storage/cloudflare)
30
+ - [Convex](https://mastra.ai/reference/v1/storage/convex)
31
+ - [DynamoDB](https://mastra.ai/reference/v1/storage/dynamodb)
32
+ - [LanceDB](https://mastra.ai/reference/v1/storage/lance)
33
+ - [Microsoft SQL Server](https://mastra.ai/reference/v1/storage/mssql)
34
+
35
+ > **Note:**
36
+ > libSQL is the easiest way to get started because it doesn’t require running a separate database server.
37
+
38
+ ## Configuration Scope
39
+
40
+ You can configure storage at two different scopes:
41
+
42
+ ### Instance-level storage
43
+
44
+ Add storage to your Mastra instance so all agents share the same memory provider:
45
+
46
+ ```typescript
47
+ import { Mastra } from "@mastra/core";
48
+ import { Agent } from "@mastra/core/agent";
+ import { Memory } from "@mastra/memory";
+ import { PostgresStore } from "@mastra/pg";
49
+
50
+ const mastra = new Mastra({
51
+ storage: new PostgresStore({
52
+ id: 'mastra-storage',
53
+ connectionString: process.env.DATABASE_URL,
54
+ }),
55
+ });
56
+
57
+ // All agents automatically use this storage
58
+ const agent1 = new Agent({ memory: new Memory() });
59
+ const agent2 = new Agent({ memory: new Memory() });
60
+ ```
61
+
62
+ ### Agent-level storage
63
+
64
+ Add storage to a specific agent when you need data boundaries or compliance requirements:
65
+
66
+ ```typescript
67
+ import { Agent } from "@mastra/core/agent";
68
+ import { Memory } from "@mastra/memory";
69
+ import { PostgresStore } from "@mastra/pg";
70
+
71
+ const agent = new Agent({
72
+ memory: new Memory({
73
+ storage: new PostgresStore({
74
+ id: 'agent-storage',
75
+ connectionString: process.env.AGENT_DATABASE_URL,
76
+ }),
77
+ }),
78
+ });
79
+ ```
80
+
81
+ This is useful when different agents need to store data in separate databases for security, compliance, or organizational reasons.
82
+
83
+ ## Threads and Resources
84
+
85
+ Mastra organizes memory into threads using two identifiers:
86
+
87
+ - **Thread**: A conversation session containing a sequence of messages (e.g., `convo_123`)
88
+ - **Resource**: An identifier for the entity the thread belongs to, typically a user (e.g., `user_123`)
89
+
90
+ Both identifiers are required for agents to store and recall information:
91
+
92
+ ```typescript
93
+ const stream = await agent.stream("message for agent", {
94
+ memory: {
95
+ thread: "convo_123",
96
+ resource: "user_123",
97
+ },
98
+ });
99
+ ```
100
+
101
+ > **Note:**
102
+ > [Studio](https://mastra.ai/docs/v1/getting-started/studio) automatically generates a thread and resource ID for you. Remember to pass these explicitly when calling `stream` or `generate` yourself.
103
+
104
+ ### Thread title generation
105
+
106
+ Mastra can automatically generate descriptive thread titles based on the user's first message.
107
+
108
+ Use this option when implementing a ChatGPT-style chat interface to render a title, derived from the thread’s initial user message, alongside each thread in the conversation list (for example, in a sidebar).
109
+
110
+ ```typescript
111
+ export const testAgent = new Agent({
112
+ memory: new Memory({
113
+ options: {
114
+ generateTitle: true,
115
+ },
116
+ }),
117
+ });
118
+ ```
119
+
120
+ Title generation runs asynchronously after the agent responds and does not affect response time.
121
+
122
+ To optimize cost or behavior, provide a smaller `model` and custom `instructions`:
123
+
124
+ ```typescript
125
+ export const testAgent = new Agent({
126
+ memory: new Memory({
127
+ options: {
128
+ threads: {
129
+ generateTitle: {
130
+ model: "openai/gpt-4o-mini",
131
+ instructions: "Generate a concise title based on the user's first message",
132
+ },
133
+ },
134
+ },
135
+ }),
136
+ });
137
+ ```
138
+
139
+ ## Semantic recall
140
+
141
+ Semantic recall uses vector embeddings to retrieve relevant past messages based on meaning rather than recency. This requires a vector database instance, which can be configured at the instance or agent level.
142
+
143
+ The vector database doesn't have to be the same as your storage provider. For example, you might use PostgreSQL for storage and Pinecone for vectors:
144
+
145
+ ```typescript
146
+ import { Mastra } from "@mastra/core";
147
+ import { Agent } from "@mastra/core/agent";
148
+ import { Memory } from "@mastra/memory";
149
+ import { PostgresStore } from "@mastra/pg";
150
+ import { PineconeVector } from "@mastra/pinecone";
151
+
152
+ // Instance-level storage configuration
153
+ const mastra = new Mastra({
154
+ storage: new PostgresStore({
155
+ id: 'mastra-storage',
156
+ connectionString: process.env.DATABASE_URL,
157
+ }),
158
+ });
159
+
160
+ // Agent-level vector configuration
161
+ const agent = new Agent({
162
+ memory: new Memory({
163
+ vector: new PineconeVector({
164
+ id: 'agent-vector',
165
+ apiKey: process.env.PINECONE_API_KEY,
166
+ environment: process.env.PINECONE_ENVIRONMENT,
167
+ indexName: 'agent-embeddings',
168
+ }),
169
+ options: {
170
+ semanticRecall: {
171
+ topK: 5,
172
+ messageRange: 2,
173
+ },
174
+ },
175
+ }),
176
+ });
177
+ ```
178
+
179
+ We support all popular vector providers including [Pinecone](https://mastra.ai/reference/v1/vectors/pinecone), [Chroma](https://mastra.ai/reference/v1/vectors/chroma), [Qdrant](https://mastra.ai/reference/v1/vectors/qdrant), and many more.
180
+
181
+ For more information on configuring semantic recall, see the [Semantic Recall](./semantic-recall) documentation.
@@ -0,0 +1,386 @@
1
+ > Learn how to configure working memory in Mastra to store persistent user data and preferences.
2
+
3
+ # Working Memory
4
+
5
+ While [message history](https://mastra.ai/docs/v1/memory/message-history) and [semantic recall](./semantic-recall) help agents remember conversations, working memory allows them to maintain persistent information about users across interactions.
6
+
7
+ Think of it as the agent's active thoughts or scratchpad – the key information it keeps available about the user or task. It's similar to how a person would naturally remember someone's name, preferences, or important details during a conversation.
8
+
9
+ This is useful for maintaining ongoing state that's always relevant and should always be available to the agent.
10
+
11
+ Working memory can persist at two different scopes:
12
+
13
+ - **Resource-scoped** (default): Memory persists across all conversation threads for the same user
14
+ - **Thread-scoped**: Memory is isolated per conversation thread
15
+
16
+ **Important:** Switching between scopes means the agent won't see memory from the other scope - thread-scoped memory is completely separate from resource-scoped memory.
17
+
18
+ ## Quick Start
19
+
20
+ Here's a minimal example of setting up an agent with working memory:
21
+
22
+ ```typescript {11-15}
23
+ import { Agent } from "@mastra/core/agent";
24
+ import { Memory } from "@mastra/memory";
25
+
26
+ // Create agent with working memory enabled
27
+ const agent = new Agent({
28
+ id: "personal-assistant",
29
+ name: "PersonalAssistant",
30
+ instructions: "You are a helpful personal assistant.",
31
+ model: "openai/gpt-5.1",
32
+ memory: new Memory({
33
+ options: {
34
+ workingMemory: {
35
+ enabled: true,
36
+ },
37
+ },
38
+ }),
39
+ });
40
+ ```
41
+
42
+ ## How it Works
43
+
44
+ Working memory is a block of Markdown text that the agent is able to update over time to store continuously relevant information:
45
+
46
+ <YouTube id="UMy_JHLf1n8" />
47
+
48
+ ## Memory Persistence Scopes
49
+
50
+ Working memory can operate in two different scopes, allowing you to choose how memory persists across conversations:
51
+
52
+ ### Resource-Scoped Memory (Default)
53
+
54
+ By default, working memory persists across all conversation threads for the same user (resourceId), enabling persistent user memory:
55
+
56
+ ```typescript
57
+ const memory = new Memory({
58
+ storage,
59
+ options: {
60
+ workingMemory: {
61
+ enabled: true,
62
+ scope: "resource", // Memory persists across all user threads
63
+ template: `# User Profile
64
+ - **Name**:
65
+ - **Location**:
66
+ - **Interests**:
67
+ - **Preferences**:
68
+ - **Long-term Goals**:
69
+ `,
70
+ },
71
+ },
72
+ });
73
+ ```
74
+
75
+ **Use cases:**
76
+
77
+ - Personal assistants that remember user preferences
78
+ - Customer service bots that maintain customer context
79
+ - Educational applications that track student progress
80
+
81
+ ### Usage with Agents
82
+
83
+ When using resource-scoped memory, make sure to pass the `resourceId` parameter:
84
+
85
+ ```typescript
86
+ // Resource-scoped memory requires resourceId
87
+ const response = await agent.generate("Hello!", {
88
+ threadId: "conversation-123",
89
+ resourceId: "user-alice-456", // Same user across different threads
90
+ });
91
+ ```
92
+
93
+ ### Thread-Scoped Memory
94
+
95
+ Thread-scoped memory isolates working memory to individual conversation threads. Each thread maintains its own isolated memory:
96
+
97
+ ```typescript
98
+ const memory = new Memory({
99
+ storage,
100
+ options: {
101
+ workingMemory: {
102
+ enabled: true,
103
+ scope: "thread", // Memory is isolated per thread
104
+ template: `# User Profile
105
+ - **Name**:
106
+ - **Interests**:
107
+ - **Current Goal**:
108
+ `,
109
+ },
110
+ },
111
+ });
112
+ ```
113
+
114
+ **Use cases:**
115
+
116
+ - Different conversations about separate topics
117
+ - Temporary or session-specific information
118
+ - Workflows where each thread needs working memory but threads are ephemeral and not related to each other
119
+
120
+ ## Storage Adapter Support
121
+
122
+ Resource-scoped working memory requires specific storage adapters that support the `mastra_resources` table (see the configuration sketch after the list below):
123
+
124
+ ### Supported Storage Adapters
125
+
126
+ - **libSQL** (`@mastra/libsql`)
127
+ - **PostgreSQL** (`@mastra/pg`)
128
+ - **Upstash** (`@mastra/upstash`)
129
+ - **MongoDB** (`@mastra/mongodb`)
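+
+ For example, a minimal sketch wiring one of these adapters (libSQL here) for resource-scoped working memory; the `id` and `url` values are illustrative and mirror the storage examples earlier in these docs:
+
+ ```typescript
+ import { Memory } from "@mastra/memory";
+ import { LibSQLStore } from "@mastra/libsql";
+
+ // Resource-scoped working memory backed by a supported storage adapter
+ const memory = new Memory({
+   storage: new LibSQLStore({
+     id: "memory-storage",
+     url: "file:./mastra.db",
+   }),
+   options: {
+     workingMemory: {
+       enabled: true,
+       scope: "resource", // requires an adapter with the mastra_resources table
+     },
+   },
+ });
+ ```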
130
+
131
+ ## Custom Templates
132
+
133
+ Templates guide the agent on what information to track and update in working memory. While a default template is used if none is provided, you'll typically want to define a custom template tailored to your agent's specific use case to ensure it remembers the most relevant information.
134
+
135
+ Here's an example of a custom template. In this example the agent will store the user's name, location, timezone, etc. as soon as the user sends a message containing any of this info:
136
+
137
+ ```typescript {5-28}
138
+ const memory = new Memory({
139
+ options: {
140
+ workingMemory: {
141
+ enabled: true,
142
+ template: `
143
+ # User Profile
144
+
145
+ ## Personal Info
146
+
147
+ - Name:
148
+ - Location:
149
+ - Timezone:
150
+
151
+ ## Preferences
152
+
153
+ - Communication Style: [e.g., Formal, Casual]
154
+ - Project Goal:
155
+ - Key Deadlines:
156
+ - [Deadline 1]: [Date]
157
+ - [Deadline 2]: [Date]
158
+
159
+ ## Session State
160
+
161
+ - Last Task Discussed:
162
+ - Open Questions:
163
+ - [Question 1]
164
+ - [Question 2]
165
+ `,
166
+ },
167
+ },
168
+ });
169
+ ```
170
+
171
+ ## Designing Effective Templates
172
+
173
+ A well-structured template keeps the information easy for the agent to parse and update. Treat the
174
+ template as a short form that you want the assistant to keep up to date.
175
+
176
+ - **Short, focused labels.** Avoid paragraphs or very long headings. Keep labels brief (for example
177
+ `## Personal Info` or `- Name:`) so updates are easy to read and less likely to be truncated.
178
+ - **Use consistent casing.** Inconsistent capitalization (`Timezone:` vs `timezone:`) can cause messy
179
+ updates. Stick to Title Case or lower case for headings and bullet labels.
180
+ - **Keep placeholder text simple.** Use hints such as `[e.g., Formal]` or `[Date]` to help the LLM
181
+ fill in the correct spots.
182
+ - **Abbreviate very long values.** If you only need a short form, include guidance like
183
+ `- Name: [First name or nickname]` or `- Address (short):` rather than the full legal text.
184
+ - **Mention update rules in `instructions`.** You can instruct how and when to fill or clear parts of
185
+ the template directly in the agent's `instructions` field.
186
+
187
+ ### Alternative Template Styles
188
+
189
+ Use a shorter single block if you only need a few items:
190
+
191
+ ```typescript
192
+ const basicMemory = new Memory({
193
+ options: {
194
+ workingMemory: {
195
+ enabled: true,
196
+ template: `User Facts:\n- Name:\n- Favorite Color:\n- Current Topic:`,
197
+ },
198
+ },
199
+ });
200
+ ```
201
+
202
+ You can also store the key facts in a short paragraph format if you prefer a more narrative style:
203
+
204
+ ```typescript
205
+ const paragraphMemory = new Memory({
206
+ options: {
207
+ workingMemory: {
208
+ enabled: true,
209
+ template: `Important Details:\n\nKeep a short paragraph capturing the user's important facts (name, main goal, current task).`,
210
+ },
211
+ },
212
+ });
213
+ ```
214
+
215
+ ## Structured Working Memory
216
+
217
+ Working memory can also be defined using a structured schema instead of a Markdown template. This allows you to specify the exact fields and types that should be tracked, using a [Zod](https://zod.dev/) schema. When using a schema, the agent will see and update working memory as a JSON object matching your schema.
218
+
219
+ **Important:** You must specify either `template` or `schema`, but not both.
220
+
221
+ ### Example: Schema-Based Working Memory
222
+
223
+ ```typescript
224
+ import { z } from "zod";
225
+ import { Memory } from "@mastra/memory";
226
+
227
+ const userProfileSchema = z.object({
228
+ name: z.string().optional(),
229
+ location: z.string().optional(),
230
+ timezone: z.string().optional(),
231
+ preferences: z
232
+ .object({
233
+ communicationStyle: z.string().optional(),
234
+ projectGoal: z.string().optional(),
235
+ deadlines: z.array(z.string()).optional(),
236
+ })
237
+ .optional(),
238
+ });
239
+
240
+ const memory = new Memory({
241
+ options: {
242
+ workingMemory: {
243
+ enabled: true,
244
+ schema: userProfileSchema,
245
+ // template: ... (do not set)
246
+ },
247
+ },
248
+ });
249
+ ```
250
+
251
+ When a schema is provided, the agent receives the working memory as a JSON object. For example:
252
+
253
+ ```json
254
+ {
255
+ "name": "Sam",
256
+ "location": "Berlin",
257
+ "timezone": "CET",
258
+ "preferences": {
259
+ "communicationStyle": "Formal",
260
+ "projectGoal": "Launch MVP",
261
+ "deadlines": ["2025-07-01"]
262
+ }
263
+ }
264
+ ```
265
+
266
+ ### Merge Semantics for Schema-Based Memory
267
+
268
+ Schema-based working memory uses **merge semantics**, meaning the agent only needs to include fields it wants to add or update. Existing fields are preserved automatically, as shown in the sketch after the list below.
269
+
270
+ - **Object fields are deep merged:** Only provided fields are updated; others remain unchanged
271
+ - **Set a field to `null` to delete it:** This explicitly removes the field from memory
272
+ - **Arrays are replaced entirely:** When an array field is provided, it replaces the existing array (arrays are not merged element-by-element)
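+
+ A minimal sketch of these rules (the values are hypothetical; only the merge behavior matters):
+
+ ```typescript
+ // Stored working memory before the agent's update
+ const existing = {
+   name: "Sam",
+   location: "Berlin",
+   preferences: { communicationStyle: "Formal", deadlines: ["2025-07-01"] },
+ };
+
+ // Update emitted by the agent: only the fields it wants to change
+ const update = {
+   timezone: "CET", // new field is added
+   location: null, // null explicitly deletes the field
+   preferences: { deadlines: ["2025-08-15"] }, // arrays are replaced, not merged
+ };
+
+ // Result after merge semantics are applied
+ const merged = {
+   name: "Sam", // untouched fields are preserved
+   timezone: "CET",
+   preferences: { communicationStyle: "Formal", deadlines: ["2025-08-15"] },
+ };
+ ```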
273
+
274
+ ## Choosing Between Template and Schema
275
+
276
+ - Use a **template** (Markdown) if you want the agent to maintain memory as a free-form text block, such as a user profile or scratchpad. Templates use **replace semantics** — the agent must provide the complete memory content on each update.
277
+ - Use a **schema** if you need structured, type-safe data that can be validated and programmatically accessed as JSON. Schemas use **merge semantics** — the agent only provides fields to update, and existing fields are preserved.
278
+ - Only one mode can be active at a time: setting both `template` and `schema` is not supported.
279
+
280
+ ## Example: Multi-step Retention
281
+
282
+ Below is a simplified view of how the `User Profile` template updates across a short user
283
+ conversation:
284
+
285
+ ```nohighlight
286
+ # User Profile
287
+
288
+ ## Personal Info
289
+
290
+ - Name:
291
+ - Location:
292
+ - Timezone:
293
+
294
+ --- After user says "My name is **Sam** and I'm from **Berlin**" ---
295
+
296
+ # User Profile
297
+ - Name: Sam
298
+ - Location: Berlin
299
+ - Timezone:
300
+
301
+ --- After user adds "By the way I'm normally in **CET**" ---
302
+
303
+ # User Profile
304
+ - Name: Sam
305
+ - Location: Berlin
306
+ - Timezone: CET
307
+ ```
308
+
309
+ The agent can now refer to `Sam` or `Berlin` in later responses without requesting the information
310
+ again because it has been stored in working memory.
311
+
312
+ If your agent is not properly updating working memory when you expect it to, you can add system
313
+ instructions on _how_ and _when_ to use this template in your agent's `instructions` setting.
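+
+ As a rough, hypothetical sketch (the wording of the instructions is just an example, and the rest of the configuration mirrors the Quick Start above):
+
+ ```typescript
+ import { Agent } from "@mastra/core/agent";
+ import { Memory } from "@mastra/memory";
+
+ const agent = new Agent({
+   id: "personal-assistant",
+   name: "PersonalAssistant",
+   instructions: `You are a helpful personal assistant.
+ Whenever the user shares their name, location, or timezone,
+ update working memory with the new value right away.
+ Only clear a field when the user explicitly corrects it.`,
+   model: "openai/gpt-5.1",
+   memory: new Memory({
+     options: {
+       workingMemory: { enabled: true },
+     },
+   }),
+ });
+ ```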
314
+
315
+ ## Setting Initial Working Memory
316
+
317
+ While agents typically update working memory through the `updateWorkingMemory` tool, you can also set initial working memory programmatically when creating or updating threads. This is useful for injecting user data (like their name, preferences, or other info) that you want available to the agent without passing it in every request.
318
+
319
+ ### Setting Working Memory via Thread Metadata
320
+
321
+ When creating a thread, you can provide initial working memory through the metadata's `workingMemory` key:
322
+
323
+ ```typescript title="src/app/medical-consultation.ts"
324
+ // Create a thread with initial working memory
325
+ const thread = await memory.createThread({
326
+ threadId: "thread-123",
327
+ resourceId: "user-456",
328
+ title: "Medical Consultation",
329
+ metadata: {
330
+ workingMemory: `# Patient Profile
331
+ - Name: John Doe
332
+ - Blood Type: O+
333
+ - Allergies: Penicillin
334
+ - Current Medications: None
335
+ - Medical History: Hypertension (controlled)
336
+ `,
337
+ },
338
+ });
339
+
340
+ // The agent will now have access to this information in all messages
341
+ await agent.generate("What's my blood type?", {
342
+ threadId: thread.id,
343
+ resourceId: "user-456",
344
+ });
345
+ // Response: "Your blood type is O+."
346
+ ```
347
+
348
+ ### Updating Working Memory Programmatically
349
+
350
+ You can also update an existing thread's working memory:
351
+
352
+ ```typescript title="src/app/medical-consultation.ts"
353
+ // Update thread metadata to add/modify working memory
354
+ await memory.updateThread({
355
+ id: "thread-123",
356
+ title: thread.title,
357
+ metadata: {
358
+ ...thread.metadata,
359
+ workingMemory: `# Patient Profile
360
+ - Name: John Doe
361
+ - Blood Type: O+
362
+ - Allergies: Penicillin, Ibuprofen // Updated
363
+ - Current Medications: Lisinopril 10mg daily // Added
364
+ - Medical History: Hypertension (controlled)
365
+ `,
366
+ },
367
+ });
368
+ ```
369
+
370
+ ### Direct Memory Update
371
+
372
+ Alternatively, use the `updateWorkingMemory` method directly:
373
+
374
+ ```typescript title="src/app/medical-consultation.ts"
375
+ await memory.updateWorkingMemory({
376
+ threadId: "thread-123",
377
+ resourceId: "user-456", // Required for resource-scoped memory
378
+ workingMemory: "Updated memory content...",
379
+ });
380
+ ```
381
+
382
+ ## Examples
383
+
384
+ - [Working memory with template](https://github.com/mastra-ai/mastra/tree/main/examples/memory-with-template)
385
+ - [Working memory with schema](https://github.com/mastra-ai/mastra/tree/main/examples/memory-with-schema)
386
+ - [Per-resource working memory](https://github.com/mastra-ai/mastra/tree/main/examples/memory-per-resource-example) - Complete example showing resource-scoped memory persistence