@mastra/memory 1.2.0 → 1.3.0

Files changed (29)
  1. package/CHANGELOG.md +94 -0
  2. package/dist/{chunk-5YW6JV6Y.js → chunk-F5P5HTMC.js} +135 -67
  3. package/dist/chunk-F5P5HTMC.js.map +1 -0
  4. package/dist/{chunk-7SCXX4S7.cjs → chunk-LXATBJ2L.cjs} +137 -66
  5. package/dist/chunk-LXATBJ2L.cjs.map +1 -0
  6. package/dist/docs/SKILL.md +1 -1
  7. package/dist/docs/assets/SOURCE_MAP.json +26 -14
  8. package/dist/docs/references/docs-memory-observational-memory.md +86 -11
  9. package/dist/docs/references/reference-memory-observational-memory.md +318 -9
  10. package/dist/index.cjs +22 -1
  11. package/dist/index.cjs.map +1 -1
  12. package/dist/index.d.ts.map +1 -1
  13. package/dist/index.js +22 -1
  14. package/dist/index.js.map +1 -1
  15. package/dist/observational-memory-3DA7KJIH.js +3 -0
  16. package/dist/{observational-memory-LI6QFTRE.js.map → observational-memory-3DA7KJIH.js.map} +1 -1
  17. package/dist/observational-memory-SA5RITIG.cjs +64 -0
  18. package/dist/{observational-memory-G3HACXHE.cjs.map → observational-memory-SA5RITIG.cjs.map} +1 -1
  19. package/dist/processors/index.cjs +24 -12
  20. package/dist/processors/index.js +1 -1
  21. package/dist/processors/observational-memory/index.d.ts +1 -1
  22. package/dist/processors/observational-memory/index.d.ts.map +1 -1
  23. package/dist/processors/observational-memory/observational-memory.d.ts +41 -4
  24. package/dist/processors/observational-memory/observational-memory.d.ts.map +1 -1
  25. package/package.json +7 -7
  26. package/dist/chunk-5YW6JV6Y.js.map +0 -1
  27. package/dist/chunk-7SCXX4S7.cjs.map +0 -1
  28. package/dist/observational-memory-G3HACXHE.cjs +0 -52
  29. package/dist/observational-memory-LI6QFTRE.js +0 -3
@@ -3,7 +3,7 @@ name: mastra-memory
 description: Documentation for @mastra/memory. Use when working with @mastra/memory APIs, configuration, or implementation.
 metadata:
   package: "@mastra/memory"
-  version: "1.2.0"
+  version: "1.3.0"
 ---
 
 ## When to use
@@ -1,58 +1,70 @@
 {
-  "version": "1.2.0",
+  "version": "1.3.0",
   "package": "@mastra/memory",
   "exports": {
     "OBSERVATIONAL_MEMORY_DEFAULTS": {
       "types": "dist/processors/index.d.ts",
-      "implementation": "dist/chunk-5YW6JV6Y.js"
+      "implementation": "dist/chunk-F5P5HTMC.js"
+    },
+    "OBSERVATION_CONTEXT_INSTRUCTIONS": {
+      "types": "dist/processors/index.d.ts",
+      "implementation": "dist/chunk-F5P5HTMC.js"
+    },
+    "OBSERVATION_CONTEXT_PROMPT": {
+      "types": "dist/processors/index.d.ts",
+      "implementation": "dist/chunk-F5P5HTMC.js"
+    },
+    "OBSERVATION_CONTINUATION_HINT": {
+      "types": "dist/processors/index.d.ts",
+      "implementation": "dist/chunk-F5P5HTMC.js"
     },
     "OBSERVER_SYSTEM_PROMPT": {
       "types": "dist/processors/index.d.ts",
-      "implementation": "dist/chunk-5YW6JV6Y.js"
+      "implementation": "dist/chunk-F5P5HTMC.js"
     },
     "ObservationalMemory": {
       "types": "dist/processors/index.d.ts",
-      "implementation": "dist/chunk-5YW6JV6Y.js",
-      "line": 1283
+      "implementation": "dist/chunk-F5P5HTMC.js",
+      "line": 1294
     },
     "TokenCounter": {
       "types": "dist/processors/index.d.ts",
-      "implementation": "dist/chunk-5YW6JV6Y.js",
+      "implementation": "dist/chunk-F5P5HTMC.js",
       "line": 957
     },
     "buildObserverPrompt": {
       "types": "dist/processors/index.d.ts",
-      "implementation": "dist/chunk-5YW6JV6Y.js",
+      "implementation": "dist/chunk-F5P5HTMC.js",
       "line": 646
     },
     "buildObserverSystemPrompt": {
       "types": "dist/processors/index.d.ts",
-      "implementation": "dist/chunk-5YW6JV6Y.js",
+      "implementation": "dist/chunk-F5P5HTMC.js",
       "line": 405
     },
     "extractCurrentTask": {
       "types": "dist/processors/index.d.ts",
-      "implementation": "dist/chunk-5YW6JV6Y.js",
+      "implementation": "dist/chunk-F5P5HTMC.js",
       "line": 732
     },
     "formatMessagesForObserver": {
       "types": "dist/processors/index.d.ts",
-      "implementation": "dist/chunk-5YW6JV6Y.js",
+      "implementation": "dist/chunk-F5P5HTMC.js",
       "line": 492
     },
     "hasCurrentTaskSection": {
       "types": "dist/processors/index.d.ts",
-      "implementation": "dist/chunk-5YW6JV6Y.js",
+      "implementation": "dist/chunk-F5P5HTMC.js",
       "line": 720
     },
     "optimizeObservationsForContext": {
       "types": "dist/processors/index.d.ts",
-      "implementation": "dist/chunk-5YW6JV6Y.js",
+      "implementation": "dist/chunk-F5P5HTMC.js",
       "line": 743
     },
     "parseObserverOutput": {
       "types": "dist/processors/index.d.ts",
-      "implementation": "dist/chunk-5YW6JV6Y.js",
+      "implementation": "dist/chunk-F5P5HTMC.js",
       "line": 677
     },
     "extractWorkingMemoryContent": {
@@ -84,7 +96,7 @@
   "processors": {
     "index": "dist/processors/index.js",
     "chunks": [
-      "chunk-5YW6JV6Y.js"
+      "chunk-F5P5HTMC.js"
     ]
   }
 }
@@ -24,11 +24,21 @@ export const agent = new Agent({
 });
 ```
 
-That's it. The agent now has humanlike long-term memory that persists across conversations.
+That's it. The agent now has humanlike long-term memory that persists across conversations. Setting `observationalMemory: true` uses `google/gemini-2.5-flash` by default. To use a different model or customize thresholds, pass a config object instead:
+
+```typescript
+const memory = new Memory({
+  options: {
+    observationalMemory: {
+      model: "deepseek/deepseek-reasoner",
+    },
+  },
+});
+```
 
 See [configuration options](https://mastra.ai/reference/memory/observational-memory) for full API details.
 
-> **Note:** OM currently only supports `@mastra/pg`, `@mastra/libsql`, and `@mastra/mongodb` storage adapters. It also uses background agents for managing memory. The default model (configurable) is `google/gemini-2.5-flash` as it's the one we've tested the most.
+> **Note:** OM currently only supports `@mastra/pg`, `@mastra/libsql`, and `@mastra/mongodb` storage adapters. It uses background agents for managing memory. When using `observationalMemory: true`, the default model is `google/gemini-2.5-flash`. When passing a config object, a `model` must be explicitly set.
 
 ## Benefits
 
@@ -77,7 +87,9 @@ The result is a three-tier system:
 
 The Observer and Reflector run in the background. Any model that works with Mastra's model routing (e.g. `openai/...`, `google/...`, `deepseek/...`) can be used.
 
-The default is `google/gemini-2.5-flash` it works well for both observation and reflection, and its 1M token context window gives the Reflector headroom.
+When using `observationalMemory: true`, the default model is `google/gemini-2.5-flash`. When passing a config object, a `model` must be explicitly set.
+
+We recommend `google/gemini-2.5-flash` — it works well for both observation and reflection, and its 1M token context window gives the Reflector headroom.
 
 We've also tested `deepseek`, `qwen3`, and `glm-4.7` for the Observer. For the Reflector, make sure the model's context window can fit all observations. Note that Claude 4.5 models currently don't work well as observer or reflector.
 
@@ -97,24 +109,40 @@ See [model configuration](https://mastra.ai/reference/memory/observational-memor
 
 ### Thread scope (default)
 
-Each thread has its own observations.
+Each thread has its own observations. This scope is well tested and works well as a general-purpose memory system, especially for long-horizon agentic use-cases.
 
 ```typescript
-observationalMemory: {
-  scope: "thread",
-}
+const memory = new Memory({
+  options: {
+    observationalMemory: {
+      model: "google/gemini-2.5-flash",
+      scope: "thread",
+    },
+  },
+});
 ```
 
-### Resource scope
+### Resource scope (experimental)
 
 Observations are shared across all threads for a resource (typically a user). Enables cross-conversation memory.
 
 ```typescript
-observationalMemory: {
-  scope: "resource",
-}
+const memory = new Memory({
+  options: {
+    observationalMemory: {
+      model: "google/gemini-2.5-flash",
+      scope: "resource",
+    },
+  },
+});
 ```
 
+Resource scope works, but it's marked as experimental for now until we prove task adherence/continuity across multiple ongoing simultaneous threads. As of today, you may need to tweak your system prompt to prevent one thread from continuing work that another thread had already started (but hadn't finished).
+
+This is because in resource scope, each thread is a perspective on _all_ threads for the resource.
+
+For your use-case this may not be a problem, so your mileage may vary.
+
 > **Warning:** In resource scope, unobserved messages across _all_ threads are processed together. For users with many existing threads, this can be slow. Use thread scope for existing apps.
 
 ## Token Budgets
@@ -125,6 +153,7 @@ OM uses token thresholds to decide when to observe and reflect. See [token budge
 const memory = new Memory({
   options: {
     observationalMemory: {
+      model: "google/gemini-2.5-flash",
       observation: {
         // when to run the Observer (default: 30,000)
         messageTokens: 30_000,
@@ -134,12 +163,58 @@ const memory = new Memory({
         observationTokens: 40_000,
       },
       // let message history borrow from observation budget
+      // requires bufferTokens: false (temporary limitation)
       shareTokenBudget: false,
     },
   },
 });
 ```
 
+## Async Buffering
+
+Without async buffering, the Observer runs synchronously when the message threshold is reached — the agent pauses mid-conversation while the Observer LLM call completes. With async buffering (enabled by default), observations are pre-computed in the background as the conversation grows. When the threshold is hit, buffered observations activate instantly with no pause.
+
+### How it works
+
+As the agent converses, message tokens accumulate. At regular intervals (`bufferTokens`), a background Observer call runs without blocking the agent. Each call produces a "chunk" of observations that's stored in a buffer.
+
+When message tokens reach the `messageTokens` threshold, buffered chunks activate: their observations move into the active observation log, and the corresponding raw messages are removed from the context window. The agent never pauses.
+
+If the agent produces messages faster than the Observer can process them, a `blockAfter` safety threshold forces a synchronous observation as a last resort.
+
+Reflection works similarly — the Reflector runs in the background when observations reach a fraction of the reflection threshold.
+
+### Settings
+
+| Setting | Default | What it controls |
+| --- | --- | --- |
+| `observation.bufferTokens` | `0.2` | How often to buffer. `0.2` means every 20% of `messageTokens` — with the default 30k threshold, that's roughly every 6k tokens. Can also be an absolute token count (e.g. `5000`). |
+| `observation.bufferActivation` | `0.8` | How aggressively to clear the message window on activation. `0.8` means remove enough messages to keep only 20% of `messageTokens` remaining. Lower values keep more message history. |
+| `observation.blockAfter` | `1.2` | Safety threshold as a multiplier of `messageTokens`. At `1.2`, synchronous observation is forced at 36k tokens (1.2 × 30k). Only matters if buffering can't keep up. |
+| `reflection.bufferActivation` | `0.5` | When to start background reflection. `0.5` means reflection begins when observations reach 50% of the `observationTokens` threshold. |
+| `reflection.blockAfter` | `1.2` | Safety threshold for reflection, same logic as observation. |
+
+### Disabling
+
+To disable async buffering and use synchronous observation/reflection instead:
+
+```typescript
+const memory = new Memory({
+  options: {
+    observationalMemory: {
+      model: "google/gemini-2.5-flash",
+      observation: {
+        bufferTokens: false,
+      },
+    },
+  },
+});
+```
+
+Setting `bufferTokens: false` disables both observation and reflection async buffering. See [async buffering configuration](https://mastra.ai/reference/memory/observational-memory) for the full API.
+
+> **Note:** Async buffering is not supported with `scope: 'resource'`. It is automatically disabled in resource scope.
+
 ## Migrating existing threads
 
 No manual migration needed. OM reads existing messages and observes them lazily when thresholds are exceeded.
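The thresholds in this hunk interact, so it helps to see the documented defaults resolved to absolute token counts. A quick arithmetic sketch from the table above (the variable names are illustrative, not package API):

```typescript
// The documented async-buffering defaults, resolved against the default
// thresholds from this diff (messageTokens: 30_000, observationTokens: 40_000).
const messageTokens = 30_000;
const observationTokens = 40_000;

// observation.bufferTokens: 0.2 → a background Observer call every 6k message tokens
const bufferInterval = 0.2 * messageTokens; // 6_000

// observation.bufferActivation: 0.8 → keep 20% of the message window after activation
const retainedAfterActivation = (1 - 0.8) * messageTokens; // 6_000

// observation.blockAfter: 1.2 → force a synchronous observation at 36k tokens
const observationBlockAfter = 1.2 * messageTokens; // 36_000

// reflection.bufferActivation: 0.5 → background reflection starts at 20k observation tokens
const reflectionStart = 0.5 * observationTokens; // 20_000

// reflection.blockAfter: 1.2 → force a synchronous reflection at 48k observation tokens
const reflectionBlockAfter = 1.2 * observationTokens; // 48_000
```

Every number above follows directly from the documented defaults; only the 48k figure is computed here rather than stated in the docs.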
@@ -24,17 +24,15 @@ export const agent = new Agent({
 
 ## Configuration
 
-The `observationalMemory` option accepts `true`, `false`, or a configuration object.
-
-Setting `observationalMemory: true` enables it with all defaults. Setting `observationalMemory: false` or omitting it disables it.
+The `observationalMemory` option accepts `true`, a configuration object, or `false`. Setting `true` enables OM with `google/gemini-2.5-flash` as the default model. When passing a config object, a `model` must be explicitly set — either at the top level, or on `observation.model` and/or `reflection.model`.
 
 **enabled?:** (`boolean`): Enable or disable Observational Memory. When omitted from a config object, defaults to `true`. Only `enabled: false` explicitly disables it. (Default: `true`)
 
-**model?:** (`string | LanguageModel | DynamicModel | ModelWithRetries[]`): Model for both the Observer and Reflector agents. Sets the model for both at once. Cannot be used together with `observation.model` or `reflection.model` — an error will be thrown if both are set. (Default: `'google/gemini-2.5-flash'`)
+**model?:** (`string | LanguageModel | DynamicModel | ModelWithRetries[]`): Model for both the Observer and Reflector agents. Sets the model for both at once. Cannot be used together with `observation.model` or `reflection.model` — an error will be thrown if both are set. When using `observationalMemory: true`, defaults to `google/gemini-2.5-flash`. When passing a config object, this or `observation.model`/`reflection.model` must be set. Use `"default"` to explicitly use the default model (`google/gemini-2.5-flash`). (Default: `'google/gemini-2.5-flash'` when using `observationalMemory: true`)
 
-**scope?:** (`'resource' | 'thread'`): Memory scope for observations. `'thread'` keeps observations per-thread. `'resource'` shares observations across all threads for a resource, enabling cross-conversation memory. (Default: `'thread'`)
+**scope?:** (`'resource' | 'thread'`): Memory scope for observations. `'thread'` keeps observations per-thread. `'resource'` (experimental) shares observations across all threads for a resource, enabling cross-conversation memory. (Default: `'thread'`)
 
-**shareTokenBudget?:** (`boolean`): Share the token budget between messages and observations. When enabled, the total budget is `observation.messageTokens + reflection.observationTokens`. Messages can use more space when observations are small, and vice versa. This maximizes context usage through flexible allocation. (Default: `false`)
+**shareTokenBudget?:** (`boolean`): Share the token budget between messages and observations. When enabled, the total budget is `observation.messageTokens + reflection.observationTokens`. Messages can use more space when observations are small, and vice versa. This maximizes context usage through flexible allocation. **Note:** `shareTokenBudget` is not yet compatible with async buffering. You must set `observation: { bufferTokens: false }` when using this option (this is a temporary limitation). (Default: `false`)
 
 **observation?:** (`ObservationalMemoryObservationConfig`): Configuration for the observation step. Controls when the Observer agent runs and how it behaves.
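To make the model requirement concrete, here is a minimal sketch of the valid and invalid shapes described above (the conflicting case is commented out, since the docs state it throws):

```typescript
import { Memory } from "@mastra/memory";

// Valid: a config object with an explicit top-level model (required per the docs above).
const memory = new Memory({
  options: {
    observationalMemory: {
      model: "google/gemini-2.5-flash",
    },
  },
});

// Also valid per the docs: per-agent models instead of a top-level one.
// observationalMemory: {
//   observation: { model: "google/gemini-2.5-flash" },
//   reflection: { model: "google/gemini-2.5-flash" },
// }

// Invalid: the docs say combining a top-level `model` with
// `observation.model` or `reflection.model` throws an error.
// observationalMemory: {
//   model: "google/gemini-2.5-flash",
//   observation: { model: "deepseek/deepseek-reasoner" }, // ✗ error
// }
```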
 
@@ -42,7 +40,7 @@ Setting `observationalMemory: true` enables it with all defaults. Setting `obser
 
 ### Observation config
 
-**model?:** (`string | LanguageModel | DynamicModel | ModelWithRetries[]`): Model for the Observer agent. Cannot be set if a top-level `model` is also provided. (Default: `'google/gemini-2.5-flash'`)
+**model?:** (`string | LanguageModel | DynamicModel | ModelWithRetries[]`): Model for the Observer agent. Cannot be set if a top-level `model` is also provided. If neither this nor the top-level `model` is set, falls back to `reflection.model`.
 
 **messageTokens?:** (`number`): Token count of unobserved messages that triggers observation. When unobserved message tokens exceed this threshold, the Observer agent is called. (Default: `30000`)
 
@@ -50,14 +48,24 @@ Setting `observationalMemory: true` enables it with all defaults. Setting `obser
 
 **modelSettings?:** (`ObservationalMemoryModelSettings`): Model settings for the Observer agent. (Default: `{ temperature: 0.3, maxOutputTokens: 100_000 }`)
 
+**bufferTokens?:** (`number | false`): Token interval for async background observation buffering. Can be an absolute token count (e.g. `5000`) or a fraction of `messageTokens` (e.g. `0.25` = buffer every 25% of threshold). When set, observations run in the background at this interval, storing results in a buffer. When the main `messageTokens` threshold is reached, buffered observations activate instantly without a blocking LLM call. Must resolve to less than `messageTokens`. Set to `false` to explicitly disable all async buffering (both observation and reflection). (Default: `0.2`)
+
+**bufferActivation?:** (`number`): Ratio (0-1) controlling how much of the message window to retain after activation. For example, `0.8` means activate enough to keep only 20% of `messageTokens` remaining. Higher values remove more message history per activation. (Default: `0.8`)
+
+**blockAfter?:** (`number`): Token threshold above which synchronous (blocking) observation is forced. Between `messageTokens` and `blockAfter`, only async buffering/activation is used. Above `blockAfter`, a synchronous observation runs as a last resort. Accepts a multiplier (1 < value < 2, multiplied by `messageTokens`) or an absolute token count (≥ 2, must be greater than `messageTokens`). Only relevant when `bufferTokens` is set. (Default: `1.2` when async buffering is enabled)
+
 ### Reflection config
 
-**model?:** (`string | LanguageModel | DynamicModel | ModelWithRetries[]`): Model for the Reflector agent. Cannot be set if a top-level `model` is also provided. (Default: `'google/gemini-2.5-flash'`)
+**model?:** (`string | LanguageModel | DynamicModel | ModelWithRetries[]`): Model for the Reflector agent. Cannot be set if a top-level `model` is also provided. If neither this nor the top-level `model` is set, falls back to `observation.model`.
 
 **observationTokens?:** (`number`): Token count of observations that triggers reflection. When observation tokens exceed this threshold, the Reflector agent is called to condense them. (Default: `40000`)
 
 **modelSettings?:** (`ObservationalMemoryModelSettings`): Model settings for the Reflector agent. (Default: `{ temperature: 0, maxOutputTokens: 100_000 }`)
 
+**bufferActivation?:** (`number`): Ratio (0-1) controlling when async reflection buffering starts. When observation tokens reach `observationTokens * bufferActivation`, reflection runs in the background. On activation at the full threshold, the buffered reflection replaces the observations it covers, preserving any new observations appended after that range. (Default: `0.5`)
+
+**blockAfter?:** (`number`): Token threshold above which synchronous (blocking) reflection is forced. Between `observationTokens` and `blockAfter`, only async buffering/activation is used. Above `blockAfter`, a synchronous reflection runs as a last resort. Accepts a multiplier (1 < value < 2, multiplied by `observationTokens`) or an absolute token count (≥ 2, must be greater than `observationTokens`). Only relevant when `bufferActivation` is set. (Default: `1.2` when async reflection is enabled)
+
 ### Model settings
 
 **temperature?:** (`number`): Temperature for generation. Lower values produce more consistent output. (Default: `0.3`)
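The multiplier-vs-absolute convention for `blockAfter` is easy to misread, so here is the documented interpretation expressed as code (a hypothetical helper for illustration; the package does not export it):

```typescript
// Hypothetical sketch of the `blockAfter` convention documented above:
// a value strictly between 1 and 2 is a multiplier on the base threshold;
// a value >= 2 is an absolute token count and must exceed the threshold.
function resolveBlockAfter(blockAfter: number, baseThresholdTokens: number): number {
  if (blockAfter > 1 && blockAfter < 2) {
    // e.g. 1.2 with messageTokens 30_000 → 36_000
    return Math.round(blockAfter * baseThresholdTokens);
  }
  if (blockAfter >= 2) {
    if (blockAfter <= baseThresholdTokens) {
      throw new Error("Absolute blockAfter must be greater than the base threshold");
    }
    return blockAfter; // already an absolute token count, e.g. 45_000
  }
  throw new Error("blockAfter must be a multiplier (between 1 and 2) or an absolute token count (>= 2)");
}
```

The same convention applies to both `observation.blockAfter` (against `messageTokens`) and `reflection.blockAfter` (against `observationTokens`).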
@@ -66,7 +74,7 @@ Setting `observationalMemory: true` enables it with all defaults. Setting `obser
 
 ## Examples
 
-### Resource scope with custom thresholds
+### Resource scope with custom thresholds (experimental)
 
 ```typescript
 import { Memory } from "@mastra/memory";
@@ -79,6 +87,7 @@ export const agent = new Agent({
   memory: new Memory({
     options: {
       observationalMemory: {
+        model: "google/gemini-2.5-flash",
         scope: "resource",
         observation: {
           messageTokens: 20_000,
@@ -108,6 +117,7 @@ export const agent = new Agent({
         shareTokenBudget: true,
         observation: {
           messageTokens: 20_000,
+          bufferTokens: false, // required when using shareTokenBudget (temporary limitation)
         },
         reflection: {
           observationTokens: 80_000,
@@ -165,6 +175,305 @@ export const agent = new Agent({
 });
 ```
 
+### Async buffering
+
+Async buffering is **enabled by default**. It pre-computes observations in the background as the conversation grows — when the `messageTokens` threshold is reached, buffered observations activate instantly with no blocking LLM call.
+
+The lifecycle is: **buffer → activate → remove messages → repeat**. Background Observer calls run at `bufferTokens` intervals, each producing a chunk of observations. At threshold, chunks activate: observations move into the log, raw messages are removed from context. The `blockAfter` threshold forces a synchronous fallback if buffering can't keep up.
+
+Default settings:
+
+- `observation.bufferTokens: 0.2` — buffer every 20% of `messageTokens` (e.g. every ~6k tokens with a 30k threshold)
+- `observation.bufferActivation: 0.8` — on activation, remove enough messages to keep only 20% of the threshold remaining
+- `reflection.bufferActivation: 0.5` — start background reflection at 50% of observation threshold
+
+To customize:
+
+```typescript
+import { Memory } from "@mastra/memory";
+import { Agent } from "@mastra/core/agent";
+
+export const agent = new Agent({
+  name: "my-agent",
+  instructions: "You are a helpful assistant.",
+  model: "openai/gpt-5-mini",
+  memory: new Memory({
+    options: {
+      observationalMemory: {
+        model: "google/gemini-2.5-flash",
+        observation: {
+          messageTokens: 30_000,
+          // Buffer every 5k tokens (runs in background)
+          bufferTokens: 5_000,
+          // Activate to retain 30% of threshold
+          bufferActivation: 0.7,
+          // Force synchronous observation at 1.5x threshold
+          blockAfter: 1.5,
+        },
+        reflection: {
+          observationTokens: 60_000,
+          // Start background reflection at 50% of threshold
+          bufferActivation: 0.5,
+          // Force synchronous reflection at 1.2x threshold
+          blockAfter: 1.2,
+        },
+      },
+    },
+  }),
+});
+```
+
+To disable async buffering entirely:
+
+```typescript
+observationalMemory: {
+  model: "google/gemini-2.5-flash",
+  observation: {
+    bufferTokens: false,
+  },
+}
+```
+
+Setting `bufferTokens: false` disables both observation and reflection async buffering. Observations and reflections will run synchronously when their thresholds are reached.
+
+> **Note:** Async buffering is not supported with `scope: 'resource'` and is automatically disabled in resource scope.
+
+## Streaming data parts
+
+Observational Memory emits typed data parts during agent execution that clients can use for real-time UI feedback. These are streamed alongside the agent's response.
+
+### `data-om-status`
+
+Emitted once per agent loop step, before model generation. Provides a snapshot of the current memory state, including token usage for both context windows and the state of any async buffered content.
+
+```typescript
+interface DataOmStatusPart {
+  type: 'data-om-status';
+  data: {
+    windows: {
+      active: {
+        /** Unobserved message tokens and the threshold that triggers observation */
+        messages: { tokens: number; threshold: number };
+        /** Observation tokens and the threshold that triggers reflection */
+        observations: { tokens: number; threshold: number };
+      };
+      buffered: {
+        observations: {
+          /** Number of buffered chunks staged for activation */
+          chunks: number;
+          /** Total message tokens across all buffered chunks */
+          messageTokens: number;
+          /** Projected message tokens that would be removed if activation happened now (based on bufferActivation ratio and chunk boundaries) */
+          projectedMessageRemoval: number;
+          /** Observation tokens that will be added on activation */
+          observationTokens: number;
+          /** idle: no buffering in progress. running: background observer is working. complete: chunks are ready for activation. */
+          status: 'idle' | 'running' | 'complete';
+        };
+        reflection: {
+          /** Observation tokens that were fed into the reflector (pre-compression size) */
+          inputObservationTokens: number;
+          /** Observation tokens the reflection will produce on activation (post-compression size) */
+          observationTokens: number;
+          /** idle: no reflection buffered. running: background reflector is working. complete: reflection is ready for activation. */
+          status: 'idle' | 'running' | 'complete';
+        };
+      };
+    };
+    recordId: string;
+    threadId: string;
+    stepNumber: number;
+    /** Increments each time the Reflector creates a new generation */
+    generationCount: number;
+  };
+}
+```
+
+`buffered.reflection.inputObservationTokens` is the size of the observations that were sent to the Reflector. `buffered.reflection.observationTokens` is the compressed result — the size of what will replace those observations when the reflection activates. A client can use these two values to show a compression ratio.
+
+Clients can derive percentages and post-activation estimates from the raw values:
+
+```typescript
+// Message window usage %
+const msgPercent = status.windows.active.messages.tokens
+  / status.windows.active.messages.threshold;
+
+// Observation window usage %
+const obsPercent = status.windows.active.observations.tokens
+  / status.windows.active.observations.threshold;
+
+// Projected message tokens after buffered observations activate
+// Uses projectedMessageRemoval which accounts for bufferActivation ratio and chunk boundaries
+const postActivation = status.windows.active.messages.tokens
+  - status.windows.buffered.observations.projectedMessageRemoval;
+
+// Reflection compression ratio (when buffered reflection exists)
+const { inputObservationTokens, observationTokens } = status.windows.buffered.reflection;
+if (inputObservationTokens > 0) {
+  const compressionRatio = observationTokens / inputObservationTokens;
+}
+```
+
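A small narrowing helper makes this part easier to consume from a mixed stream. A sketch that relies only on the `DataOmStatusPart` interface declared above (`renderUsageBar` and the loop shape are hypothetical; the stream iteration API is not shown in this diff):

```typescript
// Narrow a generic data part to the `data-om-status` shape declared above.
function isOmStatusPart(part: { type: string }): part is DataOmStatusPart {
  return part.type === "data-om-status";
}

// Example consumer shape (illustrative only):
// for await (const part of stream) {
//   if (isOmStatusPart(part)) {
//     const { tokens, threshold } = part.data.windows.active.messages;
//     renderUsageBar(tokens / threshold); // hypothetical UI hook
//   }
// }
```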
+### `data-om-observation-start`
+
+Emitted when the Observer or Reflector agent begins processing.
+
+**cycleId:** (`string`): Unique ID for this cycle — shared between start/end/failed markers.
+
+**operationType:** (`'observation' | 'reflection'`): Whether this is an observation or reflection operation.
+
+**startedAt:** (`string`): ISO timestamp when processing started.
+
+**tokensToObserve:** (`number`): Message tokens (input) being processed in this batch.
+
+**recordId:** (`string`): The OM record ID.
+
+**threadId:** (`string`): This thread's ID.
+
+**threadIds:** (`string[]`): All thread IDs in this batch (for resource-scoped).
+
+**config:** (`ObservationMarkerConfig`): Snapshot of `messageTokens`, `observationTokens`, and `scope` at observation time.
+
+### `data-om-observation-end`
+
+Emitted when observation or reflection completes successfully.
+
+**cycleId:** (`string`): Matches the corresponding `start` marker.
+
+**operationType:** (`'observation' | 'reflection'`): Type of operation that completed.
+
+**completedAt:** (`string`): ISO timestamp when processing completed.
+
+**durationMs:** (`number`): Duration in milliseconds.
+
+**tokensObserved:** (`number`): Message tokens (input) that were processed.
+
+**observationTokens:** (`number`): Resulting observation tokens (output) after the Observer compressed them.
+
+**observations?:** (`string`): The generated observations text.
+
+**currentTask?:** (`string`): Current task extracted by the Observer.
+
+**suggestedResponse?:** (`string`): Suggested response extracted by the Observer.
+
+**recordId:** (`string`): The OM record ID.
+
+**threadId:** (`string`): This thread's ID.
+
+### `data-om-observation-failed`
+
+Emitted when observation or reflection fails. The system falls back to synchronous processing.
+
+**cycleId:** (`string`): Matches the corresponding `start` marker.
+
+**operationType:** (`'observation' | 'reflection'`): Type of operation that failed.
+
+**failedAt:** (`string`): ISO timestamp when the failure occurred.
+
+**durationMs:** (`number`): Duration until failure in milliseconds.
+
+**tokensAttempted:** (`number`): Message tokens (input) that were attempted.
+
+**error:** (`string`): Error message.
+
+**observations?:** (`string`): Any partial content available for display.
+
+**recordId:** (`string`): The OM record ID.
+
+**threadId:** (`string`): This thread's ID.
+
+### `data-om-buffering-start`
+
+Emitted when async buffering begins in the background. Buffering pre-computes observations or reflections before the main threshold is reached.
+
+**cycleId:** (`string`): Unique ID for this buffering cycle.
+
+**operationType:** (`'observation' | 'reflection'`): Type of operation being buffered.
+
+**startedAt:** (`string`): ISO timestamp when buffering started.
+
+**tokensToBuffer:** (`number`): Message tokens (input) being buffered in this cycle.
+
+**recordId:** (`string`): The OM record ID.
+
+**threadId:** (`string`): This thread's ID.
+
+**threadIds:** (`string[]`): All thread IDs being buffered (for resource-scoped).
+
+**config:** (`ObservationMarkerConfig`): Snapshot of config at buffering time.
+
+### `data-om-buffering-end`
+
+Emitted when async buffering completes. The content is stored but not yet activated in the main context.
+
+**cycleId:** (`string`): Matches the corresponding `buffering-start` marker.
+
+**operationType:** (`'observation' | 'reflection'`): Type of operation that was buffered.
+
+**completedAt:** (`string`): ISO timestamp when buffering completed.
+
+**durationMs:** (`number`): Duration in milliseconds.
+
+**tokensBuffered:** (`number`): Message tokens (input) that were buffered.
+
+**bufferedTokens:** (`number`): Observation tokens (output) after the Observer compressed them.
+
+**observations?:** (`string`): The buffered content.
+
+**recordId:** (`string`): The OM record ID.
+
+**threadId:** (`string`): This thread's ID.
+
+### `data-om-buffering-failed`
+
+Emitted when async buffering fails. The system falls back to synchronous processing when the threshold is reached.
+
+**cycleId:** (`string`): Matches the corresponding `buffering-start` marker.
+
+**operationType:** (`'observation' | 'reflection'`): Type of operation that failed.
+
+**failedAt:** (`string`): ISO timestamp when the failure occurred.
+
+**durationMs:** (`number`): Duration until failure in milliseconds.
+
+**tokensAttempted:** (`number`): Message tokens (input) that were attempted to buffer.
+
+**error:** (`string`): Error message.
+
+**observations?:** (`string`): Any partial content.
+
+**recordId:** (`string`): The OM record ID.
+
+**threadId:** (`string`): This thread's ID.
+
+### `data-om-activation`
+
+Emitted when buffered observations or reflections are activated (moved into the active context window). This is an instant operation — no LLM call is involved.
+
+**cycleId:** (`string`): Unique ID for this activation event.
+
+**operationType:** (`'observation' | 'reflection'`): Type of content activated.
+
+**activatedAt:** (`string`): ISO timestamp when activation occurred.
+
+**chunksActivated:** (`number`): Number of buffered chunks activated.
+
+**tokensActivated:** (`number`): Message tokens (input) from activated chunks. For observation activation, these are removed from the message window. For reflection activation, this is the observation tokens that were compressed.
+
+**observationTokens:** (`number`): Resulting observation tokens after activation.
+
+**messagesActivated:** (`number`): Number of messages that were observed via activation.
+
+**generationCount:** (`number`): Current reflection generation count.
+
+**observations?:** (`string`): The activated observations text.
+
+**recordId:** (`string`): The OM record ID.
+
+**threadId:** (`string`): This thread's ID.
+
+**config:** (`ObservationMarkerConfig`): Snapshot of config at activation time.
+
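All of the marker families above share a `cycleId`, so a client can pair `start` markers with their `end`/`failed` counterparts to drive progress UI. A minimal sketch using only the fields documented above (the part shapes are abbreviated to the fields used):

```typescript
// Correlate observation/reflection lifecycle markers by cycleId.
type OmLifecycleMarker =
  | { type: "data-om-observation-start"; data: { cycleId: string; operationType: "observation" | "reflection"; startedAt: string } }
  | { type: "data-om-observation-end"; data: { cycleId: string; durationMs: number } }
  | { type: "data-om-observation-failed"; data: { cycleId: string; error: string } };

const inFlight = new Map<string, { operationType: string; startedAt: string }>();

function onMarker(part: OmLifecycleMarker) {
  switch (part.type) {
    case "data-om-observation-start":
      inFlight.set(part.data.cycleId, part.data);
      break;
    case "data-om-observation-end": {
      const started = inFlight.get(part.data.cycleId);
      inFlight.delete(part.data.cycleId);
      console.log(`${started?.operationType ?? "operation"} completed in ${part.data.durationMs}ms`);
      break;
    }
    case "data-om-observation-failed":
      inFlight.delete(part.data.cycleId);
      console.warn(`cycle ${part.data.cycleId} failed: ${part.data.error}`);
      break;
  }
}
```

The buffering markers (`data-om-buffering-start`/`end`/`failed`) follow the same start/end/failed pattern and could be folded into the same map.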
 ## Standalone usage
 
 Most users should use the `Memory` class above. Using `ObservationalMemory` directly is mainly useful for benchmarking, experimentation, or when you need to control processor ordering with other processors (like [guardrails](https://mastra.ai/docs/agents/guardrails)).