@mastra/memory 1.7.0 → 1.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. package/CHANGELOG.md +52 -0
  2. package/dist/{chunk-M7RAJAZ6.js → chunk-SUU4IAZJ.js} +307 -39
  3. package/dist/chunk-SUU4IAZJ.js.map +1 -0
  4. package/dist/{chunk-SHID74TI.cjs → chunk-YPFNHFT6.cjs} +307 -39
  5. package/dist/chunk-YPFNHFT6.cjs.map +1 -0
  6. package/dist/docs/SKILL.md +1 -1
  7. package/dist/docs/assets/SOURCE_MAP.json +24 -24
  8. package/dist/docs/references/docs-memory-observational-memory.md +23 -0
  9. package/dist/docs/references/docs-memory-overview.md +3 -3
  10. package/dist/docs/references/reference-memory-observational-memory.md +2 -0
  11. package/dist/index.cjs +64 -368
  12. package/dist/index.cjs.map +1 -1
  13. package/dist/index.d.ts.map +1 -1
  14. package/dist/index.js +64 -368
  15. package/dist/index.js.map +1 -1
  16. package/dist/{observational-memory-AU6MIH4Q.cjs → observational-memory-3HFM7PY2.cjs} +17 -17
  17. package/dist/{observational-memory-AU6MIH4Q.cjs.map → observational-memory-3HFM7PY2.cjs.map} +1 -1
  18. package/dist/{observational-memory-YRWU6CY3.js → observational-memory-XXD6E2SO.js} +3 -3
  19. package/dist/{observational-memory-YRWU6CY3.js.map → observational-memory-XXD6E2SO.js.map} +1 -1
  20. package/dist/processors/index.cjs +15 -15
  21. package/dist/processors/index.js +1 -1
  22. package/dist/processors/observational-memory/observational-memory.d.ts +21 -0
  23. package/dist/processors/observational-memory/observational-memory.d.ts.map +1 -1
  24. package/dist/processors/observational-memory/observer-agent.d.ts +14 -2
  25. package/dist/processors/observational-memory/observer-agent.d.ts.map +1 -1
  26. package/dist/processors/observational-memory/types.d.ts +7 -0
  27. package/dist/processors/observational-memory/types.d.ts.map +1 -1
  28. package/package.json +8 -8
  29. package/dist/chunk-M7RAJAZ6.js.map +0 -1
  30. package/dist/chunk-SHID74TI.cjs.map +0 -1
@@ -3,7 +3,7 @@ name: mastra-memory
3
3
  description: Documentation for @mastra/memory. Use when working with @mastra/memory APIs, configuration, or implementation.
4
4
  metadata:
5
5
  package: "@mastra/memory"
6
- version: "1.7.0"
6
+ version: "1.8.0"
7
7
  ---
8
8
 
9
9
  ## When to use
@@ -1,71 +1,71 @@
1
1
  {
2
- "version": "1.7.0",
2
+ "version": "1.8.0",
3
3
  "package": "@mastra/memory",
4
4
  "exports": {
5
5
  "OBSERVATIONAL_MEMORY_DEFAULTS": {
6
6
  "types": "dist/processors/index.d.ts",
7
- "implementation": "dist/chunk-M7RAJAZ6.js"
7
+ "implementation": "dist/chunk-SUU4IAZJ.js"
8
8
  },
9
9
  "OBSERVATION_CONTEXT_INSTRUCTIONS": {
10
10
  "types": "dist/processors/index.d.ts",
11
- "implementation": "dist/chunk-M7RAJAZ6.js"
11
+ "implementation": "dist/chunk-SUU4IAZJ.js"
12
12
  },
13
13
  "OBSERVATION_CONTEXT_PROMPT": {
14
14
  "types": "dist/processors/index.d.ts",
15
- "implementation": "dist/chunk-M7RAJAZ6.js"
15
+ "implementation": "dist/chunk-SUU4IAZJ.js"
16
16
  },
17
17
  "OBSERVATION_CONTINUATION_HINT": {
18
18
  "types": "dist/processors/index.d.ts",
19
- "implementation": "dist/chunk-M7RAJAZ6.js"
19
+ "implementation": "dist/chunk-SUU4IAZJ.js"
20
20
  },
21
21
  "OBSERVER_SYSTEM_PROMPT": {
22
22
  "types": "dist/processors/index.d.ts",
23
- "implementation": "dist/chunk-M7RAJAZ6.js"
23
+ "implementation": "dist/chunk-SUU4IAZJ.js"
24
24
  },
25
25
  "ObservationalMemory": {
26
26
  "types": "dist/processors/index.d.ts",
27
- "implementation": "dist/chunk-M7RAJAZ6.js",
28
- "line": 2890
27
+ "implementation": "dist/chunk-SUU4IAZJ.js",
28
+ "line": 2966
29
29
  },
30
30
  "TokenCounter": {
31
31
  "types": "dist/processors/index.d.ts",
32
- "implementation": "dist/chunk-M7RAJAZ6.js",
33
- "line": 2371
32
+ "implementation": "dist/chunk-SUU4IAZJ.js",
33
+ "line": 2447
34
34
  },
35
35
  "buildObserverPrompt": {
36
36
  "types": "dist/processors/index.d.ts",
37
- "implementation": "dist/chunk-M7RAJAZ6.js",
38
- "line": 992
37
+ "implementation": "dist/chunk-SUU4IAZJ.js",
38
+ "line": 1068
39
39
  },
40
40
  "buildObserverSystemPrompt": {
41
41
  "types": "dist/processors/index.d.ts",
42
- "implementation": "dist/chunk-M7RAJAZ6.js",
42
+ "implementation": "dist/chunk-SUU4IAZJ.js",
43
43
  "line": 572
44
44
  },
45
45
  "extractCurrentTask": {
46
46
  "types": "dist/processors/index.d.ts",
47
- "implementation": "dist/chunk-M7RAJAZ6.js",
48
- "line": 1100
47
+ "implementation": "dist/chunk-SUU4IAZJ.js",
48
+ "line": 1176
49
49
  },
50
50
  "formatMessagesForObserver": {
51
51
  "types": "dist/processors/index.d.ts",
52
- "implementation": "dist/chunk-M7RAJAZ6.js",
53
- "line": 819
52
+ "implementation": "dist/chunk-SUU4IAZJ.js",
53
+ "line": 827
54
54
  },
55
55
  "hasCurrentTaskSection": {
56
56
  "types": "dist/processors/index.d.ts",
57
- "implementation": "dist/chunk-M7RAJAZ6.js",
58
- "line": 1088
57
+ "implementation": "dist/chunk-SUU4IAZJ.js",
58
+ "line": 1164
59
59
  },
60
60
  "optimizeObservationsForContext": {
61
61
  "types": "dist/processors/index.d.ts",
62
- "implementation": "dist/chunk-M7RAJAZ6.js",
63
- "line": 1111
62
+ "implementation": "dist/chunk-SUU4IAZJ.js",
63
+ "line": 1187
64
64
  },
65
65
  "parseObserverOutput": {
66
66
  "types": "dist/processors/index.d.ts",
67
- "implementation": "dist/chunk-M7RAJAZ6.js",
68
- "line": 1002
67
+ "implementation": "dist/chunk-SUU4IAZJ.js",
68
+ "line": 1078
69
69
  },
70
70
  "extractWorkingMemoryContent": {
71
71
  "types": "dist/index.d.ts",
@@ -96,7 +96,7 @@
96
96
  "processors": {
97
97
  "index": "dist/processors/index.js",
98
98
  "chunks": [
99
- "chunk-M7RAJAZ6.js"
99
+ "chunk-SUU4IAZJ.js"
100
100
  ]
101
101
  }
102
102
  }
@@ -230,6 +230,29 @@ Setting `bufferTokens: false` disables both observation and reflection async buf
230
230
 
231
231
  > **Note:** Async buffering isn't supported with `scope: 'resource'`. It's automatically disabled in resource scope.
232
232
 
233
+ ## Observer Context Optimization
234
+
235
+ By default, the Observer receives the full observation history as context when processing new messages. The Observer also receives prior `current-task` and `suggested-response` metadata (when available), so it can stay oriented even when observation context is truncated. For long-running conversations where observations grow large, you can opt into context optimization to reduce Observer input costs.
236
+
237
+ Set `observation.previousObserverTokens` to limit how many tokens of previous observations are sent to the Observer. Observations are tail-truncated, keeping the most recent entries. When a buffered reflection is pending, the already-reflected lines are automatically replaced with the reflection summary before truncation is applied.
238
+
239
+ ```typescript
240
+ const memory = new Memory({
241
+ options: {
242
+ observationalMemory: {
243
+ model: 'google/gemini-2.5-flash',
244
+ observation: {
245
+ previousObserverTokens: 10_000, // keep only ~10k tokens of recent observations
246
+ },
247
+ },
248
+ },
249
+ })
250
+ ```
251
+
252
+ - `previousObserverTokens: 2000` → default; keeps \~2k tokens of recent observations.
253
+ - `previousObserverTokens: 0` → omit previous observations completely.
254
+ - `previousObserverTokens: false` → disable truncation and keep full previous observations.
255
+
233
256
  ## Migrating existing threads
234
257
 
235
258
  No manual migration needed. OM reads existing messages and observes them lazily when thresholds are exceeded.
@@ -5,9 +5,9 @@ Memory enables your agent to remember user messages, agent replies, and tool res
5
5
  Mastra supports four complementary memory types:
6
6
 
7
7
  - [**Message history**](https://mastra.ai/docs/memory/message-history) - keeps recent messages from the current conversation so they can be rendered in the UI and used to maintain short-term continuity within the exchange.
8
+ - [**Observational memory**](https://mastra.ai/docs/memory/observational-memory) - uses background Observer and Reflector agents to maintain a dense observation log that replaces raw message history as it grows, keeping the context window small while preserving long-term memory across conversations.
8
9
  - [**Working memory**](https://mastra.ai/docs/memory/working-memory) - stores persistent, structured user data such as names, preferences, and goals.
9
10
  - [**Semantic recall**](https://mastra.ai/docs/memory/semantic-recall) - retrieves relevant messages from older conversations based on semantic meaning rather than exact keywords, mirroring how humans recall information by association. Requires a [vector database](https://mastra.ai/docs/memory/semantic-recall) and an [embedding model](https://mastra.ai/docs/memory/semantic-recall).
10
- - [**Observational memory**](https://mastra.ai/docs/memory/observational-memory) - uses background Observer and Reflector agents to maintain a dense observation log that replaces raw message history as it grows, keeping the context window small while preserving long-term memory across conversations.
11
11
 
12
12
  If the combined memory exceeds the model's context limit, [memory processors](https://mastra.ai/docs/memory/memory-processors) can filter, trim, or prioritize content so the most relevant information is preserved.
13
13
 
@@ -16,9 +16,9 @@ If the combined memory exceeds the model's context limit, [memory processors](ht
16
16
  Choose a memory option to get started:
17
17
 
18
18
  - [Message history](https://mastra.ai/docs/memory/message-history)
19
+ - [Observational memory](https://mastra.ai/docs/memory/observational-memory)
19
20
  - [Working memory](https://mastra.ai/docs/memory/working-memory)
20
21
  - [Semantic recall](https://mastra.ai/docs/memory/semantic-recall)
21
- - [Observational memory](https://mastra.ai/docs/memory/observational-memory)
22
22
 
23
23
  ## Storage
24
24
 
@@ -41,5 +41,5 @@ This visibility helps you understand why an agent made specific decisions and ve
41
41
  ## Next steps
42
42
 
43
43
  - Learn more about [Storage](https://mastra.ai/docs/memory/storage) providers and configuration options
44
- - Add [Message history](https://mastra.ai/docs/memory/message-history), [Working memory](https://mastra.ai/docs/memory/working-memory), [Semantic recall](https://mastra.ai/docs/memory/semantic-recall), or [Observational memory](https://mastra.ai/docs/memory/observational-memory)
44
+ - Add [Message history](https://mastra.ai/docs/memory/message-history), [Observational memory](https://mastra.ai/docs/memory/observational-memory), [Working memory](https://mastra.ai/docs/memory/working-memory), or [Semantic recall](https://mastra.ai/docs/memory/semantic-recall)
45
45
  - Visit [Memory configuration reference](https://mastra.ai/reference/memory/memory-class) for all available options
@@ -60,6 +60,8 @@ OM performs thresholding with fast local token estimation. Text uses `tokenx`, a
60
60
 
61
61
  **observation.blockAfter** (`number`): Token threshold above which synchronous (blocking) observation is forced. Between \`messageTokens\` and \`blockAfter\`, only async buffering/activation is used. Above \`blockAfter\`, a synchronous observation runs as a last resort, while buffered activation still preserves a minimum remaining context (min(1000, retention floor)). Accepts a multiplier (1 < value < 2, multiplied by \`messageTokens\`) or an absolute token count (≥ 2, must be greater than \`messageTokens\`). Only relevant when \`bufferTokens\` is set. Defaults to \`1.2\` when async buffering is enabled.
62
62
 
63
+ **observation.previousObserverTokens** (`number | false`): Optional token budget for the observer's previous-observations context. When set to a number, the observations passed to the Observer agent are tail-truncated to fit within this budget while keeping the newest observations and preserving highlighted 🔴 items when possible. When a buffered reflection is pending, the already-reflected observation lines are automatically replaced with the reflection summary before truncation. Set to \`0\` to omit previous observations entirely, or \`false\` to disable truncation explicitly.
64
+
63
65
  **reflection** (`ObservationalMemoryReflectionConfig`): Configuration for the reflection step. Controls when the Reflector agent runs and how it behaves.
64
66
 
65
67
  **reflection.model** (`string | LanguageModel | DynamicModel | ModelWithRetries[]`): Model for the Reflector agent. Cannot be set if a top-level \`model\` is also provided. If neither this nor the top-level \`model\` is set, falls back to \`observation.model\`.