@mastra/memory 1.5.0-alpha.0 → 1.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,29 @@
1
1
  # @mastra/memory
2
2
 
3
+ ## 1.5.0
4
+
5
+ ### Minor Changes
6
+
7
+ - Improved conversational continuity when the message window shrinks during Observational Memory activation. The agent now preserves its suggested next response and current task across activation, so it maintains context instead of losing track of the conversation. ([#13354](https://github.com/mastra-ai/mastra/pull/13354))
8
+
9
+ Also improved the Observer to capture user messages more faithfully, reduce repetitive observations, and treat the most recent user message as the highest-priority signal.
10
+
11
+ ### Patch Changes
12
+
13
+ - Improved Observational Memory priority handling. User messages and task completions are now always treated as high priority, ensuring the observer captures the most relevant context during conversations. ([#13329](https://github.com/mastra-ai/mastra/pull/13329))
14
+
15
+ - Improved Observational Memory activation to preserve more usable context after activation. Previously, activation could leave the agent with too much or too little context depending on how chunks aligned with the retention target. ([#13305](https://github.com/mastra-ai/mastra/pull/13305))
16
+ - Activation now lands closer to the retention target by biasing chunk selection to slightly overshoot rather than undershoot
17
+ - Added safeguards to prevent activation from consuming too much context (95% ceiling and 1000-token floor)
18
+ - When pending tokens exceed `blockAfter`, activation now aggressively reduces context to unblock the conversation
19
+ - `bufferActivation` now accepts absolute token values (>= 1000) in addition to ratios (0–1), giving more precise control over when activation triggers
20
+
21
+ Observations no longer inflate token counts due to degenerate LLM output. Runaway or repetitive observer/reflector output is automatically detected and retried, preventing excessive context usage after activation. ([#13354](https://github.com/mastra-ai/mastra/pull/13354))
22
+
23
+ - Updated dependencies [[`0d9efb4`](https://github.com/mastra-ai/mastra/commit/0d9efb47992c34aa90581c18b9f51f774f6252a5), [`7184d87`](https://github.com/mastra-ai/mastra/commit/7184d87c9237d26862f500ccfd0c9f9eadd38ddf), [`5caa13d`](https://github.com/mastra-ai/mastra/commit/5caa13d1b2a496e2565ab124a11de9a51ad3e3b9), [`940163f`](https://github.com/mastra-ai/mastra/commit/940163fc492401d7562301e6f106ccef4fefe06f), [`47892c8`](https://github.com/mastra-ai/mastra/commit/47892c85708eac348209f99f10f9a5f5267e11c0), [`45bb78b`](https://github.com/mastra-ai/mastra/commit/45bb78b70bd9db29678fe49476cd9f4ed01bfd0b), [`70eef84`](https://github.com/mastra-ai/mastra/commit/70eef84b8f44493598fdafa2980a0e7283415eda), [`d84e52d`](https://github.com/mastra-ai/mastra/commit/d84e52d0f6511283ddd21ed5fe7f945449d0f799), [`24b80af`](https://github.com/mastra-ai/mastra/commit/24b80af87da93bb84d389340181e17b7477fa9ca), [`608e156`](https://github.com/mastra-ai/mastra/commit/608e156def954c9604c5e3f6d9dfce3bcc7aeab0), [`2b2e157`](https://github.com/mastra-ai/mastra/commit/2b2e157a092cd597d9d3f0000d62b8bb4a7348ed), [`59d30b5`](https://github.com/mastra-ai/mastra/commit/59d30b5d0cb44ea7a1c440e7460dfb57eac9a9b5), [`453693b`](https://github.com/mastra-ai/mastra/commit/453693bf9e265ddccecef901d50da6caaea0fbc6), [`78d1c80`](https://github.com/mastra-ai/mastra/commit/78d1c808ad90201897a300af551bcc1d34458a20), [`c204b63`](https://github.com/mastra-ai/mastra/commit/c204b632d19e66acb6d6e19b11c4540dd6ad5380), [`742a417`](https://github.com/mastra-ai/mastra/commit/742a417896088220a3b5560c354c45c5ca6d88b9)]:
24
+ - @mastra/core@1.6.0
25
+ - @mastra/schema-compat@1.1.2
26
+
3
27
  ## 1.5.0-alpha.0
4
28
 
5
29
  ### Minor Changes
@@ -3,7 +3,7 @@ name: mastra-memory
3
3
  description: Documentation for @mastra/memory. Use when working with @mastra/memory APIs, configuration, or implementation.
4
4
  metadata:
5
5
  package: "@mastra/memory"
6
- version: "1.5.0-alpha.0"
6
+ version: "1.5.0"
7
7
  ---
8
8
 
9
9
  ## When to use
@@ -1,5 +1,5 @@
1
1
  {
2
- "version": "1.5.0-alpha.0",
2
+ "version": "1.5.0",
3
3
  "package": "@mastra/memory",
4
4
  "exports": {
5
5
  "OBSERVATIONAL_MEMORY_DEFAULTS": {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@mastra/memory",
3
- "version": "1.5.0-alpha.0",
3
+ "version": "1.5.0",
4
4
  "description": "",
5
5
  "type": "module",
6
6
  "main": "./dist/index.js",
@@ -41,7 +41,7 @@
41
41
  "json-schema": "^0.4.0",
42
42
  "lru-cache": "^11.2.6",
43
43
  "xxhash-wasm": "^1.1.0",
44
- "@mastra/schema-compat": "1.1.2-alpha.0"
44
+ "@mastra/schema-compat": "1.1.2"
45
45
  },
46
46
  "devDependencies": {
47
47
  "@ai-sdk/openai": "^1.3.24",
@@ -49,19 +49,19 @@
49
49
  "@types/json-schema": "^7.0.15",
50
50
  "@types/lru-cache": "^7.10.10",
51
51
  "@types/node": "22.19.7",
52
- "@vitest/coverage-v8": "4.0.12",
53
- "@vitest/ui": "4.0.12",
52
+ "@vitest/coverage-v8": "4.0.18",
53
+ "@vitest/ui": "4.0.18",
54
54
  "eslint": "^9.37.0",
55
55
  "tsup": "^8.5.1",
56
56
  "typescript": "^5.9.3",
57
57
  "typescript-eslint": "^8.51.0",
58
- "vitest": "4.0.16",
59
- "@internal/ai-sdk-v4": "0.0.7",
60
- "@internal/ai-sdk-v5": "0.0.7",
61
- "@internal/ai-v6": "0.0.7",
62
- "@internal/types-builder": "0.0.35",
63
- "@mastra/core": "1.6.0-alpha.0",
64
- "@internal/lint": "0.0.60"
58
+ "vitest": "4.0.18",
59
+ "@internal/ai-sdk-v4": "0.0.8",
60
+ "@internal/ai-sdk-v5": "0.0.8",
61
+ "@internal/ai-v6": "0.0.8",
62
+ "@internal/types-builder": "0.0.36",
63
+ "@internal/lint": "0.0.61",
64
+ "@mastra/core": "1.6.0"
65
65
  },
66
66
  "peerDependencies": {
67
67
  "@mastra/core": ">=1.4.1-0 <2.0.0-0",