@mastra/memory 1.0.0-beta.2 → 1.0.0-beta.4
This diff shows the changes between publicly released versions of the package as they appear in their public registries. It is provided for informational purposes only.
- package/CHANGELOG.md +44 -0
- package/dist/index.cjs +104 -23
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.ts +4 -0
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +92 -24
- package/dist/index.js.map +1 -1
- package/dist/tools/working-memory.d.ts +8 -0
- package/dist/tools/working-memory.d.ts.map +1 -1
- package/package.json +7 -14
- package/dist/processors/index.cjs +0 -165
- package/dist/processors/index.cjs.map +0 -1
- package/dist/processors/index.d.ts +0 -3
- package/dist/processors/index.d.ts.map +0 -1
- package/dist/processors/index.js +0 -158
- package/dist/processors/index.js.map +0 -1
- package/dist/processors/token-limiter.d.ts +0 -32
- package/dist/processors/token-limiter.d.ts.map +0 -1
- package/dist/processors/tool-call-filter.d.ts +0 -20
- package/dist/processors/tool-call-filter.d.ts.map +0 -1
package/CHANGELOG.md
CHANGED

@@ -1,5 +1,49 @@

# @mastra/memory

## 1.0.0-beta.4

### Patch Changes

- Fix connection pool exhaustion when saving many messages with semantic recall enabled. Instead of calling vector.upsert() for each message individually (which acquires a separate DB connection), all embeddings are now batched into a single upsert call. ([#10954](https://github.com/mastra-ai/mastra/pull/10954))

- Updated dependencies [[`0d41fe2`](https://github.com/mastra-ai/mastra/commit/0d41fe245355dfc66d61a0d9c85d9400aac351ff), [`6b3ba91`](https://github.com/mastra-ai/mastra/commit/6b3ba91494cc10394df96782f349a4f7b1e152cc), [`7907fd1`](https://github.com/mastra-ai/mastra/commit/7907fd1c5059813b7b870b81ca71041dc807331b)]:
  - @mastra/core@1.0.0-beta.8

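The batching described above appears in the `dist/index.cjs` diff further down. As a rough sketch only (the `embed` and `upsert` callbacks and the message shape are stand-ins, not the package's actual API), the pattern is:

```ts
// Sketch of the batched-upsert pattern from the patch note above. `embed` and
// `upsert` stand in for the Memory instance's embedder and vector store.
type EmbedResult = { embeddings: number[][]; chunks: string[] };

async function upsertMessageEmbeddingsBatched(
  messages: { id: string; text: string }[],
  indexName: string,
  embed: (text: string) => Promise<EmbedResult>,
  upsert: (args: { indexName: string; vectors: number[][]; metadata: Record<string, string>[] }) => Promise<void>,
): Promise<void> {
  const vectors: number[][] = [];
  const metadata: Record<string, string>[] = [];
  for (const message of messages) {
    const { embeddings, chunks } = await embed(message.text);
    vectors.push(...embeddings);
    metadata.push(...chunks.map(() => ({ message_id: message.id })));
  }
  // A single upsert acquires one DB connection for the whole batch,
  // instead of one connection per message.
  if (vectors.length > 0) {
    await upsert({ indexName, vectors, metadata });
  }
}
```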

## 1.0.0-beta.3

### Minor Changes

- Memory system now uses processors. Memory processors (`MessageHistory`, `SemanticRecall`, `WorkingMemory`) are now exported from `@mastra/memory/processors` and automatically added to the agent pipeline based on your memory config. Core processors (`ToolCallFilter`, `TokenLimiter`) remain in `@mastra/core/processors`. ([#9254](https://github.com/mastra-ai/mastra/pull/9254))

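For orientation, the compiled `dist/index.cjs` diff further down re-exports these processors from the package root, and the `dist/processors/*` files are removed in this release, so the sketch below imports from the root entry point. Treat the exact paths as illustrative:

```ts
// Memory-side processors, re-exported from the package root per the dist diff below.
import { MessageHistory, SemanticRecall, WorkingMemory } from "@mastra/memory";
// Core processors stay in @mastra/core, per the changelog entry above.
import { TokenLimiter, ToolCallFilter } from "@mastra/core/processors";
```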

- Schema-based working memory now uses merge semantics instead of replace semantics. ([#10659](https://github.com/mastra-ai/mastra/pull/10659))

  **Before:** Each working memory update replaced the entire memory, causing data loss across conversation turns.

  **After:** For schema-based working memory:
  - Object fields are deep merged (existing fields preserved, only provided fields updated)
  - Set a field to `null` to delete it
  - Arrays are replaced entirely when provided

  Template-based (Markdown) working memory retains the existing replace semantics.

  This fixes issue #7775 where users building profile-like schemas would lose information from previous turns.

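A small, hypothetical example of those merge rules, using the `deepMergeWorkingMemory` helper that the CJS build below exports from the package root (the data and the root import are illustrative, not necessarily part of the documented public API):

```ts
import { deepMergeWorkingMemory } from "@mastra/memory"; // exported in the compiled diff below

const existing = { name: "Ada", nickname: "Countess", location: { city: "London" }, tags: ["math"] };
const update = { nickname: null, location: { country: "UK" }, tags: ["computing"] };

// Objects deep-merge, `null` deletes a field, arrays are replaced wholesale:
deepMergeWorkingMemory(existing, update);
// => { name: "Ada", location: { city: "London", country: "UK" }, tags: ["computing"] }
```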

### Patch Changes

- feat(storage): support querying messages from multiple threads ([#10663](https://github.com/mastra-ai/mastra/pull/10663))
  - Fixed TypeScript errors where `threadId: string | string[]` was being passed to places expecting `Scalar` type
  - Added proper multi-thread support for `listMessages` across all adapters when `threadId` is an array
  - Updated `_getIncludedMessages` to look up message threadId by ID (since message IDs are globally unique)
  - **upstash**: Added `msg-idx:{messageId}` index for O(1) message lookups (backwards compatible with fallback to scan for old messages, with automatic backfill)

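In practice the change above means `threadId` may now be an array when listing messages. A purely hypothetical sketch of the call shape (parameter names and signature are assumptions based on the note, not a verified adapter API):

```ts
// Hypothetical shape only: `threadId` accepts string | string[] per the note above.
type ListMessagesArgs = { threadId: string | string[]; resourceId?: string };

declare function listMessages(args: ListMessagesArgs): Promise<{ messages: unknown[] }>;

await listMessages({ threadId: "thread_a" });
await listMessages({ threadId: ["thread_a", "thread_b"] }); // newly supported
```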

- Fix recall() to return newest messages when using lastMessages config ([#10543](https://github.com/mastra-ai/mastra/pull/10543))

  When using lastMessages: N config without an explicit orderBy, the recall() function was returning the OLDEST N messages instead of the NEWEST N messages. This completely breaks conversation history for any thread that grows beyond the lastMessages limit.

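The fix is visible in the `dist/index.cjs` diff below; condensed into a self-contained sketch (the types are illustrative), the ordering decision is:

```ts
type OrderBy = { field: string; direction: "ASC" | "DESC" } | undefined;

// Mirrors the logic added in dist/index.cjs: when the caller gives no explicit
// orderBy and a message limit applies, query newest-first, then reverse the
// page back into chronological order so messages read oldest -> newest.
function resolveRecallOrder(orderBy: OrderBy, perPage: number | false) {
  const shouldGetNewestAndReverse = !orderBy && perPage !== false;
  const effectiveOrderBy: OrderBy = shouldGetNewestAndReverse
    ? { field: "createdAt", direction: "DESC" }
    : orderBy;
  return { shouldGetNewestAndReverse, effectiveOrderBy };
}
```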
- Updated dependencies [[`ac0d2f4`](https://github.com/mastra-ai/mastra/commit/ac0d2f4ff8831f72c1c66c2be809706d17f65789), [`1a0d3fc`](https://github.com/mastra-ai/mastra/commit/1a0d3fc811482c9c376cdf79ee615c23bae9b2d6), [`85a628b`](https://github.com/mastra-ai/mastra/commit/85a628b1224a8f64cd82ea7f033774bf22df7a7e), [`c237233`](https://github.com/mastra-ai/mastra/commit/c23723399ccedf7f5744b3f40997b79246bfbe64), [`15f9e21`](https://github.com/mastra-ai/mastra/commit/15f9e216177201ea6e3f6d0bfb063fcc0953444f), [`ff94dea`](https://github.com/mastra-ai/mastra/commit/ff94dea935f4e34545c63bcb6c29804732698809), [`5b2ff46`](https://github.com/mastra-ai/mastra/commit/5b2ff4651df70c146523a7fca773f8eb0a2272f8), [`db41688`](https://github.com/mastra-ai/mastra/commit/db4168806d007417e2e60b4f68656dca4e5f40c9), [`5ca599d`](https://github.com/mastra-ai/mastra/commit/5ca599d0bb59a1595f19f58473fcd67cc71cef58), [`bff1145`](https://github.com/mastra-ai/mastra/commit/bff114556b3cbadad9b2768488708f8ad0e91475), [`5c8ca24`](https://github.com/mastra-ai/mastra/commit/5c8ca247094e0cc2cdbd7137822fb47241f86e77), [`e191844`](https://github.com/mastra-ai/mastra/commit/e1918444ca3f80e82feef1dad506cd4ec6e2875f), [`22553f1`](https://github.com/mastra-ai/mastra/commit/22553f11c63ee5e966a9c034a349822249584691), [`7237163`](https://github.com/mastra-ai/mastra/commit/72371635dbf96a87df4b073cc48fc655afbdce3d), [`2500740`](https://github.com/mastra-ai/mastra/commit/2500740ea23da067d6e50ec71c625ab3ce275e64), [`873ecbb`](https://github.com/mastra-ai/mastra/commit/873ecbb517586aa17d2f1e99283755b3ebb2863f), [`4f9bbe5`](https://github.com/mastra-ai/mastra/commit/4f9bbe5968f42c86f4930b8193de3c3c17e5bd36), [`02e51fe`](https://github.com/mastra-ai/mastra/commit/02e51feddb3d4155cfbcc42624fd0d0970d032c0), [`8f3fa3a`](https://github.com/mastra-ai/mastra/commit/8f3fa3a652bb77da092f913ec51ae46e3a7e27dc), [`cd29ad2`](https://github.com/mastra-ai/mastra/commit/cd29ad23a255534e8191f249593849ed29160886), [`bdf4d8c`](https://github.com/mastra-ai/mastra/commit/bdf4d8cdc656d8a2c21d81834bfa3bfa70f56c16), [`854e3da`](https://github.com/mastra-ai/mastra/commit/854e3dad5daac17a91a20986399d3a51f54bf68b), [`ce18d38`](https://github.com/mastra-ai/mastra/commit/ce18d38678c65870350d123955014a8432075fd9), [`cccf9c8`](https://github.com/mastra-ai/mastra/commit/cccf9c8b2d2dfc1a5e63919395b83d78c89682a0), [`61a5705`](https://github.com/mastra-ai/mastra/commit/61a570551278b6743e64243b3ce7d73de915ca8a), [`db70a48`](https://github.com/mastra-ai/mastra/commit/db70a48aeeeeb8e5f92007e8ede52c364ce15287), [`f0fdc14`](https://github.com/mastra-ai/mastra/commit/f0fdc14ee233d619266b3d2bbdeea7d25cfc6d13), [`db18bc9`](https://github.com/mastra-ai/mastra/commit/db18bc9c3825e2c1a0ad9a183cc9935f6691bfa1), [`9b37b56`](https://github.com/mastra-ai/mastra/commit/9b37b565e1f2a76c24f728945cc740c2b09be9da), [`41a23c3`](https://github.com/mastra-ai/mastra/commit/41a23c32f9877d71810f37e24930515df2ff7a0f), [`5d171ad`](https://github.com/mastra-ai/mastra/commit/5d171ad9ef340387276b77c2bb3e83e83332d729), [`f03ae60`](https://github.com/mastra-ai/mastra/commit/f03ae60500fe350c9d828621006cdafe1975fdd8), [`d1e74a0`](https://github.com/mastra-ai/mastra/commit/d1e74a0a293866dece31022047f5dbab65a304d0), [`39e7869`](https://github.com/mastra-ai/mastra/commit/39e7869bc7d0ee391077ce291474d8a84eedccff), [`5761926`](https://github.com/mastra-ai/mastra/commit/57619260c4a2cdd598763abbacd90de594c6bc76), [`c900fdd`](https://github.com/mastra-ai/mastra/commit/c900fdd504c41348efdffb205cfe80d48c38fa33), 
[`604a79f`](https://github.com/mastra-ai/mastra/commit/604a79fecf276e26a54a3fe01bb94e65315d2e0e), [`887f0b4`](https://github.com/mastra-ai/mastra/commit/887f0b4746cdbd7cb7d6b17ac9f82aeb58037ea5), [`2562143`](https://github.com/mastra-ai/mastra/commit/256214336b4faa78646c9c1776612393790d8784), [`ef11a61`](https://github.com/mastra-ai/mastra/commit/ef11a61920fa0ed08a5b7ceedd192875af119749)]:
  - @mastra/core@1.0.0-beta.6
  - @mastra/schema-compat@1.0.0-beta.2

## 1.0.0-beta.2

### Patch Changes

package/dist/index.cjs
CHANGED

@@ -11,12 +11,36 @@ var xxhash = require('xxhash-wasm');
 var zod = require('zod');
 var tools = require('@mastra/core/tools');
 var schemaCompat = require('@mastra/schema-compat');
+var processors = require('@mastra/core/processors');
 
 function _interopDefault (e) { return e && e.__esModule ? e : { default: e }; }
 
 var xxhash__default = /*#__PURE__*/_interopDefault(xxhash);
 
 // src/index.ts
+function deepMergeWorkingMemory(existing, update) {
+  if (!existing || typeof existing !== "object") {
+    return update;
+  }
+  const result = { ...existing };
+  for (const key of Object.keys(update)) {
+    const updateValue = update[key];
+    const existingValue = result[key];
+    if (updateValue === null) {
+      delete result[key];
+    } else if (Array.isArray(updateValue)) {
+      result[key] = updateValue;
+    } else if (typeof updateValue === "object" && updateValue !== null && typeof existingValue === "object" && existingValue !== null && !Array.isArray(existingValue)) {
+      result[key] = deepMergeWorkingMemory(
+        existingValue,
+        updateValue
+      );
+    } else {
+      result[key] = updateValue;
+    }
+  }
+  return result;
+}
 var updateWorkingMemoryTool = (memoryConfig) => {
   const schema = memoryConfig?.workingMemory?.schema;
   let inputSchema = zod.z.object({
@@ -29,9 +53,11 @@ var updateWorkingMemoryTool = (memoryConfig) => {
       )
     });
   }
+  const usesMergeSemantics = Boolean(schema);
+  const description = schema ? `Update the working memory with new information. Data is merged with existing memory - you only need to include fields you want to add or update. Set a field to null to remove it. Arrays are replaced entirely when provided.` : `Update the working memory with new information. Any data not included will be overwritten. Always pass data as string to the memory field. Never pass an object.`;
   return tools.createTool({
     id: "update-working-memory",
-    description
+    description,
     inputSchema,
     execute: async (inputData, context) => {
       const threadId = context?.agent?.threadId;
@@ -51,7 +77,39 @@ var updateWorkingMemoryTool = (memoryConfig) => {
       if (thread.resourceId && thread.resourceId !== resourceId) {
         throw new Error(`Thread with id ${threadId} resourceId does not match the current resourceId ${resourceId}`);
       }
-
+      let workingMemory;
+      if (usesMergeSemantics) {
+        const existingRaw = await memory.getWorkingMemory({
+          threadId,
+          resourceId,
+          memoryConfig
+        });
+        let existingData = null;
+        if (existingRaw) {
+          try {
+            existingData = typeof existingRaw === "string" ? JSON.parse(existingRaw) : existingRaw;
+          } catch {
+            existingData = null;
+          }
+        }
+        let newData;
+        if (typeof inputData.memory === "string") {
+          try {
+            newData = JSON.parse(inputData.memory);
+          } catch (parseError) {
+            const errorMessage = parseError instanceof Error ? parseError.message : String(parseError);
+            throw new Error(
+              `Failed to parse working memory input as JSON: ${errorMessage}. Raw input: ${inputData.memory.length > 500 ? inputData.memory.slice(0, 500) + "..." : inputData.memory}`
+            );
+          }
+        } else {
+          newData = inputData.memory;
+        }
+        const mergedData = deepMergeWorkingMemory(existingData, newData);
+        workingMemory = JSON.stringify(mergedData);
+      } else {
+        workingMemory = typeof inputData.memory === "string" ? inputData.memory : JSON.stringify(inputData.memory);
+      }
       await memory.updateWorkingMemory({
         threadId,
         resourceId,
@@ -123,8 +181,6 @@ var __experimental_updateWorkingMemoryToolVNext = (config) => {
     }
   });
 };
-
-// src/index.ts
 var CHARS_PER_TOKEN = 4;
 var DEFAULT_MESSAGE_RANGE = { before: 1, after: 1 };
 var DEFAULT_TOP_K = 4;
@@ -174,12 +230,14 @@ var Memory = class extends memory.MastraMemory {
     const config = this.getMergedThreadConfig(threadConfig || {});
     if (resourceId) await this.validateThreadIsOwnedByResource(threadId, resourceId, config);
     const perPage = perPageArg !== void 0 ? perPageArg : config.lastMessages;
+    const shouldGetNewestAndReverse = !orderBy && perPage !== false;
+    const effectiveOrderBy = shouldGetNewestAndReverse ? { field: "createdAt", direction: "DESC" } : orderBy;
     const vectorResults = [];
     this.logger.debug(`Memory recall() with:`, {
       threadId,
       perPage,
       page,
-      orderBy,
+      orderBy: effectiveOrderBy,
       threadConfig
     });
     this.checkStorageFeatureSupport(config);
@@ -228,7 +286,7 @@ var Memory = class extends memory.MastraMemory {
       resourceId,
       perPage,
       page,
-      orderBy,
+      orderBy: effectiveOrderBy,
       filter,
       ...vectorResults?.length ? {
         include: vectorResults.map((r) => ({
@@ -239,7 +297,7 @@ var Memory = class extends memory.MastraMemory {
         }))
       } : {}
     });
-    const rawMessages = paginatedResult.messages;
+    const rawMessages = shouldGetNewestAndReverse ? paginatedResult.messages.reverse() : paginatedResult.messages;
     const list = new agent.MessageList({ threadId, resourceId }).add(rawMessages, "memory");
     const messages = list.get.all.db();
     return { messages };
@@ -494,7 +552,8 @@ ${workingMemory}`;
       messages: dbMessages
     });
     if (this.vector && config.semanticRecall) {
-
+      const embeddingData = [];
+      let dimension;
       await Promise.all(
         updatedMessages.map(async (message) => {
           let textForEmbedding = null;
@@ -505,19 +564,11 @@ ${workingMemory}`;
            if (joined) textForEmbedding = joined;
          }
          if (!textForEmbedding) return;
-          const
-
-
-
-
-          throw new Error(
-            `Tried to upsert embeddings to index ${indexName} but this Memory instance doesn't have an attached vector db.`
-          );
-          }
-          await this.vector.upsert({
-            indexName: await indexName,
-            vectors: embeddings,
-            metadata: chunks.map(() => ({
+          const result2 = await this.embedMessageContent(textForEmbedding);
+          dimension = result2.dimension;
+          embeddingData.push({
+            embeddings: result2.embeddings,
+            metadata: result2.chunks.map(() => ({
              message_id: message.id,
              thread_id: message.threadId,
              resource_id: message.resourceId
@@ -525,6 +576,23 @@ ${workingMemory}`;
          });
        })
      );
+      if (embeddingData.length > 0 && dimension !== void 0) {
+        if (typeof this.vector === `undefined`) {
+          throw new Error(`Tried to upsert embeddings but this Memory instance doesn't have an attached vector db.`);
+        }
+        const { indexName } = await this.createEmbeddingIndex(dimension, config);
+        const allVectors = [];
+        const allMetadata = [];
+        for (const data of embeddingData) {
+          allVectors.push(...data.embeddings);
+          allMetadata.push(...data.metadata);
+        }
+        await this.vector.upsert({
+          indexName,
+          vectors: allVectors,
+          metadata: allMetadata
+        });
+      }
     }
     return result;
   }
@@ -630,7 +698,7 @@ ${workingMemory}`;
   async getWorkingMemoryTemplate({
     memoryConfig
   }) {
-    const config = this.getMergedThreadConfig(memoryConfig
+    const config = this.getMergedThreadConfig(memoryConfig);
     if (!config.workingMemory?.enabled) {
       return null;
     }
@@ -661,7 +729,7 @@ ${workingMemory}`;
     if (!config.workingMemory?.enabled) {
       return null;
     }
-    const workingMemoryTemplate = await this.getWorkingMemoryTemplate({ memoryConfig
+    const workingMemoryTemplate = await this.getWorkingMemoryTemplate({ memoryConfig });
     const workingMemoryData = await this.getWorkingMemory({ threadId, resourceId, memoryConfig: config });
     if (!workingMemoryTemplate) {
       return null;
@@ -818,6 +886,19 @@ ${template.content !== this.defaultWorkingMemoryTemplate ? `- Only store informa
 }
 };
 
+Object.defineProperty(exports, "MessageHistory", {
+  enumerable: true,
+  get: function () { return processors.MessageHistory; }
+});
+Object.defineProperty(exports, "SemanticRecall", {
+  enumerable: true,
+  get: function () { return processors.SemanticRecall; }
+});
+Object.defineProperty(exports, "WorkingMemory", {
+  enumerable: true,
+  get: function () { return processors.WorkingMemory; }
+});
 exports.Memory = Memory;
+exports.deepMergeWorkingMemory = deepMergeWorkingMemory;
 //# sourceMappingURL=index.cjs.map
 //# sourceMappingURL=index.cjs.map