@mastra/memory 0.10.2-alpha.1 → 0.10.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.turbo/turbo-build.log +2 -2
- package/CHANGELOG.md +35 -0
- package/dist/_tsup-dts-rollup.d.cts +16 -8
- package/dist/_tsup-dts-rollup.d.ts +16 -8
- package/dist/index.cjs +16 -7
- package/dist/index.js +16 -7
- package/package.json +4 -4
- package/src/index.ts +33 -12
- package/src/processors/index.test.ts +22 -22
package/.turbo/turbo-build.log
CHANGED
@@ -1,9 +1,9 @@
 
-> @mastra/memory@0.10.2-alpha.
+> @mastra/memory@0.10.2-alpha.2 build /home/runner/work/mastra/mastra/packages/memory
 > pnpm run check && tsup --silent src/index.ts src/processors/index.ts --format esm,cjs --experimental-dts --clean --treeshake=smallest --splitting
 
 
-> @mastra/memory@0.10.2-alpha.
+> @mastra/memory@0.10.2-alpha.2 check /home/runner/work/mastra/mastra/packages/memory
 > tsc --noEmit
 
 Analysis will use the bundled TypeScript version 5.8.3
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,40 @@
 # @mastra/memory
 
+## 0.10.2
+
+### Patch Changes
+
+- e5dc18d: Added a backwards compatible layer to begin storing/retrieving UIMessages in storage instead of CoreMessages
+- c5bf1ce: Add backwards compat code for new MessageList in storage
+- f0d559f: Fix peerdeps for alpha channel
+- Updated dependencies [ee77e78]
+- Updated dependencies [592a2db]
+- Updated dependencies [e5dc18d]
+- Updated dependencies [ab5adbe]
+- Updated dependencies [1e8bb40]
+- Updated dependencies [1b5fc55]
+- Updated dependencies [195c428]
+- Updated dependencies [f73e11b]
+- Updated dependencies [37643b8]
+- Updated dependencies [99fd6cf]
+- Updated dependencies [c5bf1ce]
+- Updated dependencies [add596e]
+- Updated dependencies [8dc94d8]
+- Updated dependencies [ecebbeb]
+- Updated dependencies [79d5145]
+- Updated dependencies [12b7002]
+- Updated dependencies [2901125]
+  - @mastra/core@0.10.2
+
+## 0.10.2-alpha.2
+
+### Patch Changes
+
+- c5bf1ce: Add backwards compat code for new MessageList in storage
+- Updated dependencies [c5bf1ce]
+- Updated dependencies [12b7002]
+  - @mastra/core@0.10.2-alpha.4
+
 ## 0.10.2-alpha.1
 
 ### Patch Changes
package/dist/_tsup-dts-rollup.d.cts
CHANGED
@@ -1,4 +1,5 @@
-import type { CoreMessage } from '
+import type { CoreMessage } from 'ai';
+import type { CoreMessage as CoreMessage_2 } from '@mastra/core';
 import type { CoreTool } from '@mastra/core';
 import { MastraMemory } from '@mastra/core/memory';
 import type { MastraMessageV1 } from '@mastra/core';
@@ -23,8 +24,9 @@ export declare class Memory extends MastraMemory {
 query({ threadId, resourceId, selectBy, threadConfig, }: StorageGetMessagesArg & {
 threadConfig?: MemoryConfig;
 }): Promise<{
-messages:
+messages: CoreMessage[];
 uiMessages: UIMessage[];
+messagesV2: MastraMessageV2[];
 }>;
 rememberMessages({ threadId, resourceId, vectorMessageSearch, config, }: {
 threadId: string;
@@ -56,9 +58,15 @@ export declare class Memory extends MastraMemory {
 private embeddingCache;
 private firstEmbed;
 private embedMessageContent;
-saveMessages(
-messages: (MastraMessageV1 | MastraMessageV2)[];
-memoryConfig?: MemoryConfig;
+saveMessages(args: {
+messages: (MastraMessageV1 | MastraMessageV2)[] | MastraMessageV1[] | MastraMessageV2[];
+memoryConfig?: MemoryConfig | undefined;
+format?: 'v1';
+}): Promise<MastraMessageV1[]>;
+saveMessages(args: {
+messages: (MastraMessageV1 | MastraMessageV2)[] | MastraMessageV1[] | MastraMessageV2[];
+memoryConfig?: MemoryConfig | undefined;
+format: 'v2';
 }): Promise<MastraMessageV2[]>;
 protected updateMessageToHideWorkingMemory(message: MastraMessageV1): MastraMessageV1 | null;
 protected updateMessageToHideWorkingMemoryV2(message: MastraMessageV2): MastraMessageV2 | null;
@@ -90,8 +98,8 @@ declare class TokenLimiter extends MemoryProcessor {
 * @param options Either a number (token limit) or a configuration object
 */
 constructor(options: number | TokenLimiterOptions);
-process(messages:
-countTokens(message: string |
+process(messages: CoreMessage_2[], { systemMessage, memorySystemMessage, newMessages }?: MemoryProcessorOpts): CoreMessage_2[];
+countTokens(message: string | CoreMessage_2): number;
 }
 export { TokenLimiter }
 export { TokenLimiter as TokenLimiter_alias_1 }
@@ -121,7 +129,7 @@ declare class ToolCallFilter extends MemoryProcessor_2 {
 constructor(options?: {
 exclude?: string[];
 });
-process(messages:
+process(messages: CoreMessage_2[]): CoreMessage_2[];
 }
 export { ToolCallFilter }
 export { ToolCallFilter as ToolCallFilter_alias_1 }
package/dist/_tsup-dts-rollup.d.ts
CHANGED
@@ -1,4 +1,5 @@
-import type { CoreMessage } from '
+import type { CoreMessage } from 'ai';
+import type { CoreMessage as CoreMessage_2 } from '@mastra/core';
 import type { CoreTool } from '@mastra/core';
 import { MastraMemory } from '@mastra/core/memory';
 import type { MastraMessageV1 } from '@mastra/core';
@@ -23,8 +24,9 @@ export declare class Memory extends MastraMemory {
 query({ threadId, resourceId, selectBy, threadConfig, }: StorageGetMessagesArg & {
 threadConfig?: MemoryConfig;
 }): Promise<{
-messages:
+messages: CoreMessage[];
 uiMessages: UIMessage[];
+messagesV2: MastraMessageV2[];
 }>;
 rememberMessages({ threadId, resourceId, vectorMessageSearch, config, }: {
 threadId: string;
@@ -56,9 +58,15 @@ export declare class Memory extends MastraMemory {
 private embeddingCache;
 private firstEmbed;
 private embedMessageContent;
-saveMessages(
-messages: (MastraMessageV1 | MastraMessageV2)[];
-memoryConfig?: MemoryConfig;
+saveMessages(args: {
+messages: (MastraMessageV1 | MastraMessageV2)[] | MastraMessageV1[] | MastraMessageV2[];
+memoryConfig?: MemoryConfig | undefined;
+format?: 'v1';
+}): Promise<MastraMessageV1[]>;
+saveMessages(args: {
+messages: (MastraMessageV1 | MastraMessageV2)[] | MastraMessageV1[] | MastraMessageV2[];
+memoryConfig?: MemoryConfig | undefined;
+format: 'v2';
 }): Promise<MastraMessageV2[]>;
 protected updateMessageToHideWorkingMemory(message: MastraMessageV1): MastraMessageV1 | null;
 protected updateMessageToHideWorkingMemoryV2(message: MastraMessageV2): MastraMessageV2 | null;
@@ -90,8 +98,8 @@ declare class TokenLimiter extends MemoryProcessor {
 * @param options Either a number (token limit) or a configuration object
 */
 constructor(options: number | TokenLimiterOptions);
-process(messages:
-countTokens(message: string |
+process(messages: CoreMessage_2[], { systemMessage, memorySystemMessage, newMessages }?: MemoryProcessorOpts): CoreMessage_2[];
+countTokens(message: string | CoreMessage_2): number;
 }
 export { TokenLimiter }
 export { TokenLimiter as TokenLimiter_alias_1 }
@@ -121,7 +129,7 @@ declare class ToolCallFilter extends MemoryProcessor_2 {
 constructor(options?: {
 exclude?: string[];
 });
-process(messages:
+process(messages: CoreMessage_2[]): CoreMessage_2[];
 }
 export { ToolCallFilter }
 export { ToolCallFilter as ToolCallFilter_alias_1 }
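Taken together, the declaration changes in both rollups describe the backwards-compatible surface: `query()` now also exposes `messagesV2`, and `saveMessages` is overloaded on a `format` flag. A minimal usage sketch of those overloads follows; it assumes an already-configured `Memory` instance and some messages to persist (the `memory` and `toSave` values, and the exact export location of the `MastraMessage*` types, are assumptions rather than part of this diff).

```ts
import { Memory } from '@mastra/memory';
// Type import paths are assumed; adjust to where your @mastra/core version exports them.
import type { MastraMessageV1, MastraMessageV2 } from '@mastra/core';

declare const memory: Memory;                                // assumed: configured elsewhere
declare const toSave: (MastraMessageV1 | MastraMessageV2)[]; // assumed: messages gathered elsewhere

async function persist() {
  // Omitting format (or passing 'v1') matches the first overload and keeps the
  // pre-0.10.2 return shape, so existing callers keep compiling unchanged.
  const v1: MastraMessageV1[] = await memory.saveMessages({ messages: toSave });

  // Opting into format: 'v2' matches the second overload and returns the
  // UIMessage-shaped records that storage now persists.
  const v2: MastraMessageV2[] = await memory.saveMessages({
    messages: toSave,
    format: 'v2',
  });

  return { v1, v2 };
}
```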
package/dist/index.cjs
CHANGED
@@ -116,6 +116,7 @@ var Memory = class extends memory.MastraMemory {
 }
 const rawMessages = await this.storage.getMessages({
 threadId,
+format: "v2",
 selectBy: {
 ...selectBy,
 ...vectorResults?.length ? {
@@ -128,8 +129,7 @@ var Memory = class extends memory.MastraMemory {
 },
 threadConfig: config
 });
-const
-const list = new agent.MessageList({ threadId, resourceId }).add(orderedByDate, "memory");
+const list = new agent.MessageList({ threadId, resourceId }).add(rawMessages, "memory");
 return {
 get messages() {
 const v1Messages = list.get.all.v1();
@@ -140,6 +140,9 @@ var Memory = class extends memory.MastraMemory {
 },
 get uiMessages() {
 return list.get.all.ui();
+},
+get messagesV2() {
+return list.get.all.v2();
 }
 };
 }
@@ -163,11 +166,12 @@ var Memory = class extends memory.MastraMemory {
 last: threadConfig.lastMessages,
 vectorSearchString: threadConfig.semanticRecall && vectorMessageSearch ? vectorMessageSearch : void 0
 },
-threadConfig: config
+threadConfig: config,
+format: "v2"
 });
-const list = new agent.MessageList({ threadId, resourceId }).add(messagesResult.
+const list = new agent.MessageList({ threadId, resourceId }).add(messagesResult.messagesV2, "memory");
 this.logger.debug(`Remembered message history includes ${messagesResult.messages.length} messages.`);
-return { messages: list.get.all.v1(), messagesV2: list.get.all.
+return { messages: list.get.all.v1(), messagesV2: list.get.all.v2() };
 }
 async getThreadById({ threadId }) {
 return this.storage.getThreadById({ threadId });
@@ -257,7 +261,8 @@ var Memory = class extends memory.MastraMemory {
 }
 async saveMessages({
 messages,
-memoryConfig
+memoryConfig,
+format = `v1`
 }) {
 const updatedMessages = messages.map((m) => {
 if (agent.MessageList.isMastraMessageV1(m)) {
@@ -267,7 +272,10 @@ var Memory = class extends memory.MastraMemory {
 return this.updateMessageToHideWorkingMemoryV2(m);
 }).filter((m) => Boolean(m));
 const config = this.getMergedThreadConfig(memoryConfig);
-const result = this.storage.saveMessages({
+const result = this.storage.saveMessages({
+messages: new agent.MessageList().add(updatedMessages, "memory").get.all.v2(),
+format: "v2"
+});
 if (this.vector && config.semanticRecall) {
 let indexName;
 await Promise.all(
@@ -310,6 +318,7 @@ var Memory = class extends memory.MastraMemory {
 })
 );
 }
+if (format === `v1`) return new agent.MessageList().add(await result, "memory").get.all.v1();
 return result;
 }
 updateMessageToHideWorkingMemory(message) {
package/dist/index.js
CHANGED
@@ -110,6 +110,7 @@ var Memory = class extends MastraMemory {
 }
 const rawMessages = await this.storage.getMessages({
 threadId,
+format: "v2",
 selectBy: {
 ...selectBy,
 ...vectorResults?.length ? {
@@ -122,8 +123,7 @@ var Memory = class extends MastraMemory {
 },
 threadConfig: config
 });
-const
-const list = new MessageList({ threadId, resourceId }).add(orderedByDate, "memory");
+const list = new MessageList({ threadId, resourceId }).add(rawMessages, "memory");
 return {
 get messages() {
 const v1Messages = list.get.all.v1();
@@ -134,6 +134,9 @@ var Memory = class extends MastraMemory {
 },
 get uiMessages() {
 return list.get.all.ui();
+},
+get messagesV2() {
+return list.get.all.v2();
 }
 };
 }
@@ -157,11 +160,12 @@ var Memory = class extends MastraMemory {
 last: threadConfig.lastMessages,
 vectorSearchString: threadConfig.semanticRecall && vectorMessageSearch ? vectorMessageSearch : void 0
 },
-threadConfig: config
+threadConfig: config,
+format: "v2"
 });
-const list = new MessageList({ threadId, resourceId }).add(messagesResult.
+const list = new MessageList({ threadId, resourceId }).add(messagesResult.messagesV2, "memory");
 this.logger.debug(`Remembered message history includes ${messagesResult.messages.length} messages.`);
-return { messages: list.get.all.v1(), messagesV2: list.get.all.
+return { messages: list.get.all.v1(), messagesV2: list.get.all.v2() };
 }
 async getThreadById({ threadId }) {
 return this.storage.getThreadById({ threadId });
@@ -251,7 +255,8 @@ var Memory = class extends MastraMemory {
 }
 async saveMessages({
 messages,
-memoryConfig
+memoryConfig,
+format = `v1`
 }) {
 const updatedMessages = messages.map((m) => {
 if (MessageList.isMastraMessageV1(m)) {
@@ -261,7 +266,10 @@ var Memory = class extends MastraMemory {
 return this.updateMessageToHideWorkingMemoryV2(m);
 }).filter((m) => Boolean(m));
 const config = this.getMergedThreadConfig(memoryConfig);
-const result = this.storage.saveMessages({
+const result = this.storage.saveMessages({
+messages: new MessageList().add(updatedMessages, "memory").get.all.v2(),
+format: "v2"
+});
 if (this.vector && config.semanticRecall) {
 let indexName;
 await Promise.all(
@@ -304,6 +312,7 @@ var Memory = class extends MastraMemory {
 })
 );
 }
+if (format === `v1`) return new MessageList().add(await result, "memory").get.all.v1();
 return result;
 }
 updateMessageToHideWorkingMemory(message) {
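In both compiled outputs the compatibility work funnels everything through `MessageList`: storage is read and written as v2, and the list converts back to v1 only when a caller asks for it. A small sketch of that conversion pattern, mirroring the compiled code above (the `MessageList` import path, the type export locations, and the `mixed` input are assumptions):

```ts
import { MessageList } from '@mastra/core/agent'; // assumed import path
import type { MastraMessageV1, MastraMessageV2 } from '@mastra/core'; // assumed export location

declare const mixed: (MastraMessageV1 | MastraMessageV2)[]; // assumed: messages in either format

// Tag the messages with their source ('memory', as the compiled code does),
// then read the same list back in whichever representation is needed.
const list = new MessageList().add(mixed, 'memory');

const asV1 = list.get.all.v1();  // CoreMessage-shaped records, the pre-0.10.2 storage format
const asV2 = list.get.all.v2();  // UIMessage-shaped records, what storage now persists
const forUi = list.get.all.ui(); // UIMessage[] for AI SDK UI rendering
```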
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
 "name": "@mastra/memory",
-"version": "0.10.2
+"version": "0.10.2",
 "description": "",
 "type": "module",
 "main": "./dist/index.js",
@@ -52,11 +52,11 @@
 "typescript": "^5.8.2",
 "typescript-eslint": "^8.26.1",
 "vitest": "^3.1.2",
-"@internal/lint": "0.0.
-"@mastra/core": "0.10.2
+"@internal/lint": "0.0.8",
+"@mastra/core": "0.10.2"
 },
 "peerDependencies": {
-"@mastra/core": "^0.10.
+"@mastra/core": "^0.10.2-alpha.0"
 },
 "scripts": {
 "check": "tsc --noEmit",
package/src/index.ts
CHANGED
@@ -6,7 +6,7 @@ import { MastraMemory } from '@mastra/core/memory';
 import type { MemoryConfig, SharedMemoryConfig, StorageThreadType } from '@mastra/core/memory';
 import type { StorageGetMessagesArg } from '@mastra/core/storage';
 import { embedMany } from 'ai';
-import type { TextPart, UIMessage } from 'ai';
+import type { CoreMessage, TextPart, UIMessage } from 'ai';
 
 import xxhash from 'xxhash-wasm';
 import { updateWorkingMemoryTool } from './tools/working-memory';
@@ -56,7 +56,7 @@ export class Memory extends MastraMemory {
 threadConfig,
 }: StorageGetMessagesArg & {
 threadConfig?: MemoryConfig;
-}): Promise<{ messages:
+}): Promise<{ messages: CoreMessage[]; uiMessages: UIMessage[]; messagesV2: MastraMessageV2[] }> {
 if (resourceId) await this.validateThreadIsOwnedByResource(threadId, resourceId);
 
 const vectorResults: {
@@ -117,6 +117,7 @@ export class Memory extends MastraMemory {
 // Get raw messages from storage
 const rawMessages = await this.storage.getMessages({
 threadId,
+format: 'v2',
 selectBy: {
 ...selectBy,
 ...(vectorResults?.length
@@ -138,9 +139,7 @@ export class Memory extends MastraMemory {
 threadConfig: config,
 });
 
-const
-
-const list = new MessageList({ threadId, resourceId }).add(orderedByDate, 'memory');
+const list = new MessageList({ threadId, resourceId }).add(rawMessages, 'memory');
 return {
 get messages() {
 // returning v1 messages for backwards compat! v1 messages were CoreMessages stored in the db.
@@ -152,13 +151,18 @@ export class Memory extends MastraMemory {
 if (selectBy?.last && v1Messages.length > selectBy.last) {
 // ex: 23 (v1 messages) minus 20 (selectBy.last messages)
 // means we will start from index 3 and keep all the later newer messages from index 3 til the end of the array
-return v1Messages.slice(v1Messages.length - selectBy.last);
+return v1Messages.slice(v1Messages.length - selectBy.last) as CoreMessage[];
 }
-
+// TODO: this is absolutely wrong but became apparent that this is what we were doing before adding MessageList. Our public types said CoreMessage but we were returning MessageType which is equivalent to MastraMessageV1
+// In a breaking change we should make this the type it actually is.
+return v1Messages as CoreMessage[];
 },
 get uiMessages() {
 return list.get.all.ui();
 },
+get messagesV2() {
+return list.get.all.v2();
+},
 };
 }
 
@@ -190,12 +194,13 @@ export class Memory extends MastraMemory {
 vectorSearchString: threadConfig.semanticRecall && vectorMessageSearch ? vectorMessageSearch : undefined,
 },
 threadConfig: config,
+format: 'v2',
 });
 // Using MessageList here just to convert mixed input messages to single type output messages
-const list = new MessageList({ threadId, resourceId }).add(messagesResult.
+const list = new MessageList({ threadId, resourceId }).add(messagesResult.messagesV2, 'memory');
 
 this.logger.debug(`Remembered message history includes ${messagesResult.messages.length} messages.`);
-return { messages: list.get.all.v1(), messagesV2: list.get.all.
+return { messages: list.get.all.v1(), messagesV2: list.get.all.v2() };
 }
 
 async getThreadById({ threadId }: { threadId: string }): Promise<StorageThreadType | null> {
@@ -326,13 +331,25 @@ export class Memory extends MastraMemory {
 return result;
 }
 
+async saveMessages(args: {
+messages: (MastraMessageV1 | MastraMessageV2)[] | MastraMessageV1[] | MastraMessageV2[];
+memoryConfig?: MemoryConfig | undefined;
+format?: 'v1';
+}): Promise<MastraMessageV1[]>;
+async saveMessages(args: {
+messages: (MastraMessageV1 | MastraMessageV2)[] | MastraMessageV1[] | MastraMessageV2[];
+memoryConfig?: MemoryConfig | undefined;
+format: 'v2';
+}): Promise<MastraMessageV2[]>;
 async saveMessages({
 messages,
 memoryConfig,
+format = `v1`,
 }: {
 messages: (MastraMessageV1 | MastraMessageV2)[];
-memoryConfig?: MemoryConfig;
-
+memoryConfig?: MemoryConfig | undefined;
+format?: 'v1' | 'v2';
+}): Promise<MastraMessageV2[] | MastraMessageV1[]> {
 // Then strip working memory tags from all messages
 const updatedMessages = messages
 .map(m => {
@@ -347,7 +364,10 @@ export class Memory extends MastraMemory {
 
 const config = this.getMergedThreadConfig(memoryConfig);
 
-const result = this.storage.saveMessages({
+const result = this.storage.saveMessages({
+messages: new MessageList().add(updatedMessages, 'memory').get.all.v2(),
+format: 'v2',
+});
 
 if (this.vector && config.semanticRecall) {
 let indexName: Promise<string>;
@@ -412,6 +432,7 @@ export class Memory extends MastraMemory {
 );
 }
 
+if (format === `v1`) return new MessageList().add(await result, 'memory').get.all.v1(); // for backwards compat convert to v1 message format
 return result;
 }
 protected updateMessageToHideWorkingMemory(message: MastraMessageV1): MastraMessageV1 | null {
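With these source changes, `query()` exposes three lazy views over one `MessageList`: the legacy `messages` (still typed as `CoreMessage[]` for backwards compatibility, as the TODO above notes), `uiMessages`, and the new `messagesV2`. A minimal sketch of reading them, assuming a configured `Memory` instance (the `memory` value and the ids are placeholders):

```ts
import { Memory } from '@mastra/memory';

declare const memory: Memory; // assumed: configured elsewhere

async function recall() {
  const { messages, uiMessages, messagesV2 } = await memory.query({
    threadId: 'thread-123', // placeholder ids
    resourceId: 'user-456',
    selectBy: { last: 20 },
  });

  // messages   -> CoreMessage[]     (legacy view kept for backwards compatibility)
  // uiMessages -> UIMessage[]       (for AI SDK UI rendering)
  // messagesV2 -> MastraMessageV2[] (the format now persisted in storage)
  return { messages, uiMessages, messagesV2 };
}
```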
package/src/processors/index.test.ts
CHANGED
@@ -1,7 +1,8 @@
 import { openai } from '@ai-sdk/openai';
 import { createTool } from '@mastra/core';
-import type {
+import type { MastraMessageV1 } from '@mastra/core';
 import { Agent } from '@mastra/core/agent';
+import type { CoreMessage } from 'ai';
 import cl100k_base from 'js-tiktoken/ranks/cl100k_base';
 import { describe, it, expect, vi } from 'vitest';
 import { z } from 'zod';
@@ -13,7 +14,7 @@ vi.setConfig({ testTimeout: 20_000, hookTimeout: 20_000 });
 describe('TokenLimiter', () => {
 it('should limit messages to the specified token count', () => {
 // Create messages with predictable token counts (approximately 25 tokens each)
-const {
+const { fakeCore } = generateConversationHistory({
 threadId: '1',
 messageCount: 5,
 toolNames: [],
@@ -21,8 +22,7 @@ describe('TokenLimiter', () => {
 });
 
 const limiter = new TokenLimiter(200);
-
-const result = limiter.process(messages);
+const result = limiter.process(fakeCore) as MastraMessageV1[];
 
 // Should prioritize newest messages (higher ids)
 expect(result.length).toBe(2);
@@ -37,7 +37,7 @@ describe('TokenLimiter', () => {
 });
 
 it('should use different encodings based on configuration', () => {
-const {
+const { fakeCore } = generateConversationHistory({
 threadId: '6',
 messageCount: 1,
 toolNames: [],
@@ -51,16 +51,16 @@ describe('TokenLimiter', () => {
 encoding: cl100k_base,
 });
 
-// All should process
-const defaultResult = defaultLimiter.process(
-const customResult = customLimiter.process(
+// All should process fakeCore successfully but potentially with different token counts
+const defaultResult = defaultLimiter.process(fakeCore);
+const customResult = customLimiter.process(fakeCore);
 
-// Each should return the same
-expect(defaultResult.length).toBe(
-expect(customResult.length).toBe(
+// Each should return the same fakeCore but with potentially different token counts
+expect(defaultResult.length).toBe(fakeCore.length);
+expect(customResult.length).toBe(fakeCore.length);
 });
 
-function estimateTokens(messages:
+function estimateTokens(messages: MastraMessageV1[]) {
 // Create a TokenLimiter just for counting tokens
 const testLimiter = new TokenLimiter(Infinity);
 
@@ -69,7 +69,7 @@ describe('TokenLimiter', () => {
 // Count tokens for each message including all overheads
 for (const message of messages) {
 // Base token count from the countTokens method
-estimatedTokens += testLimiter.countTokens(message);
+estimatedTokens += testLimiter.countTokens(message as CoreMessage); // TODO: this is really actually a MastraMessageV1 but in previous implementations we were casting V1 to CoreMessage which is almost the same but not exactly
 }
 
 return Number(estimatedTokens.toFixed(2));
@@ -82,10 +82,10 @@ describe('TokenLimiter', () => {
 }
 
 async function expectTokenEstimate(config: Parameters<typeof generateConversationHistory>[0], agent: Agent) {
-const { messages, counts } = generateConversationHistory(config);
+const { messages, fakeCore, counts } = generateConversationHistory(config);
 
 const estimate = estimateTokens(messages);
-const used = (await agent.generate(
+const used = (await agent.generate(fakeCore.slice(0, -1))).usage.totalTokens;
 
 console.log(`Estimated ${estimate} tokens, used ${used} tokens.\n`, counts);
 
@@ -193,13 +193,13 @@ describe('TokenLimiter', () => {
 
 describe.concurrent('ToolCallFilter', () => {
 it('should exclude all tool calls when created with no arguments', () => {
-const {
+const { fakeCore } = generateConversationHistory({
 threadId: '3',
 toolNames: ['weather', 'calculator', 'search'],
 messageCount: 1,
 });
 const filter = new ToolCallFilter();
-const result = filter.process(
+const result = filter.process(fakeCore) as MastraMessageV1[];
 
 // Should only keep the text message and assistant res
 expect(result.length).toBe(2);
@@ -207,13 +207,13 @@ describe.concurrent('ToolCallFilter', () => {
 });
 
 it('should exclude specific tool calls by name', () => {
-const {
+const { fakeCore } = generateConversationHistory({
 threadId: '4',
 toolNames: ['weather', 'calculator'],
 messageCount: 2,
 });
 const filter = new ToolCallFilter({ exclude: ['weather'] });
-const result = filter.process(
+const result = filter.process(fakeCore) as MastraMessageV1[];
 
 // Should keep text message, assistant reply, calculator tool call, and calculator result
 expect(result.length).toBe(4);
@@ -224,15 +224,15 @@ describe.concurrent('ToolCallFilter', () => {
 });
 
 it('should keep all messages when exclude list is empty', () => {
-const {
+const { fakeCore } = generateConversationHistory({
 threadId: '5',
 toolNames: ['weather', 'calculator'],
 });
 
 const filter = new ToolCallFilter({ exclude: [] });
-const result = filter.process(
+const result = filter.process(fakeCore);
 
 // Should keep all messages
-expect(result.length).toBe(
+expect(result.length).toBe(fakeCore.length);
 });
 });
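The updated tests exercise the processors against the CoreMessage-shaped `fakeCore` history from the test helper. For reference, a standalone sketch of the same processor API outside the test harness; the `@mastra/memory/processors` import path is assumed from the separate processors entry in the build command, and the `history` value is a placeholder:

```ts
import type { CoreMessage } from '@mastra/core';
import { TokenLimiter, ToolCallFilter } from '@mastra/memory/processors'; // assumed subpath export

declare const history: CoreMessage[]; // assumed: an existing conversation history

// Keep the newest messages that fit within roughly 200 tokens
// (per the rollup declarations, process takes and returns CoreMessage[]).
const limiter = new TokenLimiter(200);
const trimmed = limiter.process(history);

// Drop weather tool calls and their results, keep everything else.
const filter = new ToolCallFilter({ exclude: ['weather'] });
const withoutWeather = filter.process(trimmed);
```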