@mastra/memory 0.10.3-alpha.0 → 0.10.3-alpha.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.turbo/turbo-build.log +2 -2
- package/CHANGELOG.md +8 -0
- package/dist/_tsup-dts-rollup.d.cts +1 -0
- package/dist/_tsup-dts-rollup.d.ts +1 -0
- package/dist/index.cjs +17 -2
- package/dist/index.js +17 -2
- package/package.json +2 -2
- package/src/index.ts +29 -4
package/.turbo/turbo-build.log
CHANGED

@@ -1,9 +1,9 @@
 
-> @mastra/memory@0.10.3-alpha.0 build /home/runner/work/mastra/mastra/packages/memory
+> @mastra/memory@0.10.3-alpha.1 build /home/runner/work/mastra/mastra/packages/memory
 > pnpm run check && tsup --silent src/index.ts src/processors/index.ts --format esm,cjs --experimental-dts --clean --treeshake=smallest --splitting
 
 
-> @mastra/memory@0.10.3-alpha.0 check /home/runner/work/mastra/mastra/packages/memory
+> @mastra/memory@0.10.3-alpha.1 check /home/runner/work/mastra/mastra/packages/memory
 > tsc --noEmit
 
 Analysis will use the bundled TypeScript version 5.8.3
package/CHANGELOG.md
CHANGED

@@ -1,5 +1,13 @@
 # @mastra/memory
 
+## 0.10.3-alpha.1
+
+### Patch Changes
+
+- 48eddb9: update filter logic in Memory class to support semantic recall search scope
+- Updated dependencies [48eddb9]
+  - @mastra/core@0.10.4-alpha.2
+
 ## 0.10.3-alpha.0
 
 ### Patch Changes
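The patch entry above ("update filter logic in Memory class to support semantic recall search scope") is the user-facing change in this release. Below is a hedged sketch of how a caller might opt into the new resource-wide scope through `Memory.query()`; the option names (`scope`, `topK`, `messageRange`, `vectorSearchString`, `threadConfig`) appear in the diffs that follow, while the IDs and numbers are purely illustrative.

```ts
import { Memory } from '@mastra/memory';

// Assumed setup: storage, vector store, and embedder wiring are not part of
// this diff, so they are elided here.
declare const memory: Memory;

// Resource-scoped semantic recall: vector hits are filtered by resource_id
// instead of thread_id, so semantically similar messages from the same
// user's other threads can be recalled alongside the current thread.
const result = await memory.query({
  threadId: 'thread-123',
  resourceId: 'user-456',
  selectBy: { vectorSearchString: 'what did we decide about the deadline?' },
  threadConfig: {
    semanticRecall: { scope: 'resource', topK: 4, messageRange: 2 },
  },
});
```

If the attached storage adapter cannot serve resource-scoped recall, the new `checkStorageFeatureSupport` guard (see `package/src/index.ts` below) throws rather than silently falling back to per-thread search.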
package/dist/_tsup-dts-rollup.d.cts
CHANGED

@@ -21,6 +21,7 @@ import type { UIMessage } from 'ai';
 export declare class Memory extends MastraMemory {
     constructor(config?: SharedMemoryConfig);
     private validateThreadIsOwnedByResource;
+    private checkStorageFeatureSupport;
     query({ threadId, resourceId, selectBy, threadConfig, }: StorageGetMessagesArg & {
         threadConfig?: MemoryConfig;
     }): Promise<{

package/dist/_tsup-dts-rollup.d.ts
CHANGED

@@ -21,6 +21,7 @@ import type { UIMessage } from 'ai';
 export declare class Memory extends MastraMemory {
     constructor(config?: SharedMemoryConfig);
     private validateThreadIsOwnedByResource;
+    private checkStorageFeatureSupport;
     query({ threadId, resourceId, selectBy, threadConfig, }: StorageGetMessagesArg & {
         threadConfig?: MemoryConfig;
     }): Promise<{
package/dist/index.cjs
CHANGED

@@ -56,6 +56,7 @@ var Memory = class extends memory.MastraMemory {
       }
     });
     this.threadConfig = mergedConfig;
+    this.checkStorageFeatureSupport(mergedConfig);
   }
   async validateThreadIsOwnedByResource(threadId, resourceId) {
     const thread = await this.storage.getThreadById({ threadId });
@@ -68,6 +69,13 @@ var Memory = class extends memory.MastraMemory {
       );
     }
   }
+  checkStorageFeatureSupport(config) {
+    if (typeof config.semanticRecall === `object` && config.semanticRecall.scope === `resource` && !this.storage.supports.selectByIncludeResourceScope) {
+      throw new Error(
+        `Memory error: Attached storage adapter "${this.storage.name || "unknown"}" doesn't support semanticRecall: { scope: "resource" } yet and currently only supports per-thread semantic recall.`
+      );
+    }
+  }
   async query({
     threadId,
     resourceId,
@@ -82,6 +90,7 @@ var Memory = class extends memory.MastraMemory {
       threadConfig
     });
     const config = this.getMergedThreadConfig(threadConfig || {});
+    this.checkStorageFeatureSupport(config);
     const defaultRange = DEFAULT_MESSAGE_RANGE;
     const defaultTopK = DEFAULT_TOP_K;
     const vectorConfig = typeof config?.semanticRecall === `boolean` ? {
@@ -91,7 +100,8 @@ var Memory = class extends memory.MastraMemory {
       topK: config?.semanticRecall?.topK ?? defaultTopK,
       messageRange: config?.semanticRecall?.messageRange ?? defaultRange
     };
-
+    const resourceScope = typeof config?.semanticRecall === "object" && config?.semanticRecall?.scope === `resource`;
+    if (config?.semanticRecall && selectBy?.vectorSearchString && this.vector) {
       const { embeddings, dimension } = await this.embedMessageContent(selectBy.vectorSearchString);
       const { indexName } = await this.createEmbeddingIndex(dimension);
       await Promise.all(
@@ -106,7 +116,9 @@ var Memory = class extends memory.MastraMemory {
             indexName,
             queryVector: embedding,
             topK: vectorConfig.topK,
-            filter: {
+            filter: resourceScope ? {
+              resource_id: resourceId
+            } : {
               thread_id: threadId
             }
           })
@@ -116,12 +128,14 @@ var Memory = class extends memory.MastraMemory {
     }
     const rawMessages = await this.storage.getMessages({
       threadId,
+      resourceId,
       format: "v2",
       selectBy: {
         ...selectBy,
         ...vectorResults?.length ? {
           include: vectorResults.map((r) => ({
             id: r.metadata?.message_id,
+            threadId: r.metadata?.thread_id,
             withNextMessages: typeof vectorConfig.messageRange === "number" ? vectorConfig.messageRange : vectorConfig.messageRange.after,
             withPreviousMessages: typeof vectorConfig.messageRange === "number" ? vectorConfig.messageRange : vectorConfig.messageRange.before
           }))
@@ -161,6 +175,7 @@ var Memory = class extends memory.MastraMemory {
       };
     }
     const messagesResult = await this.query({
+      resourceId,
       threadId,
       selectBy: {
         last: threadConfig.lastMessages,
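The behavioral change in the compiled bundle above is narrow: `query()` now passes `resourceId` through to `storage.getMessages()`, tags each included vector hit with its `thread_id`, and switches the vector-store metadata filter when resource scope is requested. A simplified restatement of that filter branch, assuming the `thread_id`/`resource_id` metadata fields shown in the diff (the `'thread'` scope value is an assumed default, not shown in this diff):

```ts
// Simplified sketch of the branch added to query(); not the full method.
type SemanticRecall =
  | boolean
  | {
      scope?: 'thread' | 'resource'; // 'resource' is the value the new code checks for
      topK?: number;
      messageRange?: number | { before: number; after: number };
    };

function buildVectorFilter(semanticRecall: SemanticRecall | undefined, threadId: string, resourceId?: string) {
  const resourceScope = typeof semanticRecall === 'object' && semanticRecall?.scope === 'resource';
  // Resource scope searches embeddings across all of the resource's threads;
  // otherwise recall stays limited to the current thread, as before.
  return resourceScope ? { resource_id: resourceId } : { thread_id: threadId };
}
```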
package/dist/index.js
CHANGED

@@ -50,6 +50,7 @@ var Memory = class extends MastraMemory {
       }
     });
     this.threadConfig = mergedConfig;
+    this.checkStorageFeatureSupport(mergedConfig);
   }
   async validateThreadIsOwnedByResource(threadId, resourceId) {
     const thread = await this.storage.getThreadById({ threadId });
@@ -62,6 +63,13 @@ var Memory = class extends MastraMemory {
       );
     }
   }
+  checkStorageFeatureSupport(config) {
+    if (typeof config.semanticRecall === `object` && config.semanticRecall.scope === `resource` && !this.storage.supports.selectByIncludeResourceScope) {
+      throw new Error(
+        `Memory error: Attached storage adapter "${this.storage.name || "unknown"}" doesn't support semanticRecall: { scope: "resource" } yet and currently only supports per-thread semantic recall.`
+      );
+    }
+  }
   async query({
     threadId,
     resourceId,
@@ -76,6 +84,7 @@ var Memory = class extends MastraMemory {
       threadConfig
     });
     const config = this.getMergedThreadConfig(threadConfig || {});
+    this.checkStorageFeatureSupport(config);
     const defaultRange = DEFAULT_MESSAGE_RANGE;
     const defaultTopK = DEFAULT_TOP_K;
     const vectorConfig = typeof config?.semanticRecall === `boolean` ? {
@@ -85,7 +94,8 @@ var Memory = class extends MastraMemory {
       topK: config?.semanticRecall?.topK ?? defaultTopK,
       messageRange: config?.semanticRecall?.messageRange ?? defaultRange
     };
-
+    const resourceScope = typeof config?.semanticRecall === "object" && config?.semanticRecall?.scope === `resource`;
+    if (config?.semanticRecall && selectBy?.vectorSearchString && this.vector) {
       const { embeddings, dimension } = await this.embedMessageContent(selectBy.vectorSearchString);
       const { indexName } = await this.createEmbeddingIndex(dimension);
       await Promise.all(
@@ -100,7 +110,9 @@ var Memory = class extends MastraMemory {
             indexName,
             queryVector: embedding,
             topK: vectorConfig.topK,
-            filter: {
+            filter: resourceScope ? {
+              resource_id: resourceId
+            } : {
               thread_id: threadId
             }
           })
@@ -110,12 +122,14 @@ var Memory = class extends MastraMemory {
     }
     const rawMessages = await this.storage.getMessages({
       threadId,
+      resourceId,
       format: "v2",
       selectBy: {
         ...selectBy,
         ...vectorResults?.length ? {
           include: vectorResults.map((r) => ({
             id: r.metadata?.message_id,
+            threadId: r.metadata?.thread_id,
             withNextMessages: typeof vectorConfig.messageRange === "number" ? vectorConfig.messageRange : vectorConfig.messageRange.after,
             withPreviousMessages: typeof vectorConfig.messageRange === "number" ? vectorConfig.messageRange : vectorConfig.messageRange.before
           }))
@@ -155,6 +169,7 @@ var Memory = class extends MastraMemory {
       };
     }
     const messagesResult = await this.query({
+      resourceId,
       threadId,
       selectBy: {
         last: threadConfig.lastMessages,
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@mastra/memory",
-  "version": "0.10.3-alpha.0",
+  "version": "0.10.3-alpha.1",
   "description": "",
   "type": "module",
   "main": "./dist/index.js",
@@ -53,7 +53,7 @@
     "typescript-eslint": "^8.26.1",
     "vitest": "^3.2.2",
     "@internal/lint": "0.0.10",
-    "@mastra/core": "0.10.4-alpha.
+    "@mastra/core": "0.10.4-alpha.2"
   },
   "peerDependencies": {
     "@mastra/core": "^0.10.2-alpha.0"
package/src/index.ts
CHANGED

@@ -35,6 +35,8 @@ export class Memory extends MastraMemory {
       },
     });
     this.threadConfig = mergedConfig;
+
+    this.checkStorageFeatureSupport(mergedConfig);
   }
 
   private async validateThreadIsOwnedByResource(threadId: string, resourceId: string) {
@@ -49,6 +51,18 @@ export class Memory extends MastraMemory {
     }
   }
 
+  private checkStorageFeatureSupport(config: MemoryConfig) {
+    if (
+      typeof config.semanticRecall === `object` &&
+      config.semanticRecall.scope === `resource` &&
+      !this.storage.supports.selectByIncludeResourceScope
+    ) {
+      throw new Error(
+        `Memory error: Attached storage adapter "${this.storage.name || 'unknown'}" doesn't support semanticRecall: { scope: "resource" } yet and currently only supports per-thread semantic recall.`,
+      );
+    }
+  }
+
   async query({
     threadId,
     resourceId,
@@ -74,6 +88,8 @@ export class Memory extends MastraMemory {
 
     const config = this.getMergedThreadConfig(threadConfig || {});
 
+    this.checkStorageFeatureSupport(config);
+
     const defaultRange = DEFAULT_MESSAGE_RANGE;
     const defaultTopK = DEFAULT_TOP_K;
 
@@ -88,7 +104,9 @@ export class Memory extends MastraMemory {
           messageRange: config?.semanticRecall?.messageRange ?? defaultRange,
         };
 
-
+    const resourceScope = typeof config?.semanticRecall === 'object' && config?.semanticRecall?.scope === `resource`;
+
+    if (config?.semanticRecall && selectBy?.vectorSearchString && this.vector) {
       const { embeddings, dimension } = await this.embedMessageContent(selectBy.vectorSearchString!);
       const { indexName } = await this.createEmbeddingIndex(dimension);
 
@@ -105,9 +123,13 @@ export class Memory extends MastraMemory {
             indexName,
             queryVector: embedding,
             topK: vectorConfig.topK,
-            filter: {
-              thread_id: threadId,
-            },
+            filter: resourceScope
+              ? {
+                  resource_id: resourceId,
+                }
+              : {
+                  thread_id: threadId,
+                },
           })),
         );
       }),
@@ -117,6 +139,7 @@ export class Memory extends MastraMemory {
     // Get raw messages from storage
     const rawMessages = await this.storage.getMessages({
       threadId,
+      resourceId,
       format: 'v2',
       selectBy: {
         ...selectBy,
@@ -124,6 +147,7 @@ export class Memory extends MastraMemory {
         ? {
             include: vectorResults.map(r => ({
               id: r.metadata?.message_id,
+              threadId: r.metadata?.thread_id,
               withNextMessages:
                 typeof vectorConfig.messageRange === 'number'
                   ? vectorConfig.messageRange
@@ -188,6 +212,7 @@ export class Memory extends MastraMemory {
     }
 
     const messagesResult = await this.query({
+      resourceId,
       threadId,
       selectBy: {
         last: threadConfig.lastMessages,
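The guard added in the source above keys off a `supports.selectByIncludeResourceScope` flag on the attached storage adapter. A minimal stub illustrating the contract this implies from the adapter's side; the flag name and error text come from the diff, while the class shown here is hypothetical and not the real storage base class from `@mastra/core`:

```ts
// Hypothetical stub for illustration only; real adapters implement the full
// storage interface from @mastra/core and ship in their own packages.
interface StorageCapabilities {
  selectByIncludeResourceScope?: boolean;
}

class ExampleStore {
  name = 'ExampleStore';
  // Adapters that can expand selectBy.include hits across every thread owned
  // by a resource advertise it here. Until they do, Memory throws as soon as
  // semanticRecall: { scope: 'resource' } is configured or queried.
  supports: StorageCapabilities = { selectByIncludeResourceScope: false };
}
```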