server-memory-enhanced 3.0.0 → 3.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.js +40 -67
- package/dist/lib/collaboration/flag-manager.js +4 -3
- package/dist/lib/knowledge-graph-manager.js +18 -18
- package/dist/lib/maintenance/bulk-updater.js +3 -2
- package/dist/lib/maintenance/memory-pruner.js +22 -13
- package/dist/lib/operations/entity-operations.js +13 -4
- package/dist/lib/operations/observation-operations.js +9 -5
- package/dist/lib/operations/relation-operations.js +7 -5
- package/dist/lib/save-memory-handler.js +2 -2
- package/dist/lib/schemas.js +96 -0
- package/package.json +17 -3
- package/README.md +0 -399
package/dist/index.js
CHANGED
@@ -6,7 +6,7 @@ import { promises as fs } from 'fs';
 import path from 'path';
 import { fileURLToPath } from 'url';
 import { KnowledgeGraphManager } from './lib/knowledge-graph-manager.js';
-import { EntitySchema, RelationSchema, SaveMemoryInputSchema, SaveMemoryOutputSchema, GetAnalyticsInputSchema, GetAnalyticsOutputSchema, GetObservationHistoryInputSchema, GetObservationHistoryOutputSchema, ListEntitiesInputSchema, ListEntitiesOutputSchema, ValidateMemoryInputSchema, ValidateMemoryOutputSchema, UpdateObservationInputSchema, UpdateObservationOutputSchema, ReadGraphInputSchema, SearchNodesInputSchema, OpenNodesInputSchema, QueryNodesInputSchema, GetMemoryStatsInputSchema, GetRecentChangesInputSchema, FindRelationPathInputSchema, DetectConflictsInputSchema, GetFlaggedEntitiesInputSchema, GetContextInputSchema } from './lib/schemas.js';
+import { EntitySchema, RelationSchema, SaveMemoryInputSchema, SaveMemoryOutputSchema, GetAnalyticsInputSchema, GetAnalyticsOutputSchema, GetObservationHistoryInputSchema, GetObservationHistoryOutputSchema, ListEntitiesInputSchema, ListEntitiesOutputSchema, ValidateMemoryInputSchema, ValidateMemoryOutputSchema, UpdateObservationInputSchema, UpdateObservationOutputSchema, ReadGraphInputSchema, SearchNodesInputSchema, OpenNodesInputSchema, QueryNodesInputSchema, GetMemoryStatsInputSchema, GetRecentChangesInputSchema, FindRelationPathInputSchema, DetectConflictsInputSchema, GetFlaggedEntitiesInputSchema, GetContextInputSchema, CreateEntitiesInputSchema, CreateRelationsInputSchema, AddObservationsInputSchema, DeleteEntitiesInputSchema, DeleteObservationsInputSchema, DeleteRelationsInputSchema, PruneMemoryInputSchema, BulkUpdateInputSchema, FlagForReviewInputSchema } from './lib/schemas.js';
 import { handleSaveMemory } from './lib/save-memory-handler.js';
 import { validateSaveMemoryRequest } from './lib/validation.js';
 import { JsonlStorageAdapter } from './lib/jsonl-storage-adapter.js';
@@ -115,7 +115,7 @@ server.registerTool("save_memory", {
     inputSchema: SaveMemoryInputSchema,
     outputSchema: SaveMemoryOutputSchema
 }, async (input) => {
-    const result = await handleSaveMemory(input, (entities) => knowledgeGraphManager.createEntities(entities), (relations) => knowledgeGraphManager.createRelations(relations), (threadId) => knowledgeGraphManager.getEntityNamesInThread(threadId));
+    const result = await handleSaveMemory(input, (threadId, entities) => knowledgeGraphManager.createEntities(threadId, entities), (threadId, relations) => knowledgeGraphManager.createRelations(threadId, relations), (threadId) => knowledgeGraphManager.getEntityNamesInThread(threadId));
     if (result.success) {
         // Build success message with entity names
         let successText = `✓ Successfully saved ${result.created.entities} entities and ${result.created.relations} relations.\n` +
@@ -171,14 +171,13 @@ server.registerTool("save_memory", {
 server.registerTool("create_entities", {
     title: "Create Entities",
     description: "Create multiple new entities in the knowledge graph with metadata (agent thread ID, timestamp, confidence, importance)",
-    inputSchema:
-        entities: z.array(EntitySchemaCompat)
-    },
+    inputSchema: CreateEntitiesInputSchema,
     outputSchema: {
         entities: z.array(EntitySchemaCompat)
     }
-}, async (
-    const
+}, async (input) => {
+    const { threadId, entities } = input;
+    const result = await knowledgeGraphManager.createEntities(threadId, entities);
     return {
         content: [{ type: "text", text: JSON.stringify(result, null, 2) }],
         structuredContent: { entities: result }
@@ -188,14 +187,13 @@ server.registerTool("create_entities", {
 server.registerTool("create_relations", {
     title: "Create Relations",
     description: "Create multiple new relations between entities in the knowledge graph with metadata (agent thread ID, timestamp, confidence, importance). Relations should be in active voice",
-    inputSchema:
-        relations: z.array(RelationSchemaCompat)
-    },
+    inputSchema: CreateRelationsInputSchema,
     outputSchema: {
         relations: z.array(RelationSchemaCompat)
     }
-}, async (
-    const
+}, async (input) => {
+    const { threadId, relations } = input;
+    const result = await knowledgeGraphManager.createRelations(threadId, relations);
     return {
         content: [{ type: "text", text: JSON.stringify(result, null, 2) }],
         structuredContent: { relations: result }
@@ -205,24 +203,16 @@ server.registerTool("create_relations", {
 server.registerTool("add_observations", {
     title: "Add Observations",
     description: "Add new observations to existing entities in the knowledge graph with metadata (agent thread ID, timestamp, confidence, importance)",
-    inputSchema:
-        observations: z.array(z.object({
-            entityName: z.string().describe("The name of the entity to add the observations to"),
-            contents: z.array(z.string()).describe("An array of observation contents to add"),
-            agentThreadId: z.string().describe("The agent thread ID adding these observations"),
-            timestamp: z.string().describe("ISO 8601 timestamp of when the observations are added"),
-            confidence: z.number().min(0).max(1).describe("Confidence coefficient from 0 to 1"),
-            importance: z.number().min(0).max(1).describe("Importance for memory integrity if lost: 0 (not important) to 1 (critical)")
-        }))
-    },
+    inputSchema: AddObservationsInputSchema,
     outputSchema: {
         results: z.array(z.object({
             entityName: z.string(),
             addedObservations: z.array(z.string())
         }))
     }
-}, async (
-    const
+}, async (input) => {
+    const { threadId, observations } = input;
+    const result = await knowledgeGraphManager.addObservations(threadId, observations);
     return {
         content: [{ type: "text", text: JSON.stringify(result, null, 2) }],
         structuredContent: { results: result }
@@ -232,15 +222,14 @@ server.registerTool("add_observations", {
 server.registerTool("delete_entities", {
     title: "Delete Entities",
     description: "Delete multiple entities and their associated relations from the knowledge graph",
-    inputSchema:
-        entityNames: z.array(z.string()).describe("An array of entity names to delete")
-    },
+    inputSchema: DeleteEntitiesInputSchema,
     outputSchema: {
         success: z.boolean(),
         message: z.string()
     }
-}, async (
-
+}, async (input) => {
+    const { threadId, entityNames } = input;
+    await knowledgeGraphManager.deleteEntities(threadId, entityNames);
     return {
         content: [{ type: "text", text: "Entities deleted successfully" }],
         structuredContent: { success: true, message: "Entities deleted successfully" }
@@ -250,18 +239,14 @@ server.registerTool("delete_entities", {
 server.registerTool("delete_observations", {
     title: "Delete Observations",
     description: "Delete specific observations from entities in the knowledge graph",
-    inputSchema:
-        deletions: z.array(z.object({
-            entityName: z.string().describe("The name of the entity containing the observations"),
-            observations: z.array(z.string()).describe("An array of observations to delete")
-        }))
-    },
+    inputSchema: DeleteObservationsInputSchema,
     outputSchema: {
         success: z.boolean(),
         message: z.string()
     }
-}, async (
-
+}, async (input) => {
+    const { threadId, deletions } = input;
+    await knowledgeGraphManager.deleteObservations(threadId, deletions);
     return {
         content: [{ type: "text", text: "Observations deleted successfully" }],
         structuredContent: { success: true, message: "Observations deleted successfully" }
@@ -273,7 +258,8 @@ server.registerTool("update_observation", {
     description: "Update an existing observation by creating a new version with updated content. This maintains version history through the supersedes/superseded_by chain.",
     inputSchema: UpdateObservationInputSchema.shape,
     outputSchema: UpdateObservationOutputSchema.shape
-}, async (
+}, async (input) => {
+    const { entityName, observationId, newContent, agentThreadId, timestamp, confidence, importance } = input;
     const updatedObservation = await knowledgeGraphManager.updateObservation({
         entityName,
         observationId,
@@ -297,15 +283,14 @@ server.registerTool("update_observation", {
 server.registerTool("delete_relations", {
     title: "Delete Relations",
     description: "Delete multiple relations from the knowledge graph",
-    inputSchema:
-        relations: z.array(RelationSchemaCompat).describe("An array of relations to delete")
-    },
+    inputSchema: DeleteRelationsInputSchema,
     outputSchema: {
         success: z.boolean(),
         message: z.string()
     }
-}, async (
-
+}, async (input) => {
+    const { threadId, relations } = input;
+    await knowledgeGraphManager.deleteRelations(threadId, relations);
     return {
         content: [{ type: "text", text: "Relations deleted successfully" }],
         structuredContent: { success: true, message: "Relations deleted successfully" }
@@ -592,17 +577,14 @@ server.registerTool("detect_conflicts", {
 server.registerTool("prune_memory", {
     title: "Prune Memory",
     description: "Remove old or low-importance entities to manage memory size, with option to keep minimum number of entities",
-    inputSchema:
-        olderThan: z.string().optional().describe("ISO 8601 timestamp - remove entities older than this"),
-        importanceLessThan: z.number().min(0).max(1).optional().describe("Remove entities with importance less than this value"),
-        keepMinEntities: z.number().optional().describe("Minimum number of entities to keep regardless of filters")
-    },
+    inputSchema: PruneMemoryInputSchema,
     outputSchema: {
         removedEntities: z.number(),
         removedRelations: z.number()
     }
-}, async (
-    const
+}, async (input) => {
+    const { threadId, ...options } = input;
+    const result = await knowledgeGraphManager.pruneMemory(threadId, options);
     return {
         content: [{ type: "text", text: JSON.stringify(result, null, 2) }],
         structuredContent: result
@@ -612,20 +594,14 @@ server.registerTool("prune_memory", {
 server.registerTool("bulk_update", {
     title: "Bulk Update",
     description: "Efficiently update multiple entities at once with new confidence, importance, or observations",
-    inputSchema:
-        updates: z.array(z.object({
-            entityName: z.string(),
-            confidence: z.number().min(0).max(1).optional(),
-            importance: z.number().min(0).max(1).optional(),
-            addObservations: z.array(z.string()).optional()
-        }))
-    },
+    inputSchema: BulkUpdateInputSchema,
     outputSchema: {
         updated: z.number(),
         notFound: z.array(z.string())
     }
-}, async (
-    const
+}, async (input) => {
+    const { threadId, updates } = input;
+    const result = await knowledgeGraphManager.bulkUpdate(threadId, updates);
     return {
         content: [{ type: "text", text: JSON.stringify(result, null, 2) }],
         structuredContent: result
@@ -635,17 +611,14 @@ server.registerTool("bulk_update", {
 server.registerTool("flag_for_review", {
     title: "Flag Entity for Review",
     description: "Mark an entity for human review with a specific reason (Human-in-the-Loop)",
-    inputSchema:
-        entityName: z.string().describe("Name of entity to flag"),
-        reason: z.string().describe("Reason for flagging"),
-        reviewer: z.string().optional().describe("Optional reviewer name")
-    },
+    inputSchema: FlagForReviewInputSchema,
     outputSchema: {
         success: z.boolean(),
         message: z.string()
     }
-}, async (
-
+}, async (input) => {
+    const { threadId, entityName, reason, reviewer } = input;
+    await knowledgeGraphManager.flagForReview(threadId, entityName, reason, reviewer);
     return {
         content: [{ type: "text", text: `Entity "${entityName}" flagged for review` }],
         structuredContent: { success: true, message: `Entity "${entityName}" flagged for review` }
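
Taken together, the index.js changes replace each tool's inline Zod shape with a shared `*InputSchema` and add a top-level `threadId` that the handler destructures before delegating to `knowledgeGraphManager`. A minimal sketch of the resulting call shape from the client side; `callTool` is a stand-in for whatever MCP client invocation you use, and the entity fields follow the package's documented data model rather than this diff:

```js
// Hedged sketch: only the argument shapes are taken from this diff.
async function callTool(name, args) { /* forward to your MCP client here */ }

const threadId = "conversation-001";

// create_entities (3.1.0): threadId is now top-level, and each entity's
// agentThreadId must match it or the input schema rejects the call.
await callTool("create_entities", {
    threadId,
    entities: [{
        name: "Alice",
        entityType: "Person",
        observations: [],                    // observation payload elided; follows EntitySchema
        agentThreadId: threadId,             // must equal threadId (see schemas.js below)
        timestamp: new Date().toISOString(),
        confidence: 0.9,
        importance: 0.7
    }]
});

// Deletion and maintenance tools are scoped the same way.
await callTool("delete_entities", { threadId, entityNames: ["Alice"] });
await callTool("prune_memory", { threadId, importanceLessThan: 0.3, keepMinEntities: 100 });
```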
package/dist/lib/collaboration/flag-manager.js
CHANGED
@@ -4,12 +4,13 @@
 import { randomUUID } from 'crypto';
 /**
  * Flag an entity for review
+ * Thread isolation: Only flags entities in the specified thread
  */
-export async function flagForReview(storage, entityName, reason, reviewer) {
+export async function flagForReview(storage, threadId, entityName, reason, reviewer) {
     const graph = await storage.loadGraph();
-    const entity = graph.entities.find(e => e.name === entityName);
+    const entity = graph.entities.find(e => e.name === entityName && e.agentThreadId === threadId);
     if (!entity) {
-        throw new Error(`Entity with name ${entityName} not found`);
+        throw new Error(`Entity with name ${entityName} not found in thread ${threadId}`);
     }
     // Add a special observation to mark for review
     const flagContent = `[FLAGGED FOR REVIEW: ${reason}${reviewer ? ` - Reviewer: ${reviewer}` : ''}]`;
package/dist/lib/knowledge-graph-manager.js
CHANGED
@@ -43,31 +43,31 @@ export class KnowledgeGraphManager {
         await this.initializePromise;
     }
     // Entity Operations
-    async createEntities(entities) {
+    async createEntities(threadId, entities) {
         await this.ensureInitialized();
-        return EntityOps.createEntities(this.storage, entities);
+        return EntityOps.createEntities(this.storage, threadId, entities);
     }
-    async deleteEntities(entityNames) {
+    async deleteEntities(threadId, entityNames) {
         await this.ensureInitialized();
-        return EntityOps.deleteEntities(this.storage, entityNames);
+        return EntityOps.deleteEntities(this.storage, threadId, entityNames);
     }
     // Relation Operations
-    async createRelations(relations) {
+    async createRelations(threadId, relations) {
         await this.ensureInitialized();
-        return RelationOps.createRelations(this.storage, relations);
+        return RelationOps.createRelations(this.storage, threadId, relations);
     }
-    async deleteRelations(relations) {
+    async deleteRelations(threadId, relations) {
         await this.ensureInitialized();
-        return RelationOps.deleteRelations(this.storage, relations);
+        return RelationOps.deleteRelations(this.storage, threadId, relations);
     }
     // Observation Operations
-    async addObservations(observations) {
+    async addObservations(threadId, observations) {
         await this.ensureInitialized();
-        return ObservationOps.addObservations(this.storage, observations);
+        return ObservationOps.addObservations(this.storage, threadId, observations);
     }
-    async deleteObservations(deletions) {
+    async deleteObservations(threadId, deletions) {
         await this.ensureInitialized();
-        return ObservationOps.deleteObservations(this.storage, deletions);
+        return ObservationOps.deleteObservations(this.storage, threadId, deletions);
     }
     async updateObservation(params) {
         await this.ensureInitialized();
@@ -131,18 +131,18 @@ export class KnowledgeGraphManager {
         return AnalyticsService.getAnalytics(this.storage, threadId);
     }
     // Memory Maintenance
-    async pruneMemory(options) {
+    async pruneMemory(threadId, options) {
         await this.ensureInitialized();
-        return MemoryPruner.pruneMemory(this.storage, options);
+        return MemoryPruner.pruneMemory(this.storage, threadId, options);
     }
-    async bulkUpdate(updates) {
+    async bulkUpdate(threadId, updates) {
         await this.ensureInitialized();
-        return BulkUpdater.bulkUpdate(this.storage, updates);
+        return BulkUpdater.bulkUpdate(this.storage, threadId, updates);
     }
     // Collaboration Features
-    async flagForReview(entityName, reason, reviewer) {
+    async flagForReview(threadId, entityName, reason, reviewer) {
         await this.ensureInitialized();
-        return FlagManager.flagForReview(this.storage, entityName, reason, reviewer);
+        return FlagManager.flagForReview(this.storage, threadId, entityName, reason, reviewer);
     }
     async getFlaggedEntities(threadId) {
         await this.ensureInitialized();
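
The KnowledgeGraphManager hunks show the same pattern at the library level: every mutating method now takes `threadId` as its first argument and forwards it to the operation modules. A before/after sketch, assuming a manager instance wired up the way index.js does it (the constructor argument is not shown in this diff):

```js
import { KnowledgeGraphManager } from './lib/knowledge-graph-manager.js';

// Assumption: the storage adapter argument mirrors what index.js passes in
// (JSONL by default, Neo4j when configured); it is not part of this hunk.
const manager = new KnowledgeGraphManager(/* storage adapter */);
const threadId = 'conversation-001';

// 3.0.0: await manager.deleteEntities(['Alice']);
// 3.1.0: threadId comes first, and only entities owned by that thread are touched.
await manager.deleteEntities(threadId, ['Alice']);

// The same signature change applies to createEntities, createRelations, addObservations,
// deleteObservations, deleteRelations, pruneMemory, bulkUpdate, and flagForReview.
```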
package/dist/lib/maintenance/bulk-updater.js
CHANGED
@@ -4,13 +4,14 @@
 import { randomUUID } from 'crypto';
 /**
  * Perform bulk updates on multiple entities
+ * Thread isolation: Only updates entities in the specified thread
  */
-export async function bulkUpdate(storage, updates) {
+export async function bulkUpdate(storage, threadId, updates) {
     const graph = await storage.loadGraph();
     let updated = 0;
     const notFound = [];
     for (const update of updates) {
-        const entity = graph.entities.find(e => e.name === update.entityName);
+        const entity = graph.entities.find(e => e.name === update.entityName && e.agentThreadId === threadId);
         if (!entity) {
             notFound.push(update.entityName);
             continue;
package/dist/lib/maintenance/memory-pruner.js
CHANGED
@@ -3,13 +3,18 @@
  */
 /**
  * Prune memory based on age and importance criteria
+ * Thread isolation: Only prunes entities and relations in the specified thread
  */
-export async function pruneMemory(storage, options) {
+export async function pruneMemory(storage, threadId, options) {
     const graph = await storage.loadGraph();
-
-    const
-
-
+    // Filter to only entities in the specified thread
+    const threadEntities = graph.entities.filter(e => e.agentThreadId === threadId);
+    const initialEntityCount = threadEntities.length;
+    // Count initial relations in the thread
+    const threadEntityNames = new Set(threadEntities.map(e => e.name));
+    const initialRelationCount = graph.relations.filter(r => r.agentThreadId === threadId && threadEntityNames.has(r.from) && threadEntityNames.has(r.to)).length;
+    // Filter entities to remove within the thread
+    let entitiesToKeep = threadEntities;
     if (options.olderThan) {
         const cutoffDate = new Date(options.olderThan);
         entitiesToKeep = entitiesToKeep.filter(e => new Date(e.timestamp) >= cutoffDate);
@@ -18,13 +23,13 @@ export async function pruneMemory(storage, options) {
         entitiesToKeep = entitiesToKeep.filter(e => e.importance >= options.importanceLessThan);
     }
     // Ensure we keep minimum entities
-    // If keepMinEntities is set and we need more entities, backfill from the original
+    // If keepMinEntities is set and we need more entities, backfill from the original thread entities
     // sorted by importance and recency
     if (options.keepMinEntities && entitiesToKeep.length < options.keepMinEntities) {
         const minToKeep = options.keepMinEntities;
         const alreadyKeptNames = new Set(entitiesToKeep.map(e => e.name));
-        // Candidates are entities from the
-        const candidates =
+        // Candidates are entities from the thread that are not already kept
+        const candidates = threadEntities
             .filter(e => !alreadyKeptNames.has(e.name))
             .sort((a, b) => {
                 if (a.importance !== b.importance)
@@ -36,13 +41,17 @@ export async function pruneMemory(storage, options) {
         entitiesToKeep = [...entitiesToKeep, ...backfill];
     }
     const keptEntityNames = new Set(entitiesToKeep.map(e => e.name));
-
-
-    graph
-    graph.
+    const removedEntityNames = new Set(threadEntityNames);
+    keptEntityNames.forEach(name => removedEntityNames.delete(name));
+    // Update the main graph: remove entities from this thread that should be pruned
+    graph.entities = graph.entities.filter(e => e.agentThreadId !== threadId || keptEntityNames.has(e.name));
+    // Remove relations from this thread that reference removed entities
+    graph.relations = graph.relations.filter(r => !(r.agentThreadId === threadId && (removedEntityNames.has(r.from) || removedEntityNames.has(r.to))));
     await storage.saveGraph(graph);
+    // Count remaining relations in the thread after pruning
+    const finalRelationCount = graph.relations.filter(r => r.agentThreadId === threadId && keptEntityNames.has(r.from) && keptEntityNames.has(r.to)).length;
     return {
         removedEntities: initialEntityCount - entitiesToKeep.length,
-        removedRelations: initialRelationCount -
+        removedRelations: initialRelationCount - finalRelationCount
     };
 }
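
The rewritten pruner is now entirely thread-scoped: it counts, filters, backfills, and removes only entities and relations whose `agentThreadId` matches the given thread, and the returned counts are computed within that thread. An illustrative call, continuing the manager sketch above (the option values are made up):

```js
const result = await manager.pruneMemory('conversation-001', {
    olderThan: '2024-01-01T00:00:00Z', // drop entities created before this date...
    importanceLessThan: 0.3,           // ...or with importance below this value
    keepMinEntities: 100               // but always keep at least this many in the thread
});
// result -> { removedEntities, removedRelations }, both counted inside 'conversation-001';
// entities and relations recorded by other threads are left untouched.
console.log(result);
```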
package/dist/lib/operations/entity-operations.js
CHANGED
@@ -5,10 +5,12 @@
  * Create new entities in the knowledge graph
  * Entity names are globally unique across all threads in the collaborative knowledge graph
  * This prevents duplicate entities while allowing multiple threads to contribute to the same entity
+ * @param threadId - Thread ID passed for context (entities already have agentThreadId set)
  */
-export async function createEntities(storage, entities) {
+export async function createEntities(storage, threadId, entities) {
     const graph = await storage.loadGraph();
     const existingNames = new Set(graph.entities.map(e => e.name));
+    // Filter out entities that already exist, entities are expected to have agentThreadId already set
     const newEntities = entities.filter(e => !existingNames.has(e.name));
     graph.entities.push(...newEntities);
     await storage.saveGraph(graph);
@@ -17,11 +19,18 @@ export async function createEntities(storage, entities) {
 /**
  * Delete entities from the knowledge graph
  * Also removes all relations referencing the deleted entities
+ * Thread isolation: Only deletes entities that belong to the specified thread
  */
-export async function deleteEntities(storage, entityNames) {
+export async function deleteEntities(storage, threadId, entityNames) {
     const graph = await storage.loadGraph();
     const namesToDelete = new Set(entityNames);
-
-
+    // Determine which entities will actually be deleted for this thread
+    const deletedEntityNames = new Set(graph.entities
+        .filter(e => namesToDelete.has(e.name) && e.agentThreadId === threadId)
+        .map(e => e.name));
+    // Delete entities that were identified for deletion
+    graph.entities = graph.entities.filter(e => !deletedEntityNames.has(e.name));
+    // Only delete relations that belong to the specified thread and reference actually deleted entities
+    graph.relations = graph.relations.filter(r => !(r.agentThreadId === threadId && (deletedEntityNames.has(r.from) || deletedEntityNames.has(r.to))));
     await storage.saveGraph(graph);
 }
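
The new `deleteEntities` logic removes only entities owned by the calling thread, and only that thread's relations that point at them. Walking through a hypothetical two-thread graph (names and threads invented for illustration), continuing the manager sketch above:

```js
// Hypothetical starting state:
//   entities:  Alice (agentThreadId: 'thread-A'), Bob (agentThreadId: 'thread-B')
//   relations: Alice -> Bob "knows" ('thread-A'), Bob -> Alice "knows" ('thread-B')
await manager.deleteEntities('thread-A', ['Alice', 'Bob']);
// After the call:
//   - Alice is removed (owned by thread-A); Bob survives (owned by thread-B)
//   - the thread-A relation referencing Alice is removed
//   - the thread-B relation is kept, even though it still points at the deleted name
```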
package/dist/lib/operations/observation-operations.js
CHANGED
@@ -7,13 +7,15 @@ import { validateObservationNotSuperseded, createObservationVersion } from '../u
 /**
  * Add observations to entities
  * Checks for duplicate content and creates version chains when content is updated
+ * Thread parameter is used for validation to ensure only entities in the thread are modified
  */
-export async function addObservations(storage, observations) {
+export async function addObservations(storage, threadId, observations) {
     const graph = await storage.loadGraph();
     const results = observations.map(o => {
-
+        // Find entity - thread validation happens here to ensure we only modify entities from this thread
+        const entity = graph.entities.find(e => e.name === o.entityName && e.agentThreadId === threadId);
         if (!entity) {
-            throw new Error(`Entity with name ${o.entityName} not found`);
+            throw new Error(`Entity with name ${o.entityName} not found in thread ${threadId}`);
         }
         // Check for existing observations with same content to create version chain
         const newObservations = [];
@@ -56,11 +58,13 @@ export async function addObservations(storage, observations) {
 /**
  * Delete observations from entities
  * Supports deletion by content (backward compatibility) or by ID
+ * Thread parameter is used for validation to ensure only entities in the thread are modified
  */
-export async function deleteObservations(storage, deletions) {
+export async function deleteObservations(storage, threadId, deletions) {
     const graph = await storage.loadGraph();
     deletions.forEach(d => {
-
+        // Find entity - thread validation happens here to ensure we only modify entities from this thread
+        const entity = graph.entities.find(e => e.name === d.entityName && e.agentThreadId === threadId);
         if (entity) {
             // Delete observations by content (for backward compatibility) or by ID
             entity.observations = entity.observations.filter(o => !d.observations.includes(o.content) && !d.observations.includes(o.id));
package/dist/lib/operations/relation-operations.js
CHANGED
@@ -6,8 +6,9 @@ import { createRelationKey } from '../utils/relation-key.js';
  * Create new relations in the knowledge graph
  * Relations are globally unique by (from, to, relationType) across all threads
  * This enables multiple threads to collaboratively build the knowledge graph
+ * @param threadId - Thread ID passed for context (relations already have agentThreadId set)
  */
-export async function createRelations(storage, relations) {
+export async function createRelations(storage, threadId, relations) {
     const graph = await storage.loadGraph();
     // Validate that referenced entities exist
     const entityNames = new Set(graph.entities.map(e => e.name));
@@ -33,13 +34,14 @@ export async function createRelations(storage, relations) {
 }
 /**
  * Delete relations from the knowledge graph
- *
+ * Thread isolation: Only deletes relations that belong to the specified thread
  */
-export async function deleteRelations(storage, relations) {
+export async function deleteRelations(storage, threadId, relations) {
     const graph = await storage.loadGraph();
-    // Delete relations
+    // Delete relations only from the specified thread by matching (from, to, relationType, threadId)
     graph.relations = graph.relations.filter(r => !relations.some(delRelation => r.from === delRelation.from &&
         r.to === delRelation.to &&
-        r.relationType === delRelation.relationType
+        r.relationType === delRelation.relationType &&
+        r.agentThreadId === threadId));
     await storage.saveGraph(graph);
 }
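
`deleteRelations` now matches on the calling thread in addition to the (from, to, relationType) triple, so a thread can no longer delete a relation recorded by another thread. A short continuation of the manager sketch:

```js
await manager.deleteRelations('thread-A', [
    { from: 'Alice', to: 'Bob', relationType: 'knows' }
]);
// Removed only if the stored relation's agentThreadId is 'thread-A'.
// If that relation was recorded by a different thread, the call leaves it in place.
```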
package/dist/lib/save-memory-handler.js
CHANGED
@@ -86,7 +86,7 @@ export async function handleSaveMemory(input, createEntitiesFn, createRelationsF
         };
     });
     // Create all entities first
-    const createdEntities = await createEntitiesFn(entities);
+    const createdEntities = await createEntitiesFn(input.threadId, entities);
     // Build relations array from all entities
     const relations = [];
     for (const entity of input.entities) {
@@ -113,7 +113,7 @@ export async function handleSaveMemory(input, createEntitiesFn, createRelationsF
         }
     }
     // Create all relations
-    const createdRelations = await createRelationsFn(relations);
+    const createdRelations = await createRelationsFn(input.threadId, relations);
     // Calculate quality score
     const qualityScore = calculateQualityScore(input.entities);
     // Extract entity names for reference in subsequent calls
package/dist/lib/schemas.js
CHANGED
@@ -199,3 +199,99 @@ export const GetContextInputSchema = z.object({
     entityNames: z.array(z.string()).min(1).describe("Array of entity names to get context for"),
     depth: z.number().int().min(1).optional().default(1).describe("Context depth (default: 1)")
 });
+// Schema for create_entities tool
+export const CreateEntitiesInputSchema = z.object({
+    threadId: z.string().min(1).describe("Thread ID for this conversation/project"),
+    entities: z.array(EntitySchema).describe("Array of entities to create")
+}).superRefine((data, ctx) => {
+    const { threadId, entities } = data;
+    entities.forEach((entity, index) => {
+        if (entity.agentThreadId !== threadId) {
+            ctx.addIssue({
+                code: z.ZodIssueCode.custom,
+                message: "Entity agentThreadId must match the top-level threadId",
+                path: ["entities", index, "agentThreadId"]
+            });
+        }
+    });
+});
+// Schema for create_relations tool
+export const CreateRelationsInputSchema = z.object({
+    threadId: z.string().min(1).describe("Thread ID for this conversation/project"),
+    relations: z.array(RelationSchema).describe("Array of relations to create")
+}).superRefine((data, ctx) => {
+    const { threadId, relations } = data;
+    relations.forEach((relation, index) => {
+        if (relation.agentThreadId !== threadId) {
+            ctx.addIssue({
+                code: z.ZodIssueCode.custom,
+                message: "Relation agentThreadId must match the top-level threadId",
+                path: ["relations", index, "agentThreadId"]
+            });
+        }
+    });
+});
+// Schema for add_observations tool
+export const AddObservationsInputSchema = z.object({
+    threadId: z.string().min(1).describe("Thread ID for this conversation/project"),
+    observations: z.array(z.object({
+        entityName: z.string().describe("The name of the entity to add the observations to"),
+        contents: z.array(z.string()).describe("An array of observation contents to add"),
+        agentThreadId: z.string().describe("The agent thread ID adding these observations"),
+        timestamp: z.string().describe("ISO 8601 timestamp of when the observations are added"),
+        confidence: z.number().min(0).max(1).describe("Confidence coefficient from 0 to 1"),
+        importance: z.number().min(0).max(1).describe("Importance for memory integrity if lost: 0 (not important) to 1 (critical)")
+    })).describe("Array of observations to add")
+}).superRefine((data, ctx) => {
+    data.observations.forEach((observation, index) => {
+        if (observation.agentThreadId !== data.threadId) {
+            ctx.addIssue({
+                code: z.ZodIssueCode.custom,
+                message: "agentThreadId must match the top-level threadId",
+                path: ["observations", index, "agentThreadId"],
+            });
+        }
+    });
+});
+// Schema for delete_entities tool
+export const DeleteEntitiesInputSchema = z.object({
+    threadId: z.string().min(1).describe("Thread ID for this conversation/project"),
+    entityNames: z.array(z.string()).describe("An array of entity names to delete")
+});
+// Schema for delete_observations tool
+export const DeleteObservationsInputSchema = z.object({
+    threadId: z.string().min(1).describe("Thread ID for this conversation/project"),
+    deletions: z.array(z.object({
+        entityName: z.string().describe("The name of the entity containing the observations"),
+        observations: z.array(z.string()).describe("An array of observations to delete")
+    })).describe("Array of deletions to perform")
+});
+// Schema for delete_relations tool
+export const DeleteRelationsInputSchema = z.object({
+    threadId: z.string().min(1).describe("Thread ID for this conversation/project"),
+    relations: z.array(RelationSchema).describe("An array of relations to delete")
+});
+// Schema for prune_memory tool
+export const PruneMemoryInputSchema = z.object({
+    threadId: z.string().min(1).describe("Thread ID for this conversation/project"),
+    olderThan: z.string().optional().describe("ISO 8601 timestamp - remove entities older than this"),
+    importanceLessThan: z.number().min(0).max(1).optional().describe("Remove entities with importance less than this value"),
+    keepMinEntities: z.number().optional().describe("Minimum number of entities to keep regardless of filters")
+});
+// Schema for bulk_update tool
+export const BulkUpdateInputSchema = z.object({
+    threadId: z.string().min(1).describe("Thread ID for this conversation/project"),
+    updates: z.array(z.object({
+        entityName: z.string(),
+        confidence: z.number().min(0).max(1).optional(),
+        importance: z.number().min(0).max(1).optional(),
+        addObservations: z.array(z.string()).optional()
+    })).describe("Array of updates to perform")
+});
+// Schema for flag_for_review tool
+export const FlagForReviewInputSchema = z.object({
+    threadId: z.string().min(1).describe("Thread ID for this conversation/project"),
+    entityName: z.string().describe("Name of entity to flag"),
+    reason: z.string().describe("Reason for flagging"),
+    reviewer: z.string().optional().describe("Optional reviewer name")
+});
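
The new schemas are plain Zod objects plus a `superRefine` cross-field check that rejects payloads whose per-item `agentThreadId` disagrees with the top-level `threadId`. A sketch of how that surfaces to callers; the entity fields shown are assumed from the package's documented data model, and the refinement only runs once the base object parses:

```js
import { CreateEntitiesInputSchema } from './lib/schemas.js';

const parsed = CreateEntitiesInputSchema.safeParse({
    threadId: 'conversation-001',
    entities: [{
        name: 'Alice',
        entityType: 'Person',
        observations: [],                   // payload elided; shape follows EntitySchema
        agentThreadId: 'some-other-thread', // mismatch: triggers the superRefine issue
        timestamp: '2024-01-20T12:00:00Z',
        confidence: 0.9,
        importance: 0.7
    }]
});

if (!parsed.success) {
    // Expected issue (from this diff):
    //   message: "Entity agentThreadId must match the top-level threadId"
    //   path:    ["entities", 0, "agentThreadId"]
    console.error(parsed.error.issues);
}
```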
package/package.json
CHANGED
@@ -1,14 +1,28 @@
 {
     "name": "server-memory-enhanced",
-    "version": "3.0.0",
-    "description": "
+    "version": "3.1.0",
+    "description": "Persistent memory for long conversations - MCP server for knowledge graph with atomic facts, cross-chat memory sharing, and multi-agent delegation",
     "license": "MIT",
     "mcpName": "io.github.modelcontextprotocol/server-memory-enhanced",
     "author": "Andriy Shevchenko",
     "repository": {
         "type": "git",
-        "url": "https://github.com/andriyshevchenko/
+        "url": "https://github.com/andriyshevchenko/atomic-memory-mcp.git"
     },
+    "homepage": "https://github.com/andriyshevchenko/atomic-memory-mcp",
+    "bugs": "https://github.com/andriyshevchenko/atomic-memory-mcp/issues",
+    "keywords": [
+        "mcp",
+        "model-context-protocol",
+        "memory",
+        "knowledge-graph",
+        "ai",
+        "claude",
+        "copilot",
+        "context",
+        "persistence",
+        "multi-agent"
+    ],
     "type": "module",
     "bin": {
         "mcp-server-memory-enhanced": "dist/index.js"
package/README.md
DELETED
@@ -1,399 +0,0 @@
- # Memory-Enhanced MCP Server
-
- An enhanced version of the Memory MCP server that provides persistent knowledge graph storage with agent threading, timestamps, and confidence scoring.
-
- ## Features
-
- - **Agent Thread Isolation**: Each agent thread writes to a separate file for better organization and parallel processing
- - **Timestamp Tracking**: Every entity and relation has an ISO 8601 timestamp indicating when it was created
- - **Confidence Scoring**: Each piece of knowledge has a confidence coefficient (0.0 to 1.0) representing certainty
- - **Persistent Storage**: Knowledge graphs are stored in JSONL format, one file per agent thread
- - **Graph Operations**: Full CRUD support for entities, relations, and observations
-
- ## Enhanced Data Model
-
- ### Entities
- Each entity now includes:
- - `name`: Entity identifier
- - `entityType`: Type of entity (free-form, any domain-specific type allowed)
- - `observations`: Array of **versioned Observation objects** (not strings) - **BREAKING CHANGE**
-   - Each observation has: `id`, `content`, `timestamp`, `version`, `supersedes`, `superseded_by`
-   - Supports full version history tracking
- - `agentThreadId`: Unique identifier for the agent thread
- - `timestamp`: ISO 8601 timestamp of creation
- - `confidence`: Confidence score (0.0 to 1.0)
- - `importance`: Importance for memory integrity if lost (0.0 = not important, 1.0 = critical)
-
- ### Relations
- Each relation now includes:
- - `from`: Source entity name
- - `to`: Target entity name
- - `relationType`: Type of relationship
- - `agentThreadId`: Unique identifier for the agent thread
- - `timestamp`: ISO 8601 timestamp of creation
- - `confidence`: Confidence score (0.0 to 1.0)
- - `importance`: Importance for memory integrity if lost (0.0 = not important, 1.0 = critical)
-
- ## Storage Architecture
-
- The server implements a **collaborative knowledge graph** where multiple agent threads contribute to a shared graph:
-
- ### Design Principles
- - **Shared Entities**: Entity names are globally unique across all threads. If entity "Alice" exists, all threads reference the same entity.
- - **Shared Relations**: Relations are unique by (from, to, relationType) across all threads.
- - **Metadata Tracking**: Each entity and relation tracks which agent thread created it via `agentThreadId`, along with `timestamp` and `confidence`.
- - **Distributed Storage**: Data is physically stored in separate JSONL files per thread for organization and performance.
- - **Aggregated Reads**: Read operations combine data from all thread files to provide a complete view of the knowledge graph.
-
- ### File Organization
- The server stores data in separate JSONL files per agent thread:
- - Default location: `./memory-data/thread-{agentThreadId}.jsonl`
- - Custom location: Set `MEMORY_DIR_PATH` environment variable
- - Each file contains entities and relations for one agent thread
- - Read operations aggregate data across all thread files
-
- ## Available Tools
-
- ### ⭐ Recommended Tool (New)
- 1. **save_memory**: **[RECOMMENDED]** Unified tool for creating entities and relations atomically with server-side validation
-    - Enforces observation limits (max 300 chars, 3 sentences per observation, ignoring periods in version numbers)
-    - Requires at least 1 relation per entity (prevents orphaned nodes)
-    - Free-form entity types with soft normalization
-    - Atomic transactions (all-or-nothing)
-    - Bidirectional relation tracking
-    - Quality score calculation
-    - Clear, actionable error messages
-
- ### Core Operations
- > ⚠️ **Note**: `create_entities` and `create_relations` are **deprecated**. New code should use `save_memory` for better reliability and validation.
-
- 1. **create_entities**: Create new entities with metadata (including importance) - **[DEPRECATED - Use save_memory]**
- 2. **create_relations**: Create relationships between entities with metadata (including importance) - **[DEPRECATED - Use save_memory]**
- 3. **add_observations**: Add observations to existing entities with metadata (including importance)
- 4. **update_observation**: **[NEW]** Update an existing observation by creating a new version with updated content, maintaining version history
- 5. **delete_entities**: Remove entities and cascading relations
- 6. **delete_observations**: Remove specific observations
- 7. **delete_relations**: Delete relationships
- 8. **read_graph**: Read the entire knowledge graph
- 9. **search_nodes**: Search entities by name, type, or observation content
- 10. **open_nodes**: Retrieve specific entities by name
- 11. **query_nodes**: Advanced querying with range-based filtering by timestamp, confidence, and importance
- 12. **list_entities**: List entities with optional filtering by type and name pattern for quick discovery
- 13. **validate_memory**: Validate entities without saving (dry-run) - check for errors before attempting save_memory
-
- ### Memory Management & Insights
- 14. **get_analytics**: **[NEW]** Get simple, LLM-friendly analytics about your knowledge graph
-     - Recent changes (last 10 entities)
-     - Top important entities (by importance score)
-     - Most connected entities (by relation count)
-     - Orphaned entities (quality check)
- 15. **get_observation_history**: **[NEW]** Retrieve version history for observations
-     - Track how observations evolve over time
-     - View complete version chains
-     - Supports rollback by viewing previous versions
- 16. **get_memory_stats**: Get comprehensive statistics (entity counts, thread activity, avg confidence/importance, recent activity)
- 17. **get_recent_changes**: Retrieve entities and relations created/modified since a specific timestamp
- 18. **prune_memory**: Remove old or low-importance entities to manage memory size
- 19. **bulk_update**: Efficiently update multiple entities at once (confidence, importance, observations)
- 20. **list_conversations**: List all available agent threads (conversations) with metadata including entity counts, relation counts, and activity timestamps
-
- ### Relationship Intelligence
- 21. **find_relation_path**: Find the shortest path of relationships between two entities (useful for "how are they connected?")
- 22. **get_context**: Retrieve entities and relations related to specified entities up to a certain depth
-
- ### Quality & Review
- 23. **detect_conflicts**: Detect potentially conflicting observations using pattern matching and negation detection
- 24. **flag_for_review**: Mark entities for human review with a specific reason (Human-in-the-Loop)
- 25. **get_flagged_entities**: Retrieve all entities flagged for review
-
- ## Usage
-
- ### Installation
-
- ```bash
- npm install server-memory-enhanced
- ```
-
- ### Running the Server
-
- ```bash
- npx mcp-server-memory-enhanced
- ```
-
- ### Configuration
-
- #### File Storage (Default)
-
- Set the `MEMORY_DIR_PATH` environment variable to customize the storage location:
-
- ```bash
- MEMORY_DIR_PATH=/path/to/memory/directory npx mcp-server-memory-enhanced
- ```
-
- #### Neo4j Storage (Optional)
-
- The server supports Neo4j as an alternative storage backend. If Neo4j environment variables are set, the server will attempt to connect to Neo4j. If the connection fails or variables are not set, it will automatically fall back to file-based JSONL storage.
-
- **Environment Variables:**
-
- ```bash
- # Neo4j connection settings
- export NEO4J_URI=neo4j://localhost:7687
- export NEO4J_USERNAME=neo4j
- export NEO4J_PASSWORD=your_password
- export NEO4J_DATABASE=neo4j # Optional, defaults to 'neo4j'
-
- # Run the server
- npx mcp-server-memory-enhanced
- ```
-
- **Using Docker Compose:**
-
- A `docker-compose.yml` file is provided for local development with Neo4j:
-
- ```bash
- # Start Neo4j and the MCP server
- docker-compose up
-
- # The Neo4j browser will be available at http://localhost:7474
- # Username: neo4j, Password: testpassword
- ```
-
- **Using with Claude Desktop:**
-
- Configure the server in your Claude Desktop configuration with Neo4j:
-
- ```json
- {
-   "mcpServers": {
-     "memory-enhanced": {
-       "command": "npx",
-       "args": ["-y", "mcp-server-memory-enhanced"],
-       "env": {
-         "NEO4J_URI": "neo4j://localhost:7687",
-         "NEO4J_USERNAME": "neo4j",
-         "NEO4J_PASSWORD": "your_password"
-       }
-     }
-   }
- }
- ```
-
- **Benefits of Neo4j Storage:**
-
- - **Graph-native queries**: Faster relationship traversals and path finding
- - **Scalability**: Better performance with large knowledge graphs
- - **Advanced queries**: Native support for graph algorithms
- - **Visualization**: Use Neo4j Browser to visualize your knowledge graph
- - **Automatic fallback**: If Neo4j is not available, automatically uses file storage
-
- ## User Guide
-
- ### ✨ Using save_memory (Recommended)
-
- The `save_memory` tool is the recommended way to create entities and relations. It provides atomic transactions and server-side validation to ensure high-quality knowledge graphs.
-
- #### Key Principles
-
- 1. **Atomic Observations**: Each observation should be a single, atomic fact
-    - ✅ Good: `"Works at Google"`, `"Lives in San Francisco"`
-    - ❌ Bad: `"Works at Google and lives in San Francisco and has a PhD in Computer Science"`
-    - **Max length**: 300 characters per observation
-    - **Max sentences**: 3 sentences per observation (technical content with version numbers supported)
-
- 2. **Mandatory Relations**: Every entity must connect to at least one other entity
-    - ✅ Good: `{ targetEntity: "Google", relationType: "works at" }`
-    - ❌ Bad: Empty relations array `[]`
-    - This prevents orphaned nodes and ensures a well-connected knowledge graph
-
- 3. **Free Entity Types**: Use any entity type that makes sense for your domain
-    - ✅ Good: `"Person"`, `"Company"`, `"Document"`, `"Recipe"`, `"Patient"`, `"API"`
-    - Soft normalization: `"person"` → `"Person"` (warning, not error)
-    - Space warning: `"API Key"` → suggests `"APIKey"`
-
- 4. **Error Messages**: The tool provides clear, actionable error messages
-    - Too long: `"Observation too long (350 chars). Max 300. Suggestion: Split into multiple observations."`
-    - No relations: `"Entity 'X' must have at least 1 relation. Suggestion: Add relations to show connections."`
-    - Too many sentences: `"Too many sentences (4). Max 3. Suggestion: Split this into 4 separate observations."`
-
- ### Example Usage
-
- ```typescript
- // ✅ RECOMMENDED: Use save_memory for atomic entity and relation creation
- await save_memory({
-   entities: [
-     {
-       name: "Alice",
-       entityType: "Person",
-       observations: ["Works at Google", "Lives in SF"], // Atomic facts, under 300 chars
-       relations: [{ targetEntity: "Bob", relationType: "knows" }] // At least 1 relation required
-     },
-     {
-       name: "Bob",
-       entityType: "Person",
-       observations: ["Works at Microsoft"],
-       relations: [{ targetEntity: "Alice", relationType: "knows" }]
-     }
-   ],
-   threadId: "conversation-001"
- });
-
- // Get analytics about your knowledge graph
- await get_analytics({
-   threadId: "conversation-001"
- });
- // Returns: {
- //   recent_changes: [...],    // Last 10 entities
- //   top_important: [...],     // Top 10 by importance
- //   most_connected: [...],    // Top 10 by relation count
- //   orphaned_entities: [...]  // Quality check
- // }
-
- // Get observation version history
- await get_observation_history({
-   entityName: "Python Scripts",
-   observationId: "obs_abc123"
- });
- // Returns: { history: [{ id, content, version, timestamp, supersedes, superseded_by }, ...] }
-
- // Update an existing observation (creates a new version)
- await update_observation({
-   entityName: "Alice",
-   observationId: "obs_abc123",
-   newContent: "Works at Google (Senior Engineer)",
-   agentThreadId: "conversation-001",
-   timestamp: "2024-01-20T12:00:00Z",
-   confidence: 0.95 // Optional: update confidence
- });
- // Returns: { success: true, updatedObservation: { id, content, version: 2, supersedes, ... }, message: "..." }
-
- // Query nodes with range-based filtering
- await queryNodes({
-   timestampStart: "2024-01-20T09:00:00Z",
-   timestampEnd: "2024-01-20T11:00:00Z",
-   confidenceMin: 0.8,
-   importanceMin: 0.7 // Only get important items
- });
-
- // Get memory statistics
- await getMemoryStats();
- // Returns: { entityCount, relationCount, threadCount, entityTypes, avgConfidence, avgImportance, recentActivity }
-
- // List all conversations (agent threads)
- await listConversations();
- // Returns: { conversations: [{ agentThreadId, entityCount, relationCount, firstCreated, lastUpdated }, ...] }
-
- // Get recent changes since last interaction
- await getRecentChanges({ since: "2024-01-20T10:00:00Z" });
-
- // Find how two entities are connected
- await findRelationPath({ from: "Alice", to: "Charlie", maxDepth: 5 });
-
- // Get context around specific entities
- await getContext({ entityNames: ["Alice", "Bob"], depth: 2 });
-
- // Detect conflicting observations
- await detectConflicts();
-
- // Flag entity for human review
- await flagForReview({ entityName: "Alice", reason: "Uncertain data", reviewer: "John" });
-
- // Bulk update multiple entities
- await bulkUpdate({
-   updates: [
-     { entityName: "Alice", importance: 0.95 },
-     { entityName: "Bob", confidence: 0.85, addObservations: ["updated info"] }
-   ]
- });
-
- // Prune old/unimportant data
- await pruneMemory({ olderThan: "2024-01-01T00:00:00Z", importanceLessThan: 0.3, keepMinEntities: 100 });
- ```
-
- ### 🔄 Migration Guide
-
- For users of the old `create_entities` and `create_relations` tools:
-
- #### What Changed
- - **Old approach**: Two separate tools that could be used independently
-   - `create_entities` → creates entities
-   - `create_relations` → creates relations (optional, often skipped by LLMs)
- - **New approach**: Single `save_memory` tool with atomic transactions
-   - Creates entities and relations together
-   - Enforces mandatory relations (at least 1 per entity)
-   - Validates observation length and atomicity
-
- #### Migrating Your Code
- ```typescript
- // ❌ OLD WAY (deprecated but still works)
- await create_entities({
-   entities: [{ name: "Alice", entityType: "person", observations: ["works at Google and lives in SF"] }]
- });
- await create_relations({ // Often forgotten!
-   relations: [{ from: "Alice", to: "Bob", relationType: "knows" }]
- });
-
- // ✅ NEW WAY (recommended)
- await save_memory({
-   entities: [
-     {
-       name: "Alice",
-       entityType: "Person",
-       observations: ["Works at Google", "Lives in SF"], // Split into atomic facts
-       relations: [{ targetEntity: "Bob", relationType: "knows" }] // Required!
-     }
-   ],
-   threadId: "conversation-001"
- });
- ```
-
- #### Migration Strategy
- 1. **Old tools remain available**: `create_entities` and `create_relations` are deprecated but not removed
- 2. **No forced migration**: Update your code gradually at your own pace
- 3. **New code should use `save_memory`**: Benefits from validation and atomic transactions
- 4. **Observation versioning**: New installations use versioned observations (breaking change for data model)
-
- ## Development
-
- ### Build
-
- ```bash
- npm run build
- ```
-
- ### Test
-
- ```bash
- npm run test
- ```
-
- ### Watch Mode
-
- ```bash
- npm run watch
- ```
-
- ## License
-
- MIT
-
- ## 🤝 Contributing
-
- Contributions are welcome! Please see [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines.
-
- ## 🔒 Security
-
- See [SECURITY.MD](SECURITY.md) for reporting security vulnerabilities.
-
- ## 📜 License
-
- This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
-
- ## 💬 Community
-
- - [GitHub Discussions](https://github.com/modelcontextprotocol/servers/discussions)
- - [Model Context Protocol Documentation](https://modelcontextprotocol.io)
-
- ---
-
- Part of the [Model Context Protocol](https://modelcontextprotocol.io) project.