memtap 2.1.0 → 3.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -3,7 +3,7 @@
3
3
  [![npm](https://img.shields.io/npm/v/memtap)](https://www.npmjs.com/package/memtap)
4
4
  [![License: MIT](https://img.shields.io/badge/License-MIT-green.svg)](https://opensource.org/licenses/MIT)
5
5
 
6
- **MemTap** gives your OpenClaw agent a persistent, graph-based long-term memory powered by ArangoDB.
6
+ **MemTap** gives your OpenClaw agent a persistent, graph-based long-term memory.
7
7
 
8
8
  Instead of simple vector recall, MemTap builds a **knowledge graph** — memories are connected through entities, relationships, and temporal context. Your agent remembers facts, tracks decisions, discovers connections through multi-hop graph traversal, and consolidates knowledge over time.
9
9
 
@@ -104,6 +104,7 @@ Sign up at [memtap.ai](https://memtap.ai) to get your API key.
104
104
  ## Why MemTap?
105
105
 
106
106
  - **Graph-based**: Full knowledge graph, not just vector similarity
107
+ - **Cloud-native**: Managed service — no database setup needed
107
108
  - **GraphRAG**: Multi-hop traversal discovers connections vector search misses
108
109
  - **Decision tracking**: Unique decision management for AI agents
109
110
  - **Auto-capture**: Learns from conversations without explicit commands
@@ -114,8 +115,8 @@ Sign up at [memtap.ai](https://memtap.ai) to get your API key.
114
115
 
115
116
  - **Website**: [memtap.ai](https://memtap.ai)
116
117
  - **API Docs**: [api.memtap.ai](https://api.memtap.ai)
117
- - **GitHub**: [github.com/memtap/openclaw-plugin](https://github.com/memtap/openclaw-plugin)
118
- - **Support**: [github.com/memtap/openclaw-plugin/issues](https://github.com/memtap/openclaw-plugin/issues)
118
+ - **GitHub**: [github.com/psifactory/memtap](https://github.com/psifactory/memtap)
119
+ - **Support**: [github.com/psifactory/memtap/issues](https://github.com/psifactory/memtap/issues)
119
120
 
120
121
  ## License
121
122
 
package/index.ts CHANGED
@@ -1,23 +1,31 @@
1
1
  /**
2
2
  * MemTap — Graph-based Long-Term Memory Plugin for OpenClaw
3
- * v2.1.0 "The Neuron" — Neuromimetic Memory System with Human-like Cognition
3
+ * v3.1.0 "The Oracle" — Proactive Surfacing, Learning Loop & Inference Engine
4
4
  *
5
5
  * Tools:
6
- * - memtap_recall — semantic graph recall
7
- * - memtap_remember — store a memory in the graph
8
- * - memtap_memory — get, update, delete individual memories
9
- * - memtap_bulletin — context bulletin with graph expansion
10
- * - memtap_graph — graph analysis (overview, gaps, clusters, connections, traverse)
11
- * - memtap_decide — decision tracking (list, create, resolve, defer)
12
- * - memtap_graphrag — vector/BM25 + graph traversal search
13
- * - memtap_maintenance — memory maintenance (decay-report, contradictions, dedup-scan, run-all)
14
- * - memtap_entities — entity management (list, memories, merge)
15
- * - memtap_edges — create edges between memories
16
- * - memtap_health server health check and statistics
6
+ * - memtap_recall — semantic graph recall
7
+ * - memtap_remember — store a memory in the graph (supports immutable flag)
8
+ * - memtap_memory — get, update, delete individual memories
9
+ * - memtap_bulletin — context bulletin with graph expansion
10
+ * - memtap_graph — graph analysis (overview, gaps, clusters, connections, traverse)
11
+ * - memtap_decide — decision tracking (list, create, resolve, defer)
12
+ * - memtap_graphrag — vector/BM25 + graph traversal search
13
+ * - memtap_maintenance — memory maintenance (decay-report, contradictions, dedup-scan, resolve-contradictions, run-all)
14
+ * - memtap_entities — entity management (list, memories, merge)
15
+ * - memtap_edges — create edges between memories
16
 + * - memtap_consolidate — consolidate related memories into synthesized summaries
17
+ * - memtap_profile — view/refresh agent memory profile
18
+ * - memtap_export — export memory graph (json, graphml, markdown)
19
+ * - memtap_health — server health check and statistics
20
+ * - memtap_outcome — record decision outcomes for learning loop
21
+ * - memtap_infer — inference engine: discover implicit knowledge via graph traversal + LLM reasoning
17
22
  *
18
23
  * Hooks:
19
- * - preMessage — neuromimetic tiered recall with working memory simulation
20
- * - message_completed — attention-gated encoding with emotional weighting
24
+ * - preMessage — neuromimetic tiered recall with working memory simulation + adaptive decay reinforcement
25
+ * + proactive surfacing of thematically related memories
26
+ * + decision outcome context injection (learning loop)
27
+ * - message_completed — attention-gated encoding with emotional weighting + auto-category assignment
28
+ * + proactive memory usage tracking with importance reinforcement
21
29
  * - agent:bootstrap — inject memory bulletin at session start
22
30
  * - periodic — dream-mode consolidation and neural maintenance
23
31
  * - session_end — performance monitoring and neural analytics
@@ -38,6 +46,10 @@ interface MemTapConfig {
38
46
  embeddingModel?: string;
39
47
  embeddingApiKey?: string;
40
48
  decayRate?: number;
49
+ instructions?: {
50
+ include?: string[];
51
+ exclude?: string[];
52
+ };
41
53
  }
42
54
 
43
55
  function getConfig(api: any): MemTapConfig {
@@ -88,7 +100,9 @@ function storeImportance(userValue: number): number {
88
100
 
89
101
  // ── Memory types ─────────────────────────────────────────────────────────────
90
102
 
91
- const MEMORY_TYPES = ['fact', 'preference', 'decision', 'identity', 'event', 'observation', 'goal', 'task'] as const;
103
+ const MEMORY_TYPES = ['fact', 'preference', 'decision', 'identity', 'event', 'observation', 'goal', 'task', 'consolidated', 'outcome', 'inferred'] as const;
104
+
105
+ const MEMORY_CATEGORIES = ['personal', 'professional', 'technical', 'project', 'health', 'preferences'] as const;
92
106
 
93
107
  // ── Neuromimetic Memory System (v2.1 "The Neuron") ──────────────────────────
94
108
 
@@ -178,6 +192,9 @@ const memoryCache = new Map<string, { data: any[]; timestamp: number; query: str
178
192
  // Attention tracking for encoding decisions
179
193
  let attentionHistory: Array<{ timestamp: number; agent: string; level: string; trigger: string }> = [];
180
194
 
195
+ // Proactive surfacing tracking (which proactive memories were injected per agent)
196
+ const proactiveSurfacedMemories = new Map<string, { memoryIds: string[]; timestamp: number }>();
197
+
181
198
  // ── Neuromimetic Functions ──────────────────────────────────────────────────
182
199
 
183
200
  function updateWorkingMemory(agentId: string, topics: string[], memories: any[] = []): WorkingMemory {
@@ -571,7 +588,8 @@ Antwort als JSON-Array (NUR das Array, kein Markdown):
571
588
  "content": "Kurze, prägnante Beschreibung des Fakts",
572
589
  "type": "fact|preference|decision|identity|event|observation|goal|task",
573
590
  "importance": 1-10,
574
- "tags": ["tag1", "tag2"]
591
+ "tags": ["tag1", "tag2"],
592
+ "category": "personal|professional|technical|project|health|preferences"
575
593
  }
576
594
  ]
577
595
 
@@ -680,19 +698,21 @@ export default function register(api: any) {
680
698
  type: { type: 'string', enum: MEMORY_TYPES, description: 'Memory type (default: fact)' },
681
699
  importance: { type: 'number', description: 'Importance 1-10 (default: 5)' },
682
700
  tags: { type: 'array', items: { type: 'string' }, description: 'Optional tags for categorization' },
701
+ immutable: { type: 'boolean', description: 'Mark memory as immutable (cannot be auto-decayed or auto-archived)' },
683
702
  },
684
703
  required: ['content'],
685
704
  },
686
- async execute(_id: string, params: { content: string; type?: string; importance?: number; tags?: string[] }) {
705
+ async execute(_id: string, params: { content: string; type?: string; importance?: number; tags?: string[]; immutable?: boolean }) {
687
706
  const cfg = getConfig(api);
688
707
  const importance = params.importance ?? 5;
689
- const body = {
708
+ const body: Record<string, any> = {
690
709
  content: params.content,
691
710
  type: params.type || 'fact',
692
711
  agent: agentId(cfg, api),
693
712
  importance: storeImportance(importance),
694
713
  tags: params.tags || [],
695
714
  };
715
+ if (params.immutable) body.immutable = true;
696
716
 
697
717
  try {
698
718
  const data = await bbFetch(cfg, `${baseUrl(cfg)}/memories`, {
@@ -857,13 +877,14 @@ export default function register(api: any) {
857
877
  '- decay-report: find memories that have decayed below importance threshold\n' +
858
878
  '- contradictions: find contradicting memory pairs\n' +
859
879
  '- dedup-scan: find potential duplicate memories\n' +
880
+ '- resolve-contradictions: use LLM to resolve contradicting memories\n' +
860
881
  '- run-all: combined report of all checks',
861
882
  parameters: {
862
883
  type: 'object',
863
884
  properties: {
864
885
  action: {
865
886
  type: 'string',
866
- enum: ['decay-report', 'contradictions', 'dedup-scan', 'run-all'],
887
+ enum: ['decay-report', 'contradictions', 'dedup-scan', 'resolve-contradictions', 'run-all'],
867
888
  description: 'Maintenance action to run',
868
889
  },
869
890
  },
@@ -925,6 +946,63 @@ export default function register(api: any) {
925
946
  return { content: [{ type: 'text', text: output }] };
926
947
  }
927
948
 
949
+ case 'resolve-contradictions': {
950
+ // Fetch contradictions
951
+ data = await bbFetch(cfg, `${base}/maintenance/contradictions`);
952
+ const contradictions = data.results || data.contradictions || [];
953
+ if (contradictions.length === 0) {
954
+ return { content: [{ type: 'text', text: 'No contradictions found to resolve.' }] };
955
+ }
956
+
957
+ let resolved = 0;
958
+ const resolutions: string[] = [];
959
+
960
+ for (const c of contradictions.slice(0, 10)) {
961
+ const m1 = c.memory1 || c;
962
+ const m2 = c.memory2 || c;
963
+ const m1Summary = m1.summary || m1.content || m1.from || '';
964
+ const m2Summary = m2.summary || m2.content || m2.to || '';
965
+
966
+ try {
967
+ const llmUrl = cfg.llmUrl || 'http://127.0.0.1:18789/v1/chat/completions';
968
+ const model = cfg.llmModel || 'anthropic/claude-sonnet-4-20250514';
969
+ const llmRes = await fetch(llmUrl, {
970
+ method: 'POST',
971
+ headers: { 'Content-Type': 'application/json' },
972
+ body: JSON.stringify({
973
+ model,
974
+ max_tokens: 500,
975
+ messages: [
976
+ { role: 'system', content: 'You resolve contradictions between two memories. Decide which is more current/accurate. Respond with JSON: {"keep": 1 or 2, "reason": "short reason"}' },
977
+ { role: 'user', content: `Memory 1 (created: ${m1.created || '?'}): ${m1Summary}\nMemory 2 (created: ${m2.created || '?'}): ${m2Summary}` },
978
+ ],
979
+ }),
980
+ });
981
+ if (!llmRes.ok) continue;
982
+ const llmData = await llmRes.json();
983
+ const text = llmData.choices?.[0]?.message?.content?.trim() || '';
984
+ const cleaned = text.replace(/^```json?\n?/m, '').replace(/\n?```$/m, '').trim();
985
+ const verdict = JSON.parse(cleaned);
986
+
987
+ const keepId = verdict.keep === 1 ? (m1.id || m1._key) : (m2.id || m2._key);
988
+ const archiveId = verdict.keep === 1 ? (m2.id || m2._key) : (m1.id || m1._key);
989
+
990
+ if (keepId && archiveId) {
991
+ await bbFetch(cfg, `${base}/maintenance/resolve-contradiction`, {
992
+ method: 'POST',
993
+ body: JSON.stringify({ keep: keepId, archive: archiveId, reason: verdict.reason }),
994
+ });
995
+ resolved++;
996
+ resolutions.push(`Kept [${keepId}], archived [${archiveId}]: ${verdict.reason}`);
997
+ }
998
+ } catch { /* skip individual resolution failures */ }
999
+ }
1000
+
1001
+ let output = `Contradiction Resolution: ${resolved}/${contradictions.length} resolved\n\n`;
1002
+ output += resolutions.map((r, i) => ` ${i + 1}. ${r}`).join('\n');
1003
+ return { content: [{ type: 'text', text: output }] };
1004
+ }
1005
+
928
1006
  case 'run-all': {
929
1007
  data = await bbFetch(cfg, `${base}/maintenance/run-all`, { method: 'POST', body: '{}' });
930
1008
  const decay = data.decay || data.decayReport || {};
@@ -1342,13 +1420,208 @@ export default function register(api: any) {
1342
1420
  },
1343
1421
  });
1344
1422
 
1423
+ // ── Tool: memtap_consolidate ──────────────────────────────────────────────
1424
+
1425
+ api.registerTool({
1426
+ name: 'memtap_consolidate',
1427
+ description:
1428
+ 'Consolidate related memories into a synthesized summary. ' +
1429
+ 'Searches memories by entity or topic, uses LLM to create a synthesis, ' +
1430
+ 'and stores the result as a consolidated memory linked to the originals.',
1431
+ parameters: {
1432
+ type: 'object',
1433
+ properties: {
1434
+ entity: { type: 'string', description: 'Entity name to consolidate memories around' },
1435
+ topic: { type: 'string', description: 'Topic to consolidate memories about' },
1436
+ },
1437
+ },
1438
+ async execute(_id: string, params: { entity?: string; topic?: string }) {
1439
+ if (!params.entity && !params.topic) {
1440
+ return { content: [{ type: 'text', text: 'Provide at least "entity" or "topic" to consolidate.' }], isError: true };
1441
+ }
1442
+
1443
+ const cfg = getConfig(api);
1444
+ const base = baseUrl(cfg);
1445
+ const agent = agentId(cfg, api);
1446
+ const query = params.entity || params.topic || '';
1447
+
1448
+ try {
1449
+ // 1. Recall related memories
1450
+ const recallUrl = new URL('/recall', base);
1451
+ recallUrl.searchParams.set('q', query);
1452
+ recallUrl.searchParams.set('agent', agent);
1453
+ recallUrl.searchParams.set('limit', '20');
1454
+ const recallData = await bbFetch(cfg, recallUrl.toString());
1455
+ const memories = recallData.results || recallData.memories || [];
1456
+
1457
+ if (memories.length < 2) {
1458
+ return { content: [{ type: 'text', text: `Only ${memories.length} memory found for "${query}" — need at least 2 to consolidate.` }] };
1459
+ }
1460
+
1461
+ // 2. LLM synthesis
1462
+ const memoryTexts = memories.map((m: any, i: number) =>
1463
+ `${i + 1}. [${m.type}] ${m.content} (importance: ${displayImportance(m.importance)}/10, created: ${m.created || '?'})`
1464
+ ).join('\n');
1465
+
1466
+ const llmUrl = cfg.llmUrl || 'http://127.0.0.1:18789/v1/chat/completions';
1467
+ const model = cfg.llmModel || 'anthropic/claude-sonnet-4-20250514';
1468
+
1469
+ const synthRes = await fetch(llmUrl, {
1470
+ method: 'POST',
1471
+ headers: { 'Content-Type': 'application/json' },
1472
+ body: JSON.stringify({
1473
+ model,
1474
+ max_tokens: 1500,
1475
+ messages: [
1476
+ { role: 'system', content: 'Du erstellst eine prägnante Synthese aus mehreren Erinnerungen. Fasse Kernaussagen zusammen, entferne Redundanzen, hebe Widersprüche hervor. Antwort als JSON: {"synthesis": "...", "keyInsights": ["..."], "importance": 1-10}' },
1477
+ { role: 'user', content: `Konsolidiere diese ${memories.length} Erinnerungen zum Thema "${query}":\n\n${memoryTexts}` },
1478
+ ],
1479
+ }),
1480
+ });
1481
+
1482
+ if (!synthRes.ok) throw new Error(`LLM synthesis failed: ${synthRes.status}`);
1483
+ const synthData = await synthRes.json();
1484
+ const synthText = synthData.choices?.[0]?.message?.content?.trim() || '';
1485
+ const cleanedSynth = synthText.replace(/^```json?\n?/m, '').replace(/\n?```$/m, '').trim();
1486
+ const synthesis = JSON.parse(cleanedSynth);
1487
+
1488
+ // 3. Store consolidated memory
1489
+ const storeRes = await bbFetch(cfg, `${base}/memories`, {
1490
+ method: 'POST',
1491
+ body: JSON.stringify({
1492
+ content: synthesis.synthesis,
1493
+ type: 'consolidated',
1494
+ agent,
1495
+ importance: storeImportance(synthesis.importance || 7),
1496
+ tags: ['consolidated', `topic:${query}`, ...(synthesis.keyInsights || []).slice(0, 3)],
1497
+ source: 'plugin:consolidation',
1498
+ }),
1499
+ });
1500
+
1501
+ const consolidatedId = storeRes.id || storeRes._key;
1502
+
1503
+ // 4. Create PART_OF edges from originals to consolidated
1504
+ let edgesCreated = 0;
1505
+ for (const mem of memories) {
1506
+ const memId = mem.id || mem._key;
1507
+ if (memId && consolidatedId) {
1508
+ try {
1509
+ await bbFetch(cfg, `${base}/relate`, {
1510
+ method: 'POST',
1511
+ body: JSON.stringify({ from: memId, to: consolidatedId, type: 'PART_OF', weight: 0.8 }),
1512
+ });
1513
+ edgesCreated++;
1514
+ } catch { /* skip individual edge failures */ }
1515
+ }
1516
+ }
1517
+
1518
+ let response = `Consolidated ${memories.length} memories into [${consolidatedId}]\n`;
1519
+ response += `Synthesis: ${synthesis.synthesis.substring(0, 200)}...\n`;
1520
+ if (synthesis.keyInsights?.length) {
1521
+ response += `Key insights: ${synthesis.keyInsights.join(', ')}\n`;
1522
+ }
1523
+ response += `Edges created: ${edgesCreated} PART_OF links`;
1524
+
1525
+ return { content: [{ type: 'text', text: response }] };
1526
+ } catch (err: any) {
1527
+ return { content: [{ type: 'text', text: `MemTap consolidation error: ${err.message}` }], isError: true };
1528
+ }
1529
+ },
1530
+ });
1531
+
1532
+ // ── Tool: memtap_profile ──────────────────────────────────────────────────
1533
+
1534
+ api.registerTool({
1535
+ name: 'memtap_profile',
1536
+ description:
1537
+ 'View or refresh the agent memory profile. ' +
1538
+ 'Shows a summary of stored knowledge, topic distribution, and memory health.',
1539
+ parameters: {
1540
+ type: 'object',
1541
+ properties: {
1542
+ action: {
1543
+ type: 'string',
1544
+ enum: ['view', 'refresh'],
1545
+ description: 'View the current profile or refresh it from the knowledge graph',
1546
+ },
1547
+ },
1548
+ required: ['action'],
1549
+ },
1550
+ async execute(_id: string, params: { action: 'view' | 'refresh' }) {
1551
+ const cfg = getConfig(api);
1552
+ const base = baseUrl(cfg);
1553
+ const agent = agentId(cfg, api);
1554
+
1555
+ try {
1556
+ if (params.action === 'view') {
1557
+ const data = await bbFetch(cfg, `${base}/profiles?agent=${encodeURIComponent(agent)}`);
1558
+ return { content: [{ type: 'text', text: JSON.stringify(data, null, 2) }] };
1559
+ } else {
1560
+ const data = await bbFetch(cfg, `${base}/profiles`, {
1561
+ method: 'POST',
1562
+ body: JSON.stringify({ agent }),
1563
+ });
1564
+ return { content: [{ type: 'text', text: `Profile refreshed.\n${JSON.stringify(data, null, 2)}` }] };
1565
+ }
1566
+ } catch (err: any) {
1567
+ return { content: [{ type: 'text', text: `MemTap profile error: ${err.message}` }], isError: true };
1568
+ }
1569
+ },
1570
+ });
1571
+
1572
+ // ── Tool: memtap_export ───────────────────────────────────────────────────
1573
+
1574
+ api.registerTool({
1575
+ name: 'memtap_export',
1576
+ description:
1577
+ 'Export the memory graph in various formats. ' +
1578
+ 'Supports JSON (full data), GraphML (for graph visualization tools), and Markdown (human-readable).',
1579
+ parameters: {
1580
+ type: 'object',
1581
+ properties: {
1582
+ format: {
1583
+ type: 'string',
1584
+ enum: ['json', 'graphml', 'markdown'],
1585
+ description: 'Export format',
1586
+ },
1587
+ },
1588
+ required: ['format'],
1589
+ },
1590
+ async execute(_id: string, params: { format: 'json' | 'graphml' | 'markdown' }) {
1591
+ const cfg = getConfig(api);
1592
+ const base = baseUrl(cfg);
1593
+ const agent = agentId(cfg, api);
1594
+
1595
+ try {
1596
+ const url = new URL('/export', base);
1597
+ url.searchParams.set('format', params.format);
1598
+ url.searchParams.set('agent', agent);
1599
+
1600
+ const data = await bbFetch(cfg, url.toString());
1601
+
1602
+ if (params.format === 'json') {
1603
+ const summary = data.memories?.length ?? data.count ?? '?';
1604
+ return { content: [{ type: 'text', text: `Exported ${summary} memories as JSON.\n\n${JSON.stringify(data, null, 2).substring(0, 5000)}` }] };
1605
+ } else if (params.format === 'graphml') {
1606
+ const graphml = typeof data === 'string' ? data : data.graphml || JSON.stringify(data);
1607
+ return { content: [{ type: 'text', text: `GraphML export:\n\n${String(graphml).substring(0, 5000)}` }] };
1608
+ } else {
1609
+ const md = typeof data === 'string' ? data : data.markdown || JSON.stringify(data, null, 2);
1610
+ return { content: [{ type: 'text', text: String(md).substring(0, 5000) }] };
1611
+ }
1612
+ } catch (err: any) {
1613
+ return { content: [{ type: 'text', text: `MemTap export error: ${err.message}` }], isError: true };
1614
+ }
1615
+ },
1616
+ });
1617
+
1345
1618
  // ── Tool: memtap_health (Enhanced Neural Monitoring) ────────────────────────
1346
1619
 
1347
1620
  api.registerTool({
1348
1621
  name: 'memtap_health',
1349
1622
  description:
1350
1623
  'Check MemTap server health and get neural system statistics. Actions:\n' +
1351
- '- health: server health check (ArangoDB version, counts)\n' +
1624
+ '- health: server health check (version, counts)\n' +
1352
1625
  '- stats: detailed statistics (by type, by agent, entity/edge counts)\n' +
1353
1626
  '- neural: neuromimetic system status (working memory, attention, consolidation)\n' +
1354
1627
  '- performance: system performance metrics (cache hits, response times, memory usage)\n' +
@@ -1377,7 +1650,7 @@ export default function register(api: any) {
1377
1650
  case 'health':
1378
1651
  data = await bbFetch(cfg, `${base}/health`);
1379
1652
  const counts = data.counts || {};
1380
- return { content: [{ type: 'text', text: `MemTap: ${data.status}\nArangoDB: ${data.arango || 'unknown'}\nMemories: ${counts.memories ?? '?'} | Entities: ${counts.entities ?? '?'} | Edges: ${counts.edges ?? '?'}` }] };
1653
+ return { content: [{ type: 'text', text: `MemTap: ${data.status}\nServer: ${data.arango || data.version || 'unknown'}\nMemories: ${counts.memories ?? '?'} | Entities: ${counts.entities ?? '?'} | Edges: ${counts.edges ?? '?'}` }] };
1381
1654
 
1382
1655
  case 'stats':
1383
1656
  data = await bbFetch(cfg, `${base}/stats`);
@@ -1558,26 +1831,54 @@ export default function register(api: any) {
1558
1831
  }
1559
1832
  }
1560
1833
 
1834
+ // Apply instructions filtering (include/exclude patterns)
1835
+ if (cfg.instructions) {
1836
+ if (cfg.instructions.exclude?.length) {
1837
+ memories = memories.filter((m: any) => {
1838
+ const text = (m.content || '').toLowerCase();
1839
+ return !cfg.instructions!.exclude!.some(pat => text.includes(pat.toLowerCase()));
1840
+ });
1841
+ }
1842
+ if (cfg.instructions.include?.length) {
1843
+ // Boost memories matching include patterns
1844
+ memories.forEach((m: any) => {
1845
+ const text = (m.content || '').toLowerCase();
1846
+ if (cfg.instructions!.include!.some(pat => text.includes(pat.toLowerCase()))) {
1847
+ m.importance = Math.min(1, (m.importance || 0.5) + 0.1);
1848
+ }
1849
+ });
1850
+ }
1851
+ }
1852
+
1561
1853
  if (memories.length > 0) {
1562
- // Retrieval Practice Strengthening - memories get stronger when accessed
1854
+ // Adaptive Decay: Track access_count and send reinforcement signal
1563
1855
  memories.forEach(async (m: any) => {
1564
1856
  try {
1565
1857
  if (!useCache && m.id) {
1566
- // Update retrieval count and strengthen memory
1567
- const strengthBoost = FORGETTING_CURVE.retrievalStrengthening *
1568
- (context?.attentionLevel === 'focused' ? 1.2 : 1.0);
1569
-
1570
- // Background update to boost importance (fire and forget)
1858
+ const accessCount = (m.accessCount || m.retrievalCount || 0) + 1;
1859
+
1860
+ // Adaptive reinforcement: frequently accessed memories decay slower
1861
+ const accessBonus = Math.min(0.3, accessCount * 0.02);
1862
+ const attentionMultiplier = conversationCtx.attentionLevel === 'flow' ? 1.5 :
1863
+ conversationCtx.attentionLevel === 'focused' ? 1.2 : 1.0;
1864
+ const strengthBoost = (FORGETTING_CURVE.retrievalStrengthening + accessBonus) * attentionMultiplier;
1865
+
1866
+ // Send reinforcement signal to API (fire and forget)
1571
1867
  bbFetch(cfg, `${baseUrl(cfg)}/memories/${encodeURIComponent(m.id)}/access`, {
1572
1868
  method: 'POST',
1573
- body: JSON.stringify({
1869
+ body: JSON.stringify({
1574
1870
  boost: strengthBoost,
1575
- retrievalCount: (m.retrievalCount || 0) + 1,
1871
+ retrievalCount: accessCount,
1576
1872
  lastAccess: Date.now(),
1577
- attentionLevel: context?.attentionLevel || 'unknown'
1873
+ attentionLevel: conversationCtx.attentionLevel || 'unknown',
1874
+ reinforcement: {
1875
+ accessCount,
1876
+ adaptiveDecayRate: Math.max(0.001, (cfg.decayRate || FORGETTING_CURVE.baseDecayRate) - accessBonus),
1877
+ emotionalProtection: FORGETTING_CURVE.emotionalProtection * attentionMultiplier
1878
+ }
1578
1879
  })
1579
1880
  }).catch(() => {}); // Silent fail
1580
-
1881
+
1581
1882
  // Update local cache retrieval count
1582
1883
  if (memoryCache.has(cacheKey)) {
1583
1884
  const cached = memoryCache.get(cacheKey)!;
@@ -1632,6 +1933,67 @@ ${memoryContext}
1632
1933
 
1633
1934
  injection += instructions;
1634
1935
 
1936
+ // ── Proactive Surfacing ──────────────────────────────────────
1937
+ // Search for thematically related memories NOT already in the recall set
1938
+ try {
1939
+ const existingIds = new Set(memories.map((m: any) => m.id || m._key).filter(Boolean));
1940
+ const proactiveTopics = predictiveTopicBoost(conversationCtx, recallLevel.topics)
1941
+ .filter(t => !recallLevel.topics.includes(t));
1942
+
1943
+ if (proactiveTopics.length > 0) {
1944
+ const proactiveQuery = proactiveTopics.join(' ');
1945
+ const proactiveData = await bbFetch(cfg, `${baseUrl(cfg)}/recall?q=${encodeURIComponent(proactiveQuery)}&limit=5&agent=${currentAgent}`)
1946
+ .catch(() => ({ results: [] }));
1947
+ const proactiveResults = (proactiveData.results || proactiveData.memories || [])
1948
+ .filter((m: any) => !existingIds.has(m.id || m._key))
1949
+ .slice(0, 3);
1950
+
1951
+ if (proactiveResults.length > 0) {
1952
+ const proactiveContext = proactiveResults.map((m: any, i: number) =>
1953
+ `${i + 1}. [${m.type}] ${m.content}\n └─ Importance: ${displayImportance(m.importance)}/10`
1954
+ ).join('\n\n');
1955
+
1956
+ injection += `\n\n## 💡 You might also want to know:\n${proactiveContext}\n`;
1957
+
1958
+ // Track which memories were proactively surfaced for feedback loop
1959
+ const surfacedIds = proactiveResults.map((m: any) => m.id || m._key).filter(Boolean);
1960
+ proactiveSurfacedMemories.set(currentAgent, {
1961
+ memoryIds: surfacedIds,
1962
+ timestamp: Date.now()
1963
+ });
1964
+ }
1965
+ }
1966
+ } catch { /* proactive surfacing failed, not critical */ }
1967
+
1968
+ // ── Decision Outcome Context (Learning Loop) ─────────────────
1969
+ // If the message is decision-relevant, load past decision outcomes
1970
+ try {
1971
+ if (/\b(entscheidung|decision|decide|option|alternative|choose|wahl)\b/i.test(message)) {
1972
+ const outcomeData = await bbFetch(cfg, `${baseUrl(cfg)}/recall?q=${encodeURIComponent('decision outcome result')}&limit=10&agent=${currentAgent}&types=outcome,decision`)
1973
+ .catch(() => ({ results: [] }));
1974
+ const outcomes = (outcomeData.results || outcomeData.memories || [])
1975
+ .filter((m: any) => m.type === 'outcome' || (m.type === 'decision' && m.tags?.includes('has-outcome')));
1976
+
1977
+ if (outcomes.length > 0) {
1978
+ // Summarize past decision outcomes
1979
+ const successCount = outcomes.filter((m: any) => m.tags?.includes('outcome:success')).length;
1980
+ const failCount = outcomes.filter((m: any) => m.tags?.includes('outcome:failure')).length;
1981
+
1982
+ let outcomeContext = '\n\n## 📊 Based on past decisions:\n';
1983
+ if (successCount + failCount > 0) {
1984
+ outcomeContext += `Track record: ${successCount} successful, ${failCount} unsuccessful outcomes recorded.\n\n`;
1985
+ }
1986
+ outcomeContext += outcomes.slice(0, 4).map((m: any, i: number) => {
1987
+ const success = m.tags?.includes('outcome:success') ? '✅' : m.tags?.includes('outcome:failure') ? '❌' : '📝';
1988
+ return `${i + 1}. ${success} ${m.content}\n └─ Importance: ${displayImportance(m.importance)}/10`;
1989
+ }).join('\n\n');
1990
+
1991
+ outcomeContext += '\n\n*Consider these past outcomes when advising on the current decision.*\n';
1992
+ injection += outcomeContext;
1993
+ }
1994
+ }
1995
+ } catch { /* decision outcome loading failed, not critical */ }
1996
+
1635
1997
  // Append to existing system prompt
1636
1998
  event.context.systemPrompt = (event.context.systemPrompt || '') + injection;
1637
1999
 
@@ -1706,22 +2068,31 @@ ${memoryContext}
1706
2068
 
1707
2069
  const finalImportance = (mem.importance ?? 5) * emotionalWeight * contextualWeight;
1708
2070
 
2071
+ // Determine category (from LLM extraction or fallback)
2072
+ const memCategory = MEMORY_CATEGORIES.includes(mem.category as any)
2073
+ ? mem.category
2074
+ : (memoryType === 'preference' ? 'preferences' :
2075
+ memoryType === 'decision' || memoryType === 'goal' || memoryType === 'task' ? 'project' :
2076
+ 'technical');
2077
+
1709
2078
  // Enhanced memory with neuromimetic features
1710
- const enhancedMem = {
2079
+ const enhancedMem: Record<string, any> = {
1711
2080
  content: mem.content,
1712
2081
  type: memoryType,
1713
2082
  agent: currentAgent,
1714
2083
  importance: storeImportance(Math.min(10, finalImportance)),
2084
+ category: memCategory,
1715
2085
  tags: [
1716
- ...(mem.tags || []),
2086
+ ...(mem.tags || []),
1717
2087
  'auto-captured',
1718
2088
  'neuromimetic',
2089
+ `category:${memCategory}`,
1719
2090
  ...(context?.dominantTopic ? [`topic:${context.dominantTopic}`] : []),
1720
2091
  `engagement:${context?.userEngagement || 'unknown'}`,
1721
2092
  `attention:${context?.attentionLevel || 'unknown'}`,
1722
2093
  `emotion:${context?.emotionalContext || 'neutral'}`
1723
2094
  ],
1724
- source: 'plugin:neuromimetic-capture-v2.1',
2095
+ source: 'plugin:neuromimetic-capture-v3.0',
1725
2096
  conversationContext: {
1726
2097
  dominantTopic: context?.dominantTopic,
1727
2098
  engagement: context?.userEngagement,
@@ -1778,11 +2149,70 @@ ${memoryContext}
1778
2149
  const profile = getUserProfile(currentAgent);
1779
2150
  profile.successfulRecalls += stored; // Treat captures as successes
1780
2151
  userProfiles.set(currentAgent, profile);
1781
-
2152
+
1782
2153
  logger.info?.(`[memtap] Auto-captured ${stored} enhanced memories with context`) ??
1783
2154
  console.log(`[memtap] Auto-captured ${stored} enhanced memories with context`);
1784
2155
  }
1785
-
2156
+
2157
+ // ── Proactive Surfacing Feedback Loop ──────────────────────
2158
+ // Check if the agent's response referenced any proactively surfaced memories
2159
+ try {
2160
+ const surfaced = proactiveSurfacedMemories.get(currentAgent);
2161
+ if (surfaced && (Date.now() - surfaced.timestamp) < 600000) { // within 10min
2162
+ const contentLower = content.toLowerCase();
2163
+ const usedIds: string[] = [];
2164
+
2165
+ for (const memId of surfaced.memoryIds) {
2166
+ // Check if the memory ID or content from proactive surfacing appears in the response
2167
+ if (contentLower.includes(memId.toLowerCase())) {
2168
+ usedIds.push(memId);
2169
+ }
2170
+ }
2171
+
2172
+ // Also do a fuzzy check: recall proactive memories and see if their content appears
2173
+ if (usedIds.length === 0 && surfaced.memoryIds.length > 0) {
2174
+ for (const memId of surfaced.memoryIds) {
2175
+ try {
2176
+ const memData = await bbFetch(cfg, `${baseUrl(cfg)}/memories/${encodeURIComponent(memId)}`);
2177
+ const memContent = (memData.content || '').toLowerCase();
2178
+ // Check if key phrases from the memory appear in the response
2179
+ const keyPhrases = memContent.split(/\s+/).filter((w: string) => w.length > 5).slice(0, 5);
2180
+ const matchCount = keyPhrases.filter((phrase: string) => contentLower.includes(phrase)).length;
2181
+ if (matchCount >= 2) {
2182
+ usedIds.push(memId);
2183
+ }
2184
+ } catch { /* skip */ }
2185
+ }
2186
+ }
2187
+
2188
+ // Importance reinforcement for used proactive memories
2189
+ if (usedIds.length > 0) {
2190
+ for (const memId of usedIds) {
2191
+ try {
2192
+ await bbFetch(cfg, `${baseUrl(cfg)}/memories/${encodeURIComponent(memId)}/access`, {
2193
+ method: 'POST',
2194
+ body: JSON.stringify({
2195
+ boost: FORGETTING_CURVE.retrievalStrengthening * 1.5, // Extra boost for proactive hit
2196
+ lastAccess: Date.now(),
2197
+ attentionLevel: 'proactive-hit',
2198
+ reinforcement: {
2199
+ proactiveHit: true,
2200
+ adaptiveDecayRate: Math.max(0.001, (cfg.decayRate || FORGETTING_CURVE.baseDecayRate) * 0.5)
2201
+ }
2202
+ })
2203
+ });
2204
+ } catch { /* reinforcement failed, not critical */ }
2205
+ }
2206
+
2207
+ logger.info?.(`[memtap] Proactive surfacing feedback: ${usedIds.length}/${surfaced.memoryIds.length} memories used by agent`) ??
2208
+ console.log(`[memtap] Proactive surfacing feedback: ${usedIds.length}/${surfaced.memoryIds.length} memories used by agent`);
2209
+ }
2210
+
2211
+ // Clear after processing
2212
+ proactiveSurfacedMemories.delete(currentAgent);
2213
+ }
2214
+ } catch { /* proactive feedback loop failed, not critical */ }
2215
+
1786
2216
  } catch (err: any) {
1787
2217
  logger.warn?.(`[memtap] Auto-capture failed: ${err.message}`) ??
1788
2218
  console.warn(`[memtap] Auto-capture failed: ${err.message}`);
@@ -2811,6 +3241,321 @@ async function neuralMaintenance() {
2811
3241
  },
2812
3242
  });
2813
3243
 
2814
- logger.info?.('[memtap] Plugin v2.1.0 "The Neuron" registered: 13 tools + 5 neuromimetic hooks') ??
2815
- console.log('[memtap] Plugin v2.1.0 "The Neuron" registered: 13 tools + 5 neuromimetic hooks');
3244
+ // ── Tool: memtap_outcome (Agent Learning Loop) ──────────────────────────────
3245
+
3246
+ api.registerTool({
3247
+ name: 'memtap_outcome',
3248
+ description:
3249
+ 'Record the outcome of a past decision for the learning loop. ' +
3250
+ 'Links the outcome to the original decision via a CAUSED_BY edge. ' +
3251
+ 'Future decision contexts will automatically surface past outcomes to improve advice quality.',
3252
+ parameters: {
3253
+ type: 'object',
3254
+ properties: {
3255
+ decisionId: { type: 'string', description: 'The ID of the decision this outcome relates to' },
3256
+ outcome: { type: 'string', description: 'Description of what happened as a result of the decision' },
3257
+ success: { type: 'boolean', description: 'Whether the decision led to a successful outcome' },
3258
+ learnings: { type: 'string', description: 'Optional lessons learned from this outcome' },
3259
+ },
3260
+ required: ['decisionId', 'outcome', 'success'],
3261
+ },
3262
+ async execute(_id: string, params: { decisionId: string; outcome: string; success: boolean; learnings?: string }) {
3263
+ const cfg = getConfig(api);
3264
+ const base = baseUrl(cfg);
3265
+ const agent = agentId(cfg, api);
3266
+
3267
+ try {
3268
+ // 1. Verify the decision exists
3269
+ let decisionContent = '';
3270
+ try {
3271
+ const decisionData = await bbFetch(cfg, `${base}/memories/${encodeURIComponent(params.decisionId)}`);
3272
+ decisionContent = decisionData.content || '';
3273
+ } catch {
3274
+ return { content: [{ type: 'text', text: `Decision [${params.decisionId}] not found.` }], isError: true };
3275
+ }
3276
+
3277
+ // 2. Build outcome memory content
3278
+ const outcomeContent = params.learnings
3279
+ ? `Outcome: ${params.outcome} | Learnings: ${params.learnings}`
3280
+ : `Outcome: ${params.outcome}`;
3281
+
3282
+ // 3. Store outcome memory
3283
+ const outcomeImportance = params.success ? 0.6 : 0.8; // Failures are more important to remember
3284
+ const outcomeMem = await bbFetch(cfg, `${base}/memories`, {
3285
+ method: 'POST',
3286
+ body: JSON.stringify({
3287
+ content: outcomeContent,
3288
+ type: 'outcome',
3289
+ agent,
3290
+ importance: outcomeImportance,
3291
+ tags: [
3292
+ 'outcome',
3293
+ `outcome:${params.success ? 'success' : 'failure'}`,
3294
+ `decision:${params.decisionId}`,
3295
+ ...(params.learnings ? ['has-learnings'] : []),
3296
+ ],
3297
+ source: 'plugin:learning-loop',
3298
+ }),
3299
+ });
3300
+
3301
+ const outcomeId = outcomeMem.id || outcomeMem._key;
3302
+
3303
+ // 4. Create CAUSED_BY edge from outcome to decision
3304
+ await bbFetch(cfg, `${base}/relate`, {
3305
+ method: 'POST',
3306
+ body: JSON.stringify({
3307
+ from: outcomeId,
3308
+ to: params.decisionId,
3309
+ type: 'CAUSED_BY',
3310
+ weight: 0.9,
3311
+ }),
3312
+ });
3313
+
3314
+ // 5. Tag the original decision as having an outcome
3315
+ try {
3316
+ await bbFetch(cfg, `${base}/memories/${encodeURIComponent(params.decisionId)}/update`, {
3317
+ method: 'POST',
3318
+ body: JSON.stringify({
3319
+ tags: ['has-outcome', `outcome:${params.success ? 'success' : 'failure'}`],
3320
+ }),
3321
+ });
3322
+ } catch { /* tagging failed, not critical */ }
3323
+
3324
+ const icon = params.success ? '✅' : '❌';
3325
+ let response = `${icon} Outcome recorded [${outcomeId}] for decision [${params.decisionId}]\n`;
3326
+ response += ` Result: ${params.success ? 'Success' : 'Failure'}\n`;
3327
+ response += ` Edge: [${outcomeId}] -[CAUSED_BY]-> [${params.decisionId}]`;
3328
+ if (params.learnings) response += `\n Learnings: ${params.learnings}`;
3329
+
3330
+ return { content: [{ type: 'text', text: response }] };
3331
+ } catch (err: any) {
3332
+ return { content: [{ type: 'text', text: `MemTap outcome error: ${err.message}` }], isError: true };
3333
+ }
3334
+ },
3335
+ });
3336
+
3337
+ // ── Tool: memtap_infer (Inference Engine) ─────────────────────────────────
3338
+
3339
+ api.registerTool({
3340
+ name: 'memtap_infer',
3341
+ description:
3342
+ 'Inference engine: traverses the knowledge graph (2-3 hops) to discover implicit relationships ' +
3343
+ 'and hidden knowledge not explicitly stored. Uses LLM reasoning to draw conclusions from connected facts. ' +
3344
+ 'Stores inferences as new memories with confidence decay (inferred memories decay 2x faster).',
3345
+ parameters: {
3346
+ type: 'object',
3347
+ properties: {
3348
+ depth: { type: 'number', description: 'Graph traversal depth in hops (default 2, max 3)' },
3349
+ limit: { type: 'number', description: 'Max number of inferences to generate (default 3)' },
3350
+ },
3351
+ },
3352
+ async execute(_id: string, params: { depth?: number; limit?: number }) {
3353
+ const cfg = getConfig(api);
3354
+ const base = baseUrl(cfg);
3355
+ const agent = agentId(cfg, api);
3356
+ const depth = Math.min(3, params.depth ?? 2);
3357
+ const limit = Math.min(5, params.limit ?? 3);
3358
+
3359
+ try {
3360
+ // 1. Get graph overview to find highly connected nodes as starting points
3361
+ const overviewData = await bbFetch(cfg, `${base}/graph/overview`).catch(() => ({}));
3362
+ const topNodes = (overviewData.topConnected || overviewData.hubs || []).slice(0, 5);
3363
+
3364
+ // 2. If no top nodes from overview, use recent important memories as seeds
3365
+ let seedIds: string[] = topNodes.map((n: any) => n.id || n._key).filter(Boolean);
3366
+ if (seedIds.length === 0) {
3367
+ const recallData = await bbFetch(cfg, `${base}/recall?q=important&limit=5&agent=${agent}`);
3368
+ seedIds = (recallData.results || recallData.memories || [])
3369
+ .map((m: any) => m.id || m._key).filter(Boolean);
3370
+ }
3371
+
3372
+ if (seedIds.length === 0) {
3373
+ return { content: [{ type: 'text', text: 'Not enough memories in the graph to run inference. Store more memories first.' }] };
3374
+ }
3375
+
3376
+ // 3. Traverse graph from each seed to collect connected memories
3377
+ const allTraversals: any[] = [];
3378
+ const visitedMemories = new Map<string, any>();
3379
+
3380
+ for (const seedId of seedIds.slice(0, 3)) {
3381
+ try {
3382
+ const traverseUrl = new URL('/graph/traverse', base);
3383
+ traverseUrl.searchParams.set('start', seedId);
3384
+ traverseUrl.searchParams.set('depth', String(depth));
3385
+ const traverseData = await bbFetch(cfg, traverseUrl.toString());
3386
+
3387
+ const nodes = traverseData.nodes || traverseData.results || [];
3388
+ for (const node of nodes) {
3389
+ const nodeId = node.id || node._key;
3390
+ if (nodeId && !visitedMemories.has(nodeId)) {
3391
+ visitedMemories.set(nodeId, node);
3392
+ }
3393
+ }
3394
+
3395
+ allTraversals.push({
3396
+ seed: seedId,
3397
+ nodes: nodes.length,
3398
+ edges: (traverseData.edges || []).length,
3399
+ });
3400
+ } catch { /* skip individual traversal failures */ }
3401
+ }
3402
+
3403
+ if (visitedMemories.size < 3) {
3404
+ return { content: [{ type: 'text', text: `Only ${visitedMemories.size} connected memories found. Need at least 3 for meaningful inference.` }] };
3405
+ }
3406
+
3407
+ // 4. Build context for LLM inference
3408
+ const memoryTexts = Array.from(visitedMemories.values())
3409
+ .slice(0, 20)
3410
+ .map((m: any, i: number) => {
3411
+ const mType = m.type || 'unknown';
3412
+ const mContent = m.content || m.summary || '';
3413
+ return `${i + 1}. [${mType}] ${mContent}`;
3414
+ }).join('\n');
3415
+
3416
+ // 5. Use LLM to find implicit connections and draw inferences
3417
+ const llmUrl = cfg.llmUrl || 'http://127.0.0.1:18789/v1/chat/completions';
3418
+ const model = cfg.llmModel || 'anthropic/claude-sonnet-4-20250514';
3419
+
3420
+ const inferRes = await fetch(llmUrl, {
3421
+ method: 'POST',
3422
+ headers: { 'Content-Type': 'application/json' },
3423
+ body: JSON.stringify({
3424
+ model,
3425
+ max_tokens: 2000,
3426
+ messages: [
3427
+ {
3428
+ role: 'system',
3429
+ content: `Du bist ein Inference-Engine für einen Wissensgraphen. Analysiere die folgenden verbundenen Fakten und finde IMPLIZITE Schlussfolgerungen die nicht direkt gespeichert sind.
3430
+
3431
+ Regeln:
3432
+ - Nur nicht-triviale, wertvolle Schlüsse
3433
+ - Jeder Schluss muss auf mindestens 2 konkreten Fakten basieren
3434
+ - Gib die IDs der Quell-Memories an (Nummern aus der Liste)
3435
+ - Confidence 0-1: wie sicher ist der Schluss?
3436
+
3437
+ Antwort als JSON-Array (NUR das Array):
3438
+ [
3439
+ {
3440
+ "inference": "Der Schluss in einem Satz",
3441
+ "basedOn": [1, 3, 7],
3442
+ "confidence": 0.7,
3443
+ "category": "pattern|prediction|connection|risk"
3444
+ }
3445
+ ]
3446
+
3447
+ Wenn keine sinnvollen Schlüsse möglich: []`
3448
+ },
3449
+ {
3450
+ role: 'user',
3451
+ content: `Welche impliziten Schlüsse kann man aus diesen ${visitedMemories.size} verbundenen Fakten ziehen?\n\n${memoryTexts}`
3452
+ },
3453
+ ],
3454
+ }),
3455
+ });
3456
+
3457
+ if (!inferRes.ok) throw new Error(`LLM inference failed: ${inferRes.status}`);
3458
+ const inferData = await inferRes.json();
3459
+ const inferText = inferData.choices?.[0]?.message?.content?.trim() || '[]';
3460
+ const cleanedInfer = inferText.replace(/^```json?\n?/m, '').replace(/\n?```$/m, '').trim();
3461
+
3462
+ let inferences: any[];
3463
+ try {
3464
+ inferences = JSON.parse(cleanedInfer);
3465
+ if (!Array.isArray(inferences)) inferences = [];
3466
+ } catch {
3467
+ inferences = [];
3468
+ }
3469
+
3470
+ if (inferences.length === 0) {
3471
+ return { content: [{ type: 'text', text: `Traversed ${visitedMemories.size} memories across ${allTraversals.length} paths but no meaningful inferences could be drawn.` }] };
3472
+ }
3473
+
3474
+ // 6. Store inferences as memories with lower importance and 2x decay
3475
+ const storedInferences: string[] = [];
3476
+ const memoryArray = Array.from(visitedMemories.values());
3477
+
3478
+ for (const inf of inferences.slice(0, limit)) {
3479
+ try {
3480
+ // Calculate importance based on confidence (lower than explicit memories)
3481
+ const inferImportance = Math.max(0.2, Math.min(0.6, (inf.confidence || 0.5) * 0.7));
3482
+
3483
+ const inferMem = await bbFetch(cfg, `${base}/memories`, {
3484
+ method: 'POST',
3485
+ body: JSON.stringify({
3486
+ content: inf.inference,
3487
+ type: 'inferred',
3488
+ agent,
3489
+ importance: inferImportance,
3490
+ tags: [
3491
+ 'inferred',
3492
+ `inference:${inf.category || 'connection'}`,
3493
+ `confidence:${Math.round((inf.confidence || 0.5) * 100)}`,
3494
+ 'fast-decay', // Signal to server that this decays 2x faster
3495
+ ],
3496
+ source: 'plugin:inference-engine',
3497
+ metadata: {
3498
+ confidence: inf.confidence || 0.5,
3499
+ decayMultiplier: 2.0, // Inferred memories decay 2x faster
3500
+ basedOnCount: (inf.basedOn || []).length,
3501
+ inferenceCategory: inf.category || 'connection',
3502
+ },
3503
+ }),
3504
+ });
3505
+
3506
+ const inferenceId = inferMem.id || inferMem._key;
3507
+
3508
+ // 7. Create PART_OF edges (inference chain) from source memories to inference
3509
+ const basedOnIndices = (inf.basedOn || []).map((n: number) => n - 1);
3510
+ let edgesCreated = 0;
3511
+
3512
+ for (const idx of basedOnIndices) {
3513
+ if (idx >= 0 && idx < memoryArray.length) {
3514
+ const sourceId = memoryArray[idx].id || memoryArray[idx]._key;
3515
+ if (sourceId && inferenceId) {
3516
+ try {
3517
+ await bbFetch(cfg, `${base}/relate`, {
3518
+ method: 'POST',
3519
+ body: JSON.stringify({
3520
+ from: sourceId,
3521
+ to: inferenceId,
3522
+ type: 'PART_OF',
3523
+ weight: inf.confidence || 0.5,
3524
+ }),
3525
+ });
3526
+ edgesCreated++;
3527
+ } catch { /* skip edge failures */ }
3528
+ }
3529
+ }
3530
+ }
3531
+
3532
+ storedInferences.push(
3533
+ `[${inferenceId}] ${inf.inference}\n ` +
3534
+ `Confidence: ${Math.round((inf.confidence || 0.5) * 100)}% | ` +
3535
+ `Category: ${inf.category || 'connection'} | ` +
3536
+ `Based on: ${(inf.basedOn || []).length} memories | ` +
3537
+ `Edges: ${edgesCreated} PART_OF links`
3538
+ );
3539
+ } catch { /* skip individual inference storage failures */ }
3540
+ }
3541
+
3542
+ let output = `Inference Engine Results\n`;
3543
+ output += `Traversed: ${visitedMemories.size} memories, ${allTraversals.length} paths, depth ${depth}\n\n`;
3544
+
3545
+ if (storedInferences.length > 0) {
3546
+ output += `Inferences discovered and stored (decay: 2x faster):\n\n`;
3547
+ output += storedInferences.map((s, i) => `${i + 1}. ${s}`).join('\n\n');
3548
+ } else {
3549
+ output += 'Inferences were generated but could not be stored.';
3550
+ }
3551
+
3552
+ return { content: [{ type: 'text', text: output }] };
3553
+ } catch (err: any) {
3554
+ return { content: [{ type: 'text', text: `MemTap inference error: ${err.message}` }], isError: true };
3555
+ }
3556
+ },
3557
+ });
3558
+
3559
+ logger.info?.('[memtap] Plugin v3.1.0 "The Oracle" registered: 18 tools + 5 neuromimetic hooks') ??
3560
+ console.log('[memtap] Plugin v3.1.0 "The Oracle" registered: 18 tools + 5 neuromimetic hooks');
2816
3561
  }
@@ -2,8 +2,8 @@
2
2
  "id": "memtap",
3
3
  "name": "MemTap",
4
4
  "kind": "memory",
5
- "version": "2.1.0",
6
- "description": "Graph-based long-term memory for OpenClaw agents — semantic recall, GraphRAG, entity management, decision tracking, neural auto-capture, and anomaly detection. Powered by ArangoDB.",
5
+ "version": "3.1.0",
6
+ "description": "Graph-based long-term memory for OpenClaw agents — semantic recall, GraphRAG, entity management, decision tracking, neural auto-capture, anomaly detection, consolidation, profiles, and adaptive decay.",
7
7
  "configSchema": {
8
8
  "type": "object",
9
9
  "additionalProperties": false,
@@ -60,6 +60,22 @@
60
60
  "decayRate": {
61
61
  "type": "number",
62
62
  "description": "Memory importance decay rate per day (default: 0.005)"
63
+ },
64
+ "instructions": {
65
+ "type": "object",
66
+ "description": "Content filtering instructions for memory recall and capture",
67
+ "properties": {
68
+ "include": {
69
+ "type": "array",
70
+ "items": { "type": "string" },
71
+ "description": "Patterns to prioritize in recall (boost matching memories)"
72
+ },
73
+ "exclude": {
74
+ "type": "array",
75
+ "items": { "type": "string" },
76
+ "description": "Patterns to exclude from recall results"
77
+ }
78
+ }
63
79
  }
64
80
  }
65
81
  },
@@ -110,6 +126,10 @@
110
126
  "decayRate": {
111
127
  "label": "Memory Decay Rate",
112
128
  "placeholder": "0.005"
129
+ },
130
+ "instructions": {
131
+ "label": "Content Filter Instructions",
132
+ "helpText": "Include/exclude patterns for memory recall filtering"
113
133
  }
114
134
  }
115
135
  }
package/package.json CHANGED
@@ -1,14 +1,13 @@
1
1
  {
2
2
  "name": "memtap",
3
- "version": "2.1.0",
4
- "description": "MemTap — Graph-based long-term memory plugin for OpenClaw agents. ArangoDB-backed knowledge graph with semantic recall, GraphRAG, entity management, decision tracking, and auto-capture.",
3
+ "version": "3.1.0",
4
+ "description": "MemTap — Graph-based long-term memory plugin for OpenClaw agents. Knowledge graph with semantic recall, GraphRAG, entity management, decision tracking, and auto-capture.",
5
5
  "keywords": [
6
6
  "openclaw",
7
7
  "openclaw-plugin",
8
8
  "plugin",
9
9
  "memory",
10
10
  "knowledge-graph",
11
- "arangodb",
12
11
  "graphrag",
13
12
  "ai-agents",
14
13
  "long-term-memory",
@@ -19,10 +18,11 @@
19
18
  "homepage": "https://memtap.ai",
20
19
  "repository": {
21
20
  "type": "git",
22
- "url": "https://github.com/memtap/openclaw-plugin.git"
21
+ "url": "https://github.com/psifactory/memtap.git",
22
+ "directory": "plugin"
23
23
  },
24
24
  "bugs": {
25
- "url": "https://github.com/memtap/openclaw-plugin/issues"
25
+ "url": "https://github.com/psifactory/memtap/issues"
26
26
  },
27
27
  "openclaw": {
28
28
  "extensions": [