memtap 2.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +122 -0
- package/index.ts +2816 -0
- package/openclaw.plugin.json +115 -0
- package/package.json +40 -0
package/index.ts
ADDED
|
@@ -0,0 +1,2816 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* MemTap — Graph-based Long-Term Memory Plugin for OpenClaw
|
|
3
|
+
* v2.1.0 "The Neuron" — Neuromimetic Memory System with Human-like Cognition
|
|
4
|
+
*
|
|
5
|
+
* Tools:
|
|
6
|
+
* - memtap_recall — semantic graph recall
|
|
7
|
+
* - memtap_remember — store a memory in the graph
|
|
8
|
+
* - memtap_memory — get, update, delete individual memories
|
|
9
|
+
* - memtap_bulletin — context bulletin with graph expansion
|
|
10
|
+
* - memtap_graph — graph analysis (overview, gaps, clusters, connections, traverse)
|
|
11
|
+
* - memtap_decide — decision tracking (list, create, resolve, defer)
|
|
12
|
+
* - memtap_graphrag — vector/BM25 + graph traversal search
|
|
13
|
+
* - memtap_maintenance — memory maintenance (decay-report, contradictions, dedup-scan, run-all)
|
|
14
|
+
* - memtap_entities — entity management (list, memories, merge)
|
|
15
|
+
* - memtap_edges — create edges between memories
|
|
16
|
+
* - memtap_health — server health check and statistics
|
|
17
|
+
*
|
|
18
|
+
* Hooks:
|
|
19
|
+
* - preMessage — neuromimetic tiered recall with working memory simulation
|
|
20
|
+
* - message_completed — attention-gated encoding with emotional weighting
|
|
21
|
+
* - agent:bootstrap — inject memory bulletin at session start
|
|
22
|
+
* - periodic — dream-mode consolidation and neural maintenance
|
|
23
|
+
* - session_end — performance monitoring and neural analytics
|
|
24
|
+
*/
|
|
25
|
+
|
|
26
|
+
// ── Helpers ──────────────────────────────────────────────────────────────────
|
|
27
|
+
|
|
28
|
+
/**
 * Plugin configuration as read from the OpenClaw plugin-entry config block.
 * Every field is optional; helpers below supply defaults where needed.
 */
interface MemTapConfig {
  serverUrl?: string;        // MemTap API base URL; defaults to https://api.memtap.ai (see baseUrl)
  apiKey?: string;           // sent as a Bearer token when present (see bbFetch)
  agentId?: string;          // overrides the agent id resolved from the host config (see agentId)
  autoCapture?: boolean;     // presumably gates automatic memory capture in hooks — confirm against hook code
  bulletinOnBoot?: boolean;  // presumably injects a bulletin at session start — confirm against bootstrap hook
  bulletinTopics?: string[]; // topics used for the boot bulletin
  llmUrl?: string;           // chat-completions endpoint for extraction; defaults in llmExtract
  llmModel?: string;         // model name for extraction; defaults in llmExtract
  embeddingUrl?: string;     // NOTE(review): not referenced in this chunk — verify usage elsewhere
  embeddingModel?: string;   // NOTE(review): not referenced in this chunk — verify usage elsewhere
  embeddingApiKey?: string;  // NOTE(review): not referenced in this chunk — verify usage elsewhere
  decayRate?: number;        // NOTE(review): not referenced in this chunk — verify usage elsewhere
}
|
|
42
|
+
|
|
43
|
+
function getConfig(api: any): MemTapConfig {
|
|
44
|
+
const entries = api.config?.plugins?.entries ?? {};
|
|
45
|
+
return entries.memtap?.config ?? {};
|
|
46
|
+
}
|
|
47
|
+
|
|
48
|
+
function baseUrl(cfg: MemTapConfig): string {
|
|
49
|
+
return (cfg.serverUrl || 'https://api.memtap.ai').replace(/\/$/, '');
|
|
50
|
+
}
|
|
51
|
+
|
|
52
|
+
function agentId(cfg: MemTapConfig, api: any): string {
|
|
53
|
+
return cfg.agentId || api.config?.agents?.defaults?.id || 'main';
|
|
54
|
+
}
|
|
55
|
+
|
|
56
|
+
async function bbFetch(cfg: MemTapConfig, url: string, opts: RequestInit = {}): Promise<any> {
|
|
57
|
+
const headers: any = { 'Content-Type': 'application/json', ...(opts.headers || {}) };
|
|
58
|
+
|
|
59
|
+
// Add API key authentication if available
|
|
60
|
+
if (cfg.apiKey) {
|
|
61
|
+
headers['Authorization'] = `Bearer ${cfg.apiKey}`;
|
|
62
|
+
}
|
|
63
|
+
|
|
64
|
+
const res = await fetch(url, {
|
|
65
|
+
...opts,
|
|
66
|
+
headers,
|
|
67
|
+
});
|
|
68
|
+
|
|
69
|
+
if (!res.ok) {
|
|
70
|
+
const text = await res.text().catch(() => '');
|
|
71
|
+
if (res.status === 401) {
|
|
72
|
+
throw new Error('MemTap authentication failed. Please check your API key.');
|
|
73
|
+
}
|
|
74
|
+
throw new Error(`MemTap API ${res.status}: ${text}`);
|
|
75
|
+
}
|
|
76
|
+
return res.json();
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
/** Server stores importance as 0-1, we display 1-10 to users */
|
|
80
|
+
function displayImportance(serverValue: number): number {
|
|
81
|
+
return Math.round(serverValue * 10) || 1;
|
|
82
|
+
}
|
|
83
|
+
|
|
84
|
+
/** Users provide importance as 1-10, we store as 0-1 */
|
|
85
|
+
function storeImportance(userValue: number): number {
|
|
86
|
+
return Math.min(1, Math.max(0, userValue / 10));
|
|
87
|
+
}
|
|
88
|
+
|
|
89
|
+
// ── Memory types ─────────────────────────────────────────────────────────────
|
|
90
|
+
|
|
91
|
+
// Memory type taxonomy accepted by the server. Declared `as const` so tool
// parameter schemas can reuse it directly as an enum list.
const MEMORY_TYPES = ['fact', 'preference', 'decision', 'identity', 'event', 'observation', 'goal', 'task'] as const;
|
|
92
|
+
|
|
93
|
+
// ── Neuromimetic Memory System (v2.1 "The Neuron") ──────────────────────────
|
|
94
|
+
|
|
95
|
+
/** Result of classifying how aggressively to recall memories for a message. */
interface RecallLevel {
  intensity: number;  // 0 = no recall, 1 = light, 2 = standard, 3 = deep
  topics: string[];   // topics to seed the recall query with
  confidence: number; // 0-1, how sure we are about this classification
  reasoning: string;  // human-readable trail of which triggers/adjustments fired
}
|
|
101
|
+
|
|
102
|
+
/**
 * Rolling per-agent conversation state, maintained in-process by
 * analyzeConversationContext (not persisted to the server).
 */
interface ConversationContext {
  recentTopics: string[];        // sliding window of detected topics (last 5)
  memoryQueryCount: number;      // how many messages looked like memory queries
  lastMemoryAccess?: number;     // epoch ms of the last memory-style query
  dominantTopic?: string;        // most frequent topic in the recent window
  userEngagement: 'low' | 'medium' | 'high';
  attentionLevel: 'focused' | 'distracted' | 'flow'; // Determines encoding strength
  emotionalContext: 'positive' | 'neutral' | 'negative' | 'excited';
}
|
|
111
|
+
|
|
112
|
+
/**
 * Adaptive per-agent recall profile; tuned over time from query outcomes.
 * Held in the module-level `userProfiles` map.
 */
interface UserProfile {
  recallSensitivity: 'low' | 'medium' | 'high'; // biases recall intensity up/down
  preferredMemoryTypes: string[];
  averageQueryComplexity: number; // running blend, not a true mean (see updateUserProfile)
  lastActive: number;             // epoch ms
  totalQueries: number;
  successfulRecalls: number;
  sleepCycles: number; // For consolidation tracking
  attentionPatterns: Array<{ timestamp: number; level: string }>;
}
|
|
122
|
+
|
|
123
|
+
/** Simulated working memory per agent — small, recent, and fast to access. */
interface WorkingMemory {
  currentFocus: string[];    // 5-7 most important current topics
  activeMemories: any[];     // Pre-loaded memories for instant access (max 5)
  attentionSpotlight: string; // Primary focus topic
  cognitiveLoad: number;     // 0-1 (focus count / 7), affects new memory encoding
  lastUpdate: number;        // epoch ms of last refresh
}
|
|
130
|
+
|
|
131
|
+
/** A single episodic (event-shaped) memory with retention-relevant context. */
interface EpisodicMemory {
  event: string;              // raw content of what happened
  timestamp: number;          // epoch ms at creation
  location: 'telegram' | 'discord' | 'local' | 'unknown';
  participants: string[];
  emotionalIntensity: number; // 0-1, affects retention
  contextualCues: string[];   // For context-dependent retrieval
  consolidationScore: number; // How well consolidated this memory is (starts low)
}
|
|
140
|
+
|
|
141
|
+
/** A cluster of memories that tend to be accessed together ("chunking"). */
interface MemoryChunk {
  id: string;                // synthetic id, `chunk_<ms>`
  relatedMemories: string[]; // ids of the member memories
  abstractConcept: string;   // coarse theme label (see extractAbstractConcept)
  strength: number;          // How often accessed together (+0.1 per co-access)
  lastActivation: number;    // epoch ms of last strengthening
}
|
|
148
|
+
|
|
149
|
+
// Emotional weighting multipliers by memory kind: > 1 makes a memory more
// salient / decay-resistant, < 1 less so.
// NOTE(review): keys 'problem', 'success', 'failure', 'routine' are not part of
// MEMORY_TYPES — confirm lookups into this table can actually produce them.
const EMOTIONAL_WEIGHTS = {
  'decision': 1.5,    // Decisions are crucial
  'problem': 1.3,     // Problems stick in memory
  'success': 1.2,     // We remember wins
  'failure': 1.4,     // We really remember losses
  'event': 1.1,       // Events have mild boost
  'fact': 1.0,        // Baseline
  'preference': 0.9,  // Less emotionally salient
  'observation': 0.8, // Often forgotten
  'routine': 0.7      // Quickly forgotten
};
|
|
161
|
+
|
|
162
|
+
// Forgetting curve parameters (Ebbinghaus-style decay plus emotional modulation).
// Not used within this chunk — presumably consumed by decay/maintenance logic
// further down the file; verify against that code.
const FORGETTING_CURVE = {
  baseDecayRate: 0.01,          // Base daily decay
  emotionalProtection: 0.3,     // How much emotion protects from decay
  retrievalStrengthening: 0.15, // Boost from each retrieval
  consolidationBonus: 0.1       // Bonus from sleep/consolidation
};
|
|
169
|
+
|
|
170
|
+
// Global state for neuromimetic features. All of it is in-process and keyed by
// agent id; nothing here survives a restart (persistence lives on the server).
const conversationState = new Map<string, ConversationContext>();
const userProfiles = new Map<string, UserProfile>();
const workingMemoryState = new Map<string, WorkingMemory>();
const episodicMemories = new Map<string, EpisodicMemory[]>();
const memoryChunks = new Map<string, MemoryChunk[]>();
// Recall-result cache keyed by query; tracks when and how often entries are hit.
const memoryCache = new Map<string, { data: any[]; timestamp: number; query: string; retrievalCount: number }>();

// Attention tracking for encoding decisions. Declared `let` because trimming
// (capped at 1000, cut back to 500 in analyzeConversationContext) replaces the
// array wholesale rather than mutating it.
let attentionHistory: Array<{ timestamp: number; agent: string; level: string; trigger: string }> = [];
|
|
180
|
+
|
|
181
|
+
// ── Neuromimetic Functions ──────────────────────────────────────────────────
|
|
182
|
+
|
|
183
|
+
function updateWorkingMemory(agentId: string, topics: string[], memories: any[] = []): WorkingMemory {
|
|
184
|
+
const existing = workingMemoryState.get(agentId) || {
|
|
185
|
+
currentFocus: [],
|
|
186
|
+
activeMemories: [],
|
|
187
|
+
attentionSpotlight: '',
|
|
188
|
+
cognitiveLoad: 0,
|
|
189
|
+
lastUpdate: 0
|
|
190
|
+
};
|
|
191
|
+
|
|
192
|
+
// Update focus (maintain 5-7 items max, like human working memory)
|
|
193
|
+
existing.currentFocus = [...existing.currentFocus, ...topics]
|
|
194
|
+
.slice(-7) // Keep only recent 7 items
|
|
195
|
+
.filter((item, index, arr) => arr.indexOf(item) === index); // dedupe
|
|
196
|
+
|
|
197
|
+
// Update active memories (pre-loaded for instant access)
|
|
198
|
+
existing.activeMemories = memories.slice(0, 5); // Max 5 active
|
|
199
|
+
|
|
200
|
+
// Determine attention spotlight (most frequent recent topic)
|
|
201
|
+
const topicCounts = existing.currentFocus.reduce((acc, topic) => {
|
|
202
|
+
acc[topic] = (acc[topic] || 0) + 1;
|
|
203
|
+
return acc;
|
|
204
|
+
}, {} as Record<string, number>);
|
|
205
|
+
|
|
206
|
+
existing.attentionSpotlight = Object.entries(topicCounts)
|
|
207
|
+
.sort(([,a], [,b]) => b - a)[0]?.[0] || '';
|
|
208
|
+
|
|
209
|
+
// Calculate cognitive load (affects new encoding)
|
|
210
|
+
existing.cognitiveLoad = Math.min(1, existing.currentFocus.length / 7);
|
|
211
|
+
existing.lastUpdate = Date.now();
|
|
212
|
+
|
|
213
|
+
workingMemoryState.set(agentId, existing);
|
|
214
|
+
return existing;
|
|
215
|
+
}
|
|
216
|
+
|
|
217
|
+
function analyzeAttentionLevel(message: string, context?: ConversationContext): string {
|
|
218
|
+
const msg = message.toLowerCase();
|
|
219
|
+
|
|
220
|
+
// High attention triggers
|
|
221
|
+
if (/\b(wichtig|urgent|critical|problem|fehler|error|entscheidung|decision)\b/i.test(msg)) {
|
|
222
|
+
return 'focused';
|
|
223
|
+
}
|
|
224
|
+
|
|
225
|
+
// Flow state indicators (long, detailed messages)
|
|
226
|
+
if (message.length > 200 && /\b(projekt|entwicklung|implementierung|strategie)\b/i.test(msg)) {
|
|
227
|
+
return 'flow';
|
|
228
|
+
}
|
|
229
|
+
|
|
230
|
+
// Distracted indicators
|
|
231
|
+
if (message.length < 20 || /\b(btw|übrigens|kurz|quick|mal eben)\b/i.test(msg)) {
|
|
232
|
+
return 'distracted';
|
|
233
|
+
}
|
|
234
|
+
|
|
235
|
+
return 'focused'; // Default to focused
|
|
236
|
+
}
|
|
237
|
+
|
|
238
|
+
function calculateEmotionalContext(message: string): string {
|
|
239
|
+
const msg = message.toLowerCase();
|
|
240
|
+
|
|
241
|
+
// Positive indicators
|
|
242
|
+
if (/\b(super|great|toll|perfekt|excellent|love|awesome|gut gelöst)\b/i.test(msg)) {
|
|
243
|
+
return 'positive';
|
|
244
|
+
}
|
|
245
|
+
|
|
246
|
+
// Excited indicators
|
|
247
|
+
if (/[!]{2,}|🚀|🎉|💯|genial|krass|unglaublich/i.test(message)) {
|
|
248
|
+
return 'excited';
|
|
249
|
+
}
|
|
250
|
+
|
|
251
|
+
// Negative indicators
|
|
252
|
+
if (/\b(problem|fehler|bug|broken|schlecht|terrible|failed|shit)\b/i.test(msg)) {
|
|
253
|
+
return 'negative';
|
|
254
|
+
}
|
|
255
|
+
|
|
256
|
+
return 'neutral';
|
|
257
|
+
}
|
|
258
|
+
|
|
259
|
+
function shouldEncodeMemory(content: string, attentionLevel: string, emotionalContext: string, cognitiveLoad: number): boolean {
|
|
260
|
+
// Attention-gated encoding - like real brain filtering
|
|
261
|
+
|
|
262
|
+
let encodeProbability = 0.5; // Base probability
|
|
263
|
+
|
|
264
|
+
// Attention modulation
|
|
265
|
+
if (attentionLevel === 'focused') encodeProbability += 0.3;
|
|
266
|
+
else if (attentionLevel === 'flow') encodeProbability += 0.4;
|
|
267
|
+
else if (attentionLevel === 'distracted') encodeProbability -= 0.3;
|
|
268
|
+
|
|
269
|
+
// Emotional modulation (emotional events are better encoded)
|
|
270
|
+
if (emotionalContext === 'excited') encodeProbability += 0.3;
|
|
271
|
+
else if (emotionalContext === 'positive') encodeProbability += 0.1;
|
|
272
|
+
else if (emotionalContext === 'negative') encodeProbability += 0.2; // We remember bad things
|
|
273
|
+
|
|
274
|
+
// Cognitive load (harder to encode when overloaded)
|
|
275
|
+
encodeProbability -= cognitiveLoad * 0.2;
|
|
276
|
+
|
|
277
|
+
// Content length and importance
|
|
278
|
+
if (content.length > 100) encodeProbability += 0.1;
|
|
279
|
+
if (/\b(entscheidung|decision|wichtig|important)\b/i.test(content)) encodeProbability += 0.2;
|
|
280
|
+
|
|
281
|
+
return Math.random() < Math.min(1, Math.max(0, encodeProbability));
|
|
282
|
+
}
|
|
283
|
+
|
|
284
|
+
function createEpisodicMemory(agentId: string, content: string, context: ConversationContext): EpisodicMemory {
|
|
285
|
+
// Extract contextual information for episodic encoding
|
|
286
|
+
const episodic: EpisodicMemory = {
|
|
287
|
+
event: content,
|
|
288
|
+
timestamp: Date.now(),
|
|
289
|
+
location: 'telegram', // Could be extracted from context
|
|
290
|
+
participants: [agentId], // Could include other participants
|
|
291
|
+
emotionalIntensity: context.emotionalContext === 'excited' ? 0.8 :
|
|
292
|
+
context.emotionalContext === 'positive' ? 0.6 :
|
|
293
|
+
context.emotionalContext === 'negative' ? 0.7 : 0.4,
|
|
294
|
+
contextualCues: context.recentTopics.slice(-3), // Last 3 topics as retrieval cues
|
|
295
|
+
consolidationScore: 0.1 // Start low, increases with sleep cycles
|
|
296
|
+
};
|
|
297
|
+
|
|
298
|
+
return episodic;
|
|
299
|
+
}
|
|
300
|
+
|
|
301
|
+
function updateMemoryChunks(agentId: string, relatedMemories: any[]) {
|
|
302
|
+
if (relatedMemories.length < 2) return; // Need at least 2 memories to chunk
|
|
303
|
+
|
|
304
|
+
const chunks = memoryChunks.get(agentId) || [];
|
|
305
|
+
const memoryIds = relatedMemories.map(m => m.id || m._key).filter(Boolean);
|
|
306
|
+
|
|
307
|
+
if (memoryIds.length < 2) return;
|
|
308
|
+
|
|
309
|
+
// Find existing chunk or create new one
|
|
310
|
+
let chunk = chunks.find(c =>
|
|
311
|
+
c.relatedMemories.some(id => memoryIds.includes(id))
|
|
312
|
+
);
|
|
313
|
+
|
|
314
|
+
if (chunk) {
|
|
315
|
+
// Strengthen existing chunk
|
|
316
|
+
chunk.relatedMemories = [...new Set([...chunk.relatedMemories, ...memoryIds])];
|
|
317
|
+
chunk.strength += 0.1;
|
|
318
|
+
chunk.lastActivation = Date.now();
|
|
319
|
+
} else {
|
|
320
|
+
// Create new chunk
|
|
321
|
+
const abstractConcept = extractAbstractConcept(relatedMemories);
|
|
322
|
+
chunk = {
|
|
323
|
+
id: `chunk_${Date.now()}`,
|
|
324
|
+
relatedMemories: memoryIds,
|
|
325
|
+
abstractConcept,
|
|
326
|
+
strength: 1.0,
|
|
327
|
+
lastActivation: Date.now()
|
|
328
|
+
};
|
|
329
|
+
chunks.push(chunk);
|
|
330
|
+
}
|
|
331
|
+
|
|
332
|
+
memoryChunks.set(agentId, chunks);
|
|
333
|
+
}
|
|
334
|
+
|
|
335
|
+
function extractAbstractConcept(memories: any[]): string {
|
|
336
|
+
// Simple concept extraction based on common words
|
|
337
|
+
const allContent = memories.map(m => m.content || '').join(' ').toLowerCase();
|
|
338
|
+
|
|
339
|
+
if (/memtap.*development|plugin.*code/i.test(allContent)) return 'MemTap Development';
|
|
340
|
+
if (/business.*model|pricing.*strategy/i.test(allContent)) return 'Business Strategy';
|
|
341
|
+
if (/server.*deploy|infrastructure/i.test(allContent)) return 'Infrastructure';
|
|
342
|
+
if (/problem.*solution|debug|fix/i.test(allContent)) return 'Problem Solving';
|
|
343
|
+
|
|
344
|
+
return 'General Knowledge';
|
|
345
|
+
}
|
|
346
|
+
|
|
347
|
+
/**
 * Update and return the rolling conversation context for an agent.
 *
 * Mutates module-level state in a fixed order: extracts topics from the
 * message, slides the topic window, re-derives attention/emotion, appends to
 * the global attentionHistory (trimming 1000 -> 500), counts memory-style
 * queries, recomputes the dominant topic by frequency, and grades engagement
 * from query frequency plus an attention bonus. The returned object is the
 * same instance stored in `conversationState` — callers share the mutation.
 *
 * @param agentId key into the module-level conversationState map
 * @param message latest user/assistant message to fold into the context
 * @returns the updated (and stored) ConversationContext
 */
function analyzeConversationContext(agentId: string, message: string): ConversationContext {
  const existing = conversationState.get(agentId) || {
    recentTopics: [],
    memoryQueryCount: 0,
    userEngagement: 'medium',
    attentionLevel: 'focused',
    emotionalContext: 'neutral'
  };

  // Extract topics from current message (coarse keyword buckets).
  const topics: string[] = [];
  const msg = message.toLowerCase();

  if (/memtap/i.test(msg)) topics.push('memtap');
  if (/business|pricing|model/i.test(msg)) topics.push('business');
  if (/server|deployment|vps|infrastructure/i.test(msg)) topics.push('infrastructure');
  if (/plugin|entwicklung|development|code/i.test(msg)) topics.push('development');
  if (/entscheidung|decision/i.test(msg)) topics.push('decisions');
  if (/problem|issue|fehler|bug|error/i.test(msg)) topics.push('problems');

  // Update recent topics (sliding window of 5).
  existing.recentTopics = [...existing.recentTopics, ...topics].slice(-5);

  // Analyze attention and emotional state from the raw message.
  existing.attentionLevel = analyzeAttentionLevel(message, existing) as any;
  existing.emotionalContext = calculateEmotionalContext(message) as any;

  // Track attention patterns for the user profile (trigger is a 50-char preview).
  attentionHistory.push({
    timestamp: Date.now(),
    agent: agentId,
    level: existing.attentionLevel,
    trigger: message.substring(0, 50)
  });

  // Limit attention history size (trim back to the most recent 500).
  if (attentionHistory.length > 1000) {
    attentionHistory = attentionHistory.slice(-500);
  }

  // Count memory-related queries and stamp the access time.
  if (isMemoryQuery(message)) {
    existing.memoryQueryCount++;
    existing.lastMemoryAccess = Date.now();
  }

  // Determine dominant topic: highest frequency in the 5-item window.
  const topicCounts = existing.recentTopics.reduce((acc, topic) => {
    acc[topic] = (acc[topic] || 0) + 1;
    return acc;
  }, {} as Record<string, number>);

  existing.dominantTopic = Object.entries(topicCounts)
    .sort(([,a], [,b]) => b - a)[0]?.[0];

  // Calculate engagement from query frequency plus an attention bonus
  // (flow +1, focused +0.5, distracted +0); thresholds: >3 high, >1 medium.
  const queryFreq = existing.memoryQueryCount;
  const attentionBoost = existing.attentionLevel === 'flow' ? 1 :
                        existing.attentionLevel === 'focused' ? 0.5 : 0;
  const adjustedFreq = queryFreq + attentionBoost;

  existing.userEngagement = adjustedFreq > 3 ? 'high' : adjustedFreq > 1 ? 'medium' : 'low';

  conversationState.set(agentId, existing);
  return existing;
}
|
|
413
|
+
|
|
414
|
+
function isMemoryQuery(message: string): boolean {
|
|
415
|
+
const memoryKeywords = [
|
|
416
|
+
/\b(erinnerst|remember|recall|was war|what was|früher|previously)\b/i,
|
|
417
|
+
/\b(entscheidung|decision|status|projekt|project)\b/i,
|
|
418
|
+
/\b(wie haben wir|how did we|wo stehen|where are)\b/i
|
|
419
|
+
];
|
|
420
|
+
return memoryKeywords.some(kw => kw.test(message));
|
|
421
|
+
}
|
|
422
|
+
|
|
423
|
+
function getUserProfile(agentId: string): UserProfile {
|
|
424
|
+
return userProfiles.get(agentId) || {
|
|
425
|
+
recallSensitivity: 'medium',
|
|
426
|
+
preferredMemoryTypes: ['fact', 'decision', 'event'],
|
|
427
|
+
averageQueryComplexity: 2,
|
|
428
|
+
lastActive: Date.now(),
|
|
429
|
+
totalQueries: 0,
|
|
430
|
+
successfulRecalls: 0
|
|
431
|
+
};
|
|
432
|
+
}
|
|
433
|
+
|
|
434
|
+
function updateUserProfile(agentId: string, queryComplexity: number, successful: boolean) {
|
|
435
|
+
const profile = getUserProfile(agentId);
|
|
436
|
+
profile.totalQueries++;
|
|
437
|
+
profile.lastActive = Date.now();
|
|
438
|
+
profile.averageQueryComplexity = (profile.averageQueryComplexity + queryComplexity) / 2;
|
|
439
|
+
if (successful) profile.successfulRecalls++;
|
|
440
|
+
userProfiles.set(agentId, profile);
|
|
441
|
+
}
|
|
442
|
+
|
|
443
|
+
function predictiveTopicBoost(context: ConversationContext, baseTopics: string[]): string[] {
|
|
444
|
+
const enhanced = [...baseTopics];
|
|
445
|
+
|
|
446
|
+
// If we're in a topic-focused conversation, boost related topics
|
|
447
|
+
if (context.dominantTopic === 'memtap' && context.recentTopics.length > 2) {
|
|
448
|
+
enhanced.push('development', 'business', 'infrastructure');
|
|
449
|
+
}
|
|
450
|
+
|
|
451
|
+
if (context.dominantTopic === 'problems' && context.recentTopics.includes('memtap')) {
|
|
452
|
+
enhanced.push('debugging', 'server issues', 'deployment');
|
|
453
|
+
}
|
|
454
|
+
|
|
455
|
+
return [...new Set(enhanced)]; // dedupe
|
|
456
|
+
}
|
|
457
|
+
|
|
458
|
+
/**
 * Classify how aggressively to recall memories for an incoming message.
 *
 * Pipeline (order matters — each stage mutates intensity/confidence built by
 * the previous one):
 *   1. keyword triggers set a base intensity (3 deep / 2 standard / 1 light)
 *      with an associated confidence;
 *   2. conversation-context adjustments can bump intensity and confidence;
 *   3. user-profile sensitivity shifts intensity up or down;
 *   4. a strong recall track record (>70% success) adds confidence;
 *   5. topics are extracted and predictively expanded.
 *
 * Side effect: calls analyzeConversationContext, which updates module state.
 *
 * @param message raw user message (German/English keyword heuristics)
 * @param agentId agent whose context/profile modulate the classification
 * @returns intensity (0-3), seed topics, clamped confidence, and a
 *          human-readable reasoning trail
 */
function analyzeRecallLevel(message: string, agentId: string): RecallLevel {
  const msg = message.toLowerCase();
  const context = analyzeConversationContext(agentId, message);
  const userProfile = getUserProfile(agentId);

  // Base keyword analysis
  let intensity = 0;
  let confidence = 0.5;
  let reasoning = '';

  // Deep recall triggers (level 3)
  const deepTriggers = [
    /\b(project status|projektstand|wo stehen wir|complete overview)\b/i,
    /\b(alle entscheidungen|all decisions|full context|comprehensive)\b/i,
    /\b(memtap.*status|memtap.*fortschritt|memtap.*stand)\b/i,
    /\b(business.*model|geschäfts.*modell|pricing.*strategy)\b/i
  ];

  // Standard recall triggers (level 2)
  const standardTriggers = [
    /\b(erinnerst du|remember|recall)\b/i,
    /\b(entscheidung|decision|beschlossen|agreed)\b/i,
    /\b(was war|what was|wie haben wir|how did we)\b/i,
    /\b(letzte mal|last time|früher|previously)\b/i,
    /\b(wer ist|who is|wer war|who was)\b/i
  ];

  // Light recall triggers (level 1)
  const lightTriggers = [
    /\b(status|update|aktuell|current)\b/i,
    /\b(problem|issue|fehler|bug)\b/i,
    /\b(wie geht|how.*going|weiter|next)\b/i,
    /\b(info|information|details|erklärung)\b/i
  ];

  // Base classification — first matching tier wins.
  if (deepTriggers.some(t => t.test(msg))) {
    intensity = 3;
    confidence = 0.9;
    reasoning = 'Deep trigger detected';
  } else if (standardTriggers.some(t => t.test(msg))) {
    intensity = 2;
    confidence = 0.8;
    reasoning = 'Standard memory query';
  } else if (lightTriggers.some(t => t.test(msg))) {
    intensity = 1;
    confidence = 0.6;
    reasoning = 'Light context hint';
  }

  // Conversation context adjustments (intensity capped at 3 throughout).
  if (context.dominantTopic === 'memtap' && context.recentTopics.length > 2) {
    intensity = Math.min(3, intensity + 1);
    confidence += 0.2;
    reasoning += ' + MemTap conversation context';
  }

  // Only escalates when some trigger already fired (intensity > 0).
  if (context.memoryQueryCount > 2 && intensity > 0) {
    intensity = Math.min(3, intensity + 1);
    confidence += 0.1;
    reasoning += ' + frequent memory queries';
  }

  // User profile adjustments — sensitivity shifts intensity, not confidence.
  if (userProfile.recallSensitivity === 'high' && intensity > 0) {
    intensity = Math.min(3, intensity + 1);
    reasoning += ' + high user sensitivity';
  } else if (userProfile.recallSensitivity === 'low' && intensity > 0) {
    intensity = Math.max(1, intensity - 1);
    reasoning += ' + low user sensitivity';
  }

  // Recent successful recalls (>70% success rate) boost confidence.
  if (userProfile.successfulRecalls > userProfile.totalQueries * 0.7) {
    confidence += 0.1;
  }

  // Topic extraction with predictive enhancement.
  const baseTopics: string[] = [];
  if (/memtap/i.test(msg)) baseTopics.push('MemTap');
  if (/business|pricing|model/i.test(msg)) baseTopics.push('business model');
  if (/server|deployment|vps/i.test(msg)) baseTopics.push('infrastructure');
  if (/plugin|entwicklung|development/i.test(msg)) baseTopics.push('development');
  if (/entscheidung|decision/i.test(msg)) baseTopics.push('decisions');

  const topics = predictiveTopicBoost(context, baseTopics);

  // Clamp confidence into [0, 1] after all the additive adjustments.
  confidence = Math.min(1, Math.max(0, confidence));

  return {
    intensity,
    topics: topics.length ? topics : ['recent activity'],
    confidence,
    reasoning: reasoning || 'no specific triggers'
  };
}
|
|
555
|
+
|
|
556
|
+
// ── LLM Memory Extraction ────────────────────────────────────────────────────
|
|
557
|
+
|
|
558
|
+
// System prompt for LLM-based memory extraction (used by llmExtract).
// Deliberately in German — the deployment's working language — and part of
// runtime behavior, so the text must not be altered casually. It instructs the
// model to return ONLY a JSON array of {content, type, importance 1-10, tags}
// objects, or [] when nothing is worth keeping.
const EXTRACTION_PROMPT = `Du bist ein Memory-Extractor. Analysiere die folgende Assistenten-Nachricht und extrahiere Informationen die langfristig wissenswert sind.

Regeln:
- Nur NEUE Fakten, Entscheidungen, Präferenzen, Events extrahieren
- Keine trivialen Dinge ("ich hab gesucht", "hier ist das Ergebnis")
- Keine Wiederholungen von bereits bekanntem Wissen
- Technische Konfigurationen, Entscheidungen, Personen-Info = wichtig
- Smalltalk, Statusmeldungen, Zwischenschritte = unwichtig
- Wenn NICHTS wissenswert ist: leeres Array zurückgeben

Antwort als JSON-Array (NUR das Array, kein Markdown):
[
  {
    "content": "Kurze, prägnante Beschreibung des Fakts",
    "type": "fact|preference|decision|identity|event|observation|goal|task",
    "importance": 1-10,
    "tags": ["tag1", "tag2"]
  }
]

Wenn nichts extrahiert werden soll: []`;
|
|
579
|
+
|
|
580
|
+
async function llmExtract(cfg: MemTapConfig, content: string): Promise<any[]> {
|
|
581
|
+
const llmUrl = cfg.llmUrl || 'http://127.0.0.1:18789/v1/chat/completions';
|
|
582
|
+
const model = cfg.llmModel || 'anthropic/claude-sonnet-4-20250514';
|
|
583
|
+
|
|
584
|
+
const res = await fetch(llmUrl, {
|
|
585
|
+
method: 'POST',
|
|
586
|
+
headers: { 'Content-Type': 'application/json' },
|
|
587
|
+
body: JSON.stringify({
|
|
588
|
+
model,
|
|
589
|
+
max_tokens: 1000,
|
|
590
|
+
messages: [
|
|
591
|
+
{ role: 'system', content: EXTRACTION_PROMPT },
|
|
592
|
+
{ role: 'user', content: `Assistenten-Nachricht:\n\n${content}` },
|
|
593
|
+
],
|
|
594
|
+
}),
|
|
595
|
+
});
|
|
596
|
+
|
|
597
|
+
if (!res.ok) throw new Error(`LLM ${res.status}`);
|
|
598
|
+
const data = await res.json();
|
|
599
|
+
const text = data.choices?.[0]?.message?.content?.trim() || '[]';
|
|
600
|
+
|
|
601
|
+
const cleaned = text.replace(/^```json?\n?/m, '').replace(/\n?```$/m, '').trim();
|
|
602
|
+
|
|
603
|
+
try {
|
|
604
|
+
const parsed = JSON.parse(cleaned);
|
|
605
|
+
return Array.isArray(parsed) ? parsed : [];
|
|
606
|
+
} catch {
|
|
607
|
+
return [];
|
|
608
|
+
}
|
|
609
|
+
}
|
|
610
|
+
|
|
611
|
+
// ── Plugin Entry ─────────────────────────────────────────────────────────────
|
|
612
|
+
|
|
613
|
+
export default function register(api: any) {
|
|
614
|
+
const logger = api.logger ?? console;
|
|
615
|
+
|
|
616
|
+
// ── Tool: memtap_recall ──────────────────────────────────────────────────
|
|
617
|
+
|
|
618
|
+
api.registerTool({
|
|
619
|
+
name: 'memtap_recall',
|
|
620
|
+
description:
|
|
621
|
+
'Search the MemTap knowledge graph for relevant memories. ' +
|
|
622
|
+
'Returns semantically matched memories with entities and relationships. ' +
|
|
623
|
+
'Use this before answering questions about prior decisions, people, projects, or preferences.',
|
|
624
|
+
parameters: {
|
|
625
|
+
type: 'object',
|
|
626
|
+
properties: {
|
|
627
|
+
query: { type: 'string', description: 'Natural language search query' },
|
|
628
|
+
types: {
|
|
629
|
+
type: 'array',
|
|
630
|
+
items: { type: 'string', enum: MEMORY_TYPES },
|
|
631
|
+
description: 'Filter by memory types (optional)',
|
|
632
|
+
},
|
|
633
|
+
limit: { type: 'number', description: 'Max results (default 10)' },
|
|
634
|
+
},
|
|
635
|
+
required: ['query'],
|
|
636
|
+
},
|
|
637
|
+
async execute(_id: string, params: { query: string; types?: string[]; limit?: number }) {
|
|
638
|
+
const cfg = getConfig(api);
|
|
639
|
+
const url = new URL('/recall', baseUrl(cfg));
|
|
640
|
+
url.searchParams.set('q', params.query);
|
|
641
|
+
url.searchParams.set('agent', agentId(cfg, api));
|
|
642
|
+
if (params.limit) url.searchParams.set('limit', String(params.limit));
|
|
643
|
+
if (params.types?.length) url.searchParams.set('types', params.types.join(','));
|
|
644
|
+
|
|
645
|
+
try {
|
|
646
|
+
const data = await bbFetch(cfg, url.toString());
|
|
647
|
+
const memories = data.results || data.memories || [];
|
|
648
|
+
|
|
649
|
+
if (memories.length === 0) {
|
|
650
|
+
return { content: [{ type: 'text', text: 'No matching memories found in MemTap.' }] };
|
|
651
|
+
}
|
|
652
|
+
|
|
653
|
+
const formatted = memories.map((m: any, i: number) => {
|
|
654
|
+
const entities = (m.entities || []).map((e: any) => e.name).join(', ');
|
|
655
|
+
const meta = [`Type: ${m.type}`, `Importance: ${displayImportance(m.importance)}/10`];
|
|
656
|
+
if (entities) meta.push(`Entities: ${entities}`);
|
|
657
|
+
if (m.created) meta.push(`Created: ${m.created.split('T')[0]}`);
|
|
658
|
+
return `${i + 1}. [${m.id || m._key}] ${m.content}\n ${meta.join(' | ')}`;
|
|
659
|
+
}).join('\n\n');
|
|
660
|
+
|
|
661
|
+
return { content: [{ type: 'text', text: `Found ${memories.length} memories:\n\n${formatted}` }] };
|
|
662
|
+
} catch (err: any) {
|
|
663
|
+
return { content: [{ type: 'text', text: `MemTap recall error: ${err.message}` }], isError: true };
|
|
664
|
+
}
|
|
665
|
+
},
|
|
666
|
+
});
|
|
667
|
+
|
|
668
|
+
// ── Tool: memtap_remember ────────────────────────────────────────────────
|
|
669
|
+
|
|
670
|
+
api.registerTool({
|
|
671
|
+
name: 'memtap_remember',
|
|
672
|
+
description:
|
|
673
|
+
'Store a memory in the MemTap knowledge graph. ' +
|
|
674
|
+
'Use for important facts, decisions, preferences, events, goals, or observations worth remembering long-term. ' +
|
|
675
|
+
'Entities mentioned are automatically extracted and linked.',
|
|
676
|
+
parameters: {
|
|
677
|
+
type: 'object',
|
|
678
|
+
properties: {
|
|
679
|
+
content: { type: 'string', description: 'The memory content to store' },
|
|
680
|
+
type: { type: 'string', enum: MEMORY_TYPES, description: 'Memory type (default: fact)' },
|
|
681
|
+
importance: { type: 'number', description: 'Importance 1-10 (default: 5)' },
|
|
682
|
+
tags: { type: 'array', items: { type: 'string' }, description: 'Optional tags for categorization' },
|
|
683
|
+
},
|
|
684
|
+
required: ['content'],
|
|
685
|
+
},
|
|
686
|
+
async execute(_id: string, params: { content: string; type?: string; importance?: number; tags?: string[] }) {
|
|
687
|
+
const cfg = getConfig(api);
|
|
688
|
+
const importance = params.importance ?? 5;
|
|
689
|
+
const body = {
|
|
690
|
+
content: params.content,
|
|
691
|
+
type: params.type || 'fact',
|
|
692
|
+
agent: agentId(cfg, api),
|
|
693
|
+
importance: storeImportance(importance),
|
|
694
|
+
tags: params.tags || [],
|
|
695
|
+
};
|
|
696
|
+
|
|
697
|
+
try {
|
|
698
|
+
const data = await bbFetch(cfg, `${baseUrl(cfg)}/memories`, {
|
|
699
|
+
method: 'POST',
|
|
700
|
+
body: JSON.stringify(body),
|
|
701
|
+
});
|
|
702
|
+
|
|
703
|
+
const entities = (data.entities || []).map((e: any) => e.name).join(', ');
|
|
704
|
+
let response = `Memory stored [${data.id || data._key}] (type: ${body.type}, importance: ${importance}/10)`;
|
|
705
|
+
if (entities) response += `\n Linked entities: ${entities}`;
|
|
706
|
+
|
|
707
|
+
return { content: [{ type: 'text', text: response }] };
|
|
708
|
+
} catch (err: any) {
|
|
709
|
+
return { content: [{ type: 'text', text: `MemTap store error: ${err.message}` }], isError: true };
|
|
710
|
+
}
|
|
711
|
+
},
|
|
712
|
+
});
|
|
713
|
+
|
|
714
|
+
// ── Tool: memtap_bulletin (enhanced with graph expansion) ────────────────
|
|
715
|
+
|
|
716
|
+
api.registerTool({
|
|
717
|
+
name: 'memtap_bulletin',
|
|
718
|
+
description:
|
|
719
|
+
'Get a contextual memory bulletin from MemTap with graph expansion. ' +
|
|
720
|
+
'Returns relevant memories for given topics PLUS related memories discovered via graph traversal. ' +
|
|
721
|
+
'Agents suddenly "know" things they didn\'t explicitly search for.',
|
|
722
|
+
parameters: {
|
|
723
|
+
type: 'object',
|
|
724
|
+
properties: {
|
|
725
|
+
topics: { type: 'array', items: { type: 'string' }, description: 'Topics to get context for' },
|
|
726
|
+
limit: { type: 'number', description: 'Max seed memories per topic (default 3)' },
|
|
727
|
+
},
|
|
728
|
+
required: ['topics'],
|
|
729
|
+
},
|
|
730
|
+
async execute(_id: string, params: { topics: string[]; limit?: number }) {
|
|
731
|
+
const cfg = getConfig(api);
|
|
732
|
+
|
|
733
|
+
try {
|
|
734
|
+
const data = await bbFetch(cfg, `${baseUrl(cfg)}/bulletin`, {
|
|
735
|
+
method: 'POST',
|
|
736
|
+
body: JSON.stringify({
|
|
737
|
+
topics: params.topics,
|
|
738
|
+
agent: agentId(cfg, api),
|
|
739
|
+
limit: params.limit || 3,
|
|
740
|
+
}),
|
|
741
|
+
});
|
|
742
|
+
|
|
743
|
+
const sections = (data.sections || []).map((s: any) => {
|
|
744
|
+
// Direct search hits
|
|
745
|
+
const items = (s.memories || []).map((m: any) =>
|
|
746
|
+
` - [${m.type}] ${m.summary} (importance: ${m.importance}/10)`
|
|
747
|
+
).join('\n');
|
|
748
|
+
|
|
749
|
+
// Graph-expanded context
|
|
750
|
+
const graphItems = (s.graphContext || []).map((m: any) =>
|
|
751
|
+
` - [${m.type}] ${m.summary} (importance: ${Math.round(m.importance)}/10)\n Found via: ${m.foundVia}`
|
|
752
|
+
).join('\n');
|
|
753
|
+
|
|
754
|
+
// Notable connections
|
|
755
|
+
const connItems = (s.connections || []).slice(0, 5).map((c: any) =>
|
|
756
|
+
` - ${c.fromSummary} \u2192 ${c.edgeType} \u2192 ${c.toSummary}`
|
|
757
|
+
).join('\n');
|
|
758
|
+
|
|
759
|
+
let section = `**${s.topic}** (${s.memories?.length || 0} direct + ${s.graphContext?.length || 0} via graph)`;
|
|
760
|
+
section += `\n${items || ' (no direct matches)'}`;
|
|
761
|
+
if (graphItems) section += `\n\nRelated (via graph):\n${graphItems}`;
|
|
762
|
+
if (connItems) section += `\n\nConnections:\n${connItems}`;
|
|
763
|
+
return section;
|
|
764
|
+
}).join('\n\n---\n\n');
|
|
765
|
+
|
|
766
|
+
return { content: [{ type: 'text', text: sections || 'No relevant memories found.' }] };
|
|
767
|
+
} catch (err: any) {
|
|
768
|
+
return { content: [{ type: 'text', text: `MemTap bulletin error: ${err.message}` }], isError: true };
|
|
769
|
+
}
|
|
770
|
+
},
|
|
771
|
+
});
|
|
772
|
+
|
|
773
|
+
// ── Tool: memtap_graphrag ────────────────────────────────────────────────
|
|
774
|
+
|
|
775
|
+
api.registerTool({
|
|
776
|
+
name: 'memtap_graphrag',
|
|
777
|
+
description:
|
|
778
|
+
'Advanced GraphRAG search: combines vector/BM25 search with multi-hop graph traversal. ' +
|
|
779
|
+
'Returns memories found through the knowledge graph with full path explanations. ' +
|
|
780
|
+
'Falls back to BM25 if embeddings are not configured.',
|
|
781
|
+
parameters: {
|
|
782
|
+
type: 'object',
|
|
783
|
+
properties: {
|
|
784
|
+
query: { type: 'string', description: 'Natural language search query' },
|
|
785
|
+
depth: { type: 'number', description: 'Graph traversal depth (default 2, max 4)' },
|
|
786
|
+
topK: { type: 'number', description: 'Number of seed results (default 5)' },
|
|
787
|
+
},
|
|
788
|
+
required: ['query'],
|
|
789
|
+
},
|
|
790
|
+
async execute(_id: string, params: { query: string; depth?: number; topK?: number }) {
|
|
791
|
+
const cfg = getConfig(api);
|
|
792
|
+
|
|
793
|
+
try {
|
|
794
|
+
const data = await bbFetch(cfg, `${baseUrl(cfg)}/graphrag/query`, {
|
|
795
|
+
method: 'POST',
|
|
796
|
+
body: JSON.stringify({
|
|
797
|
+
query: params.query,
|
|
798
|
+
agent: agentId(cfg, api),
|
|
799
|
+
graphDepth: params.depth ?? 2,
|
|
800
|
+
embeddingTopK: params.topK ?? 5,
|
|
801
|
+
}),
|
|
802
|
+
});
|
|
803
|
+
|
|
804
|
+
const method = data.method || 'unknown';
|
|
805
|
+
const seeds = data.seeds || [];
|
|
806
|
+
const graphResults = data.results || data.graphResults || [];
|
|
807
|
+
|
|
808
|
+
if (seeds.length === 0 && graphResults.length === 0) {
|
|
809
|
+
return { content: [{ type: 'text', text: 'No results found via GraphRAG.' }] };
|
|
810
|
+
}
|
|
811
|
+
|
|
812
|
+
let output = `GraphRAG results (method: ${method}, ${seeds.length} seeds, ${graphResults.length} graph results):\n\n`;
|
|
813
|
+
|
|
814
|
+
// Show seeds
|
|
815
|
+
if (seeds.length > 0) {
|
|
816
|
+
output += 'Direct matches:\n';
|
|
817
|
+
output += seeds.map((s: any, i: number) =>
|
|
818
|
+
` ${i + 1}. [${s.type}] ${s.summary || s.content} (importance: ${Math.round(s.importance)}/10)`
|
|
819
|
+
).join('\n');
|
|
820
|
+
output += '\n\n';
|
|
821
|
+
}
|
|
822
|
+
|
|
823
|
+
// Show graph results with traversal paths
|
|
824
|
+
if (graphResults.length > 0) {
|
|
825
|
+
output += 'Discovered via graph traversal:\n';
|
|
826
|
+
output += graphResults.map((r: any, i: number) => {
|
|
827
|
+
const imp = Math.round(r.importance);
|
|
828
|
+
let line = ` ${i + 1}. [${r.type}] ${r.summary || r.content} (importance: ${imp}/10, ${r.hopDistance ?? r.depth ?? '?'}-hop)`;
|
|
829
|
+
// Show path explanation
|
|
830
|
+
if (r.path && r.path.length > 0) {
|
|
831
|
+
const pathStr = r.path.map((p: any) => `${p.id} \u2192 ${p.edgeType}`).join(' \u2192 ');
|
|
832
|
+
line += `\n Path: ${pathStr} \u2192 ${r.id}`;
|
|
833
|
+
} else if (r.foundVia) {
|
|
834
|
+
line += `\n Found via: ${r.foundVia}`;
|
|
835
|
+
}
|
|
836
|
+
return line;
|
|
837
|
+
}).join('\n');
|
|
838
|
+
}
|
|
839
|
+
|
|
840
|
+
return { content: [{ type: 'text', text: output }] };
|
|
841
|
+
} catch (err: any) {
|
|
842
|
+
// Graceful fallback if GraphRAG endpoint not available
|
|
843
|
+
if (err.message.includes('404') || err.message.includes('ECONNREFUSED')) {
|
|
844
|
+
return { content: [{ type: 'text', text: 'GraphRAG not available (embeddings may not be configured). Use memtap_recall for BM25 search.' }] };
|
|
845
|
+
}
|
|
846
|
+
return { content: [{ type: 'text', text: `MemTap GraphRAG error: ${err.message}` }], isError: true };
|
|
847
|
+
}
|
|
848
|
+
},
|
|
849
|
+
});
|
|
850
|
+
|
|
851
|
+
// ── Tool: memtap_maintenance ─────────────────────────────────────────────
|
|
852
|
+
|
|
853
|
+
api.registerTool({
  name: 'memtap_maintenance',
  description:
    'Run memory maintenance checks on the MemTap knowledge graph. Actions:\n' +
    '- decay-report: find memories that have decayed below importance threshold\n' +
    '- contradictions: find contradicting memory pairs\n' +
    '- dedup-scan: find potential duplicate memories\n' +
    '- run-all: combined report of all checks',
  parameters: {
    type: 'object',
    properties: {
      action: {
        type: 'string',
        enum: ['decay-report', 'contradictions', 'dedup-scan', 'run-all'],
        description: 'Maintenance action to run',
      },
    },
    required: ['action'],
  },
  /**
   * Dispatch one maintenance check to the MemTap server and render its result
   * as a human-readable report.
   *
   * Response shapes vary across server versions, so every branch probes
   * several alternative field names (e.g. `results` vs `memories`,
   * `duplicates` vs `dedupScan`) before falling back to an empty list.
   * GET is used for read-only checks; dedup-scan and run-all POST because
   * they trigger work server-side.
   */
  async execute(_id: string, params: { action: string }) {
    const cfg = getConfig(api);
    const base = baseUrl(cfg);

    try {
      let data: any;

      switch (params.action) {
        case 'decay-report': {
          data = await bbFetch(cfg, `${base}/maintenance/decay-report`);
          // Accept either field name; older servers used `memories`.
          const results = data.results || data.memories || [];
          if (results.length === 0) {
            return { content: [{ type: 'text', text: 'All memories are above the decay threshold. No action needed.' }] };
          }
          let output = `Decay Report: ${results.length} memories have decayed significantly\n\n`;
          // Cap the listing at 20 entries to keep the report readable.
          output += results.slice(0, 20).map((m: any, i: number) => {
            // Effective importance after decay; field name differs by server version.
            const eff = m.effectiveImportance ?? m.effImportance ?? '?';
            const days = m.daysSinceAccess ?? '?';
            return ` ${i + 1}. [${m.id}] ${m.summary} (effective: ${eff}/10, ${days} days since access)`;
          }).join('\n');
          output += `\n\nRecommendation: These memories have decayed below 20% importance. Consider reviewing or archiving them.`;
          return { content: [{ type: 'text', text: output }] };
        }

        case 'contradictions': {
          data = await bbFetch(cfg, `${base}/maintenance/contradictions`);
          const results = data.results || data.contradictions || [];
          if (results.length === 0) {
            return { content: [{ type: 'text', text: 'No contradictions found in the knowledge graph.' }] };
          }
          let output = `Contradictions: ${results.length} contradicting pair(s) found\n\n`;
          output += results.map((c: any, i: number) => {
            // Some servers nest the pair under memory1/memory2; others put
            // from/to fields directly on the pair object — fall back to `c`.
            const m1 = c.memory1 || c;
            const m2 = c.memory2 || c;
            return ` ${i + 1}. "${m1.summary || m1.from}" CONTRADICTS "${m2.summary || m2.to}"`;
          }).join('\n');
          output += `\n\nRecommendation: Resolve by archiving the outdated memory or updating the relationship.`;
          return { content: [{ type: 'text', text: output }] };
        }

        case 'dedup-scan': {
          // POST with an empty JSON body: the scan runs server-side on demand.
          data = await bbFetch(cfg, `${base}/maintenance/dedup-scan`, { method: 'POST', body: '{}' });
          const dupes = data.duplicates || [];
          if (dupes.length === 0) {
            return { content: [{ type: 'text', text: 'No obvious duplicates detected.' }] };
          }
          let output = `Dedup Scan: ${dupes.length} potential duplicate(s) found\n\n`;
          output += dupes.slice(0, 20).map((d: any, i: number) => {
            const m1 = d.memory1 || d;
            const m2 = d.memory2 || d;
            // Two duplicate kinds: explicit update chains vs fuzzy content matches.
            const kind = d.type === 'update_chain' ? 'Update chain' : 'Similar content';
            return ` ${i + 1}. [${kind}] "${m1.summary || m1.older}" vs "${m2.summary || m2.newer}"\n ${d.recommendation || 'Consider merging or archiving the older version.'}`;
          }).join('\n');
          return { content: [{ type: 'text', text: output }] };
        }

        case 'run-all': {
          // Combined report: the server runs all three checks in one call.
          data = await bbFetch(cfg, `${base}/maintenance/run-all`, { method: 'POST', body: '{}' });
          const decay = data.decay || data.decayReport || {};
          const contras = data.contradictions || {};
          const dedup = data.duplicates || data.dedupScan || {};

          let output = 'MemTap Maintenance Report\n\n';

          // Decay — prefer an explicit count; otherwise count the item list.
          const decayCount = decay.count ?? (decay.results || decay.memories || []).length;
          output += `Decay: ${decayCount} memories below threshold\n`;
          if (decayCount > 0) {
            const decayItems = decay.results || decay.memories || [];
            // Summaries are truncated to 5 items per section in the combined view.
            output += decayItems.slice(0, 5).map((m: any) =>
              ` - [${m.id}] ${m.summary} (effective: ${m.effectiveImportance ?? '?'}/10)`
            ).join('\n');
            output += `\n Action: Review or archive these ${decayCount} decayed memories\n`;
          }

          // Contradictions
          const contraCount = contras.count ?? (contras.results || contras.pairs || []).length;
          output += `\nContradictions: ${contraCount} pair(s)\n`;
          if (contraCount > 0) {
            const contraItems = contras.results || contras.pairs || [];
            output += contraItems.slice(0, 5).map((c: any) =>
              ` - "${c.from || c.memory1?.summary}" vs "${c.to || c.memory2?.summary}"`
            ).join('\n');
            output += `\n Action: Resolve by archiving outdated versions\n`;
          }

          // Dedup
          const dedupCount = dedup.count ?? (dedup.results || dedup.pairs || []).length;
          output += `\nDuplicates: ${dedupCount} potential duplicate(s)\n`;
          if (dedupCount > 0) {
            const dedupItems = dedup.results || dedup.pairs || [];
            output += dedupItems.slice(0, 5).map((d: any) =>
              ` - "${d.older || d.memory1?.summary}" vs "${d.newer || d.memory2?.summary}"`
            ).join('\n');
            output += `\n Action: Merge or archive older versions\n`;
          }

          return { content: [{ type: 'text', text: output }] };
        }

        default:
          return { content: [{ type: 'text', text: `Unknown maintenance action: ${params.action}` }], isError: true };
      }
    } catch (err: any) {
      return { content: [{ type: 'text', text: `MemTap maintenance error: ${err.message}` }], isError: true };
    }
  },
});
|
|
980
|
+
|
|
981
|
+
// ── Tool: memtap_graph ───────────────────────────────────────────────────
|
|
982
|
+
|
|
983
|
+
api.registerTool({
|
|
984
|
+
name: 'memtap_graph',
|
|
985
|
+
description:
|
|
986
|
+
'Analyze the MemTap knowledge graph structure. Actions:\n' +
|
|
987
|
+
'- overview: global graph stats (most connected nodes, edge distribution)\n' +
|
|
988
|
+
'- gaps: find orphan memories and weakly-connected nodes\n' +
|
|
989
|
+
'- clusters: discover topic clusters via connected components\n' +
|
|
990
|
+
'- connections: find paths between two memories (requires from, to)\n' +
|
|
991
|
+
'- traverse: BFS traversal from a start node (requires start, optional depth)',
|
|
992
|
+
parameters: {
|
|
993
|
+
type: 'object',
|
|
994
|
+
properties: {
|
|
995
|
+
action: {
|
|
996
|
+
type: 'string',
|
|
997
|
+
enum: ['overview', 'gaps', 'clusters', 'connections', 'traverse'],
|
|
998
|
+
description: 'Graph analysis action',
|
|
999
|
+
},
|
|
1000
|
+
from: { type: 'string', description: 'Source memory ID (for connections)' },
|
|
1001
|
+
to: { type: 'string', description: 'Target memory ID (for connections)' },
|
|
1002
|
+
start: { type: 'string', description: 'Start node ID (for traverse)' },
|
|
1003
|
+
depth: { type: 'number', description: 'Traversal depth (for traverse, default server-side)' },
|
|
1004
|
+
},
|
|
1005
|
+
required: ['action'],
|
|
1006
|
+
},
|
|
1007
|
+
async execute(_id: string, params: { action: string; from?: string; to?: string; start?: string; depth?: number }) {
|
|
1008
|
+
const cfg = getConfig(api);
|
|
1009
|
+
const base = baseUrl(cfg);
|
|
1010
|
+
|
|
1011
|
+
try {
|
|
1012
|
+
let data: any;
|
|
1013
|
+
|
|
1014
|
+
switch (params.action) {
|
|
1015
|
+
case 'overview':
|
|
1016
|
+
data = await bbFetch(cfg, `${base}/graph/overview`);
|
|
1017
|
+
break;
|
|
1018
|
+
case 'gaps':
|
|
1019
|
+
data = await bbFetch(cfg, `${base}/graph/gaps`);
|
|
1020
|
+
break;
|
|
1021
|
+
case 'clusters':
|
|
1022
|
+
data = await bbFetch(cfg, `${base}/graph/clusters`);
|
|
1023
|
+
break;
|
|
1024
|
+
case 'connections': {
|
|
1025
|
+
if (!params.from || !params.to) {
|
|
1026
|
+
return { content: [{ type: 'text', text: 'connections requires both "from" and "to" parameters.' }], isError: true };
|
|
1027
|
+
}
|
|
1028
|
+
const url = new URL('/graph/connections', base);
|
|
1029
|
+
url.searchParams.set('from', params.from);
|
|
1030
|
+
url.searchParams.set('to', params.to);
|
|
1031
|
+
data = await bbFetch(cfg, url.toString());
|
|
1032
|
+
break;
|
|
1033
|
+
}
|
|
1034
|
+
case 'traverse': {
|
|
1035
|
+
if (!params.start) {
|
|
1036
|
+
return { content: [{ type: 'text', text: 'traverse requires a "start" parameter.' }], isError: true };
|
|
1037
|
+
}
|
|
1038
|
+
const url = new URL('/graph/traverse', base);
|
|
1039
|
+
url.searchParams.set('start', params.start);
|
|
1040
|
+
if (params.depth != null) url.searchParams.set('depth', String(params.depth));
|
|
1041
|
+
data = await bbFetch(cfg, url.toString());
|
|
1042
|
+
break;
|
|
1043
|
+
}
|
|
1044
|
+
default:
|
|
1045
|
+
return { content: [{ type: 'text', text: `Unknown graph action: ${params.action}` }], isError: true };
|
|
1046
|
+
}
|
|
1047
|
+
|
|
1048
|
+
return { content: [{ type: 'text', text: JSON.stringify(data, null, 2) }] };
|
|
1049
|
+
} catch (err: any) {
|
|
1050
|
+
return { content: [{ type: 'text', text: `MemTap graph error: ${err.message}` }], isError: true };
|
|
1051
|
+
}
|
|
1052
|
+
},
|
|
1053
|
+
});
|
|
1054
|
+
|
|
1055
|
+
// ── Tool: memtap_decide ──────────────────────────────────────────────────
|
|
1056
|
+
|
|
1057
|
+
api.registerTool({
|
|
1058
|
+
name: 'memtap_decide',
|
|
1059
|
+
description:
|
|
1060
|
+
'Track decisions in MemTap. Actions:\n' +
|
|
1061
|
+
'- list: show open/overdue/resolved decisions\n' +
|
|
1062
|
+
'- create: create a new decision (requires content, optional options, deadline, context)\n' +
|
|
1063
|
+
'- resolve: resolve a decision (requires id, resolution)\n' +
|
|
1064
|
+
'- defer: defer a decision (requires id, reason)',
|
|
1065
|
+
parameters: {
|
|
1066
|
+
type: 'object',
|
|
1067
|
+
properties: {
|
|
1068
|
+
action: {
|
|
1069
|
+
type: 'string',
|
|
1070
|
+
enum: ['list', 'create', 'resolve', 'defer'],
|
|
1071
|
+
description: 'Decision action',
|
|
1072
|
+
},
|
|
1073
|
+
id: { type: 'string', description: 'Decision ID (for resolve/defer)' },
|
|
1074
|
+
content: { type: 'string', description: 'Decision description (for create)' },
|
|
1075
|
+
options: {
|
|
1076
|
+
type: 'array',
|
|
1077
|
+
items: { type: 'string' },
|
|
1078
|
+
description: 'Decision options (for create, optional)',
|
|
1079
|
+
},
|
|
1080
|
+
deadline: { type: 'string', description: 'Deadline ISO8601 (for create, optional)' },
|
|
1081
|
+
context: { type: 'string', description: 'Additional context (for create, optional)' },
|
|
1082
|
+
resolution: { type: 'string', description: 'Resolution text (for resolve)' },
|
|
1083
|
+
reason: { type: 'string', description: 'Reason for deferral (for defer)' },
|
|
1084
|
+
},
|
|
1085
|
+
required: ['action'],
|
|
1086
|
+
},
|
|
1087
|
+
async execute(_id: string, params: {
|
|
1088
|
+
action: string; id?: string; content?: string; options?: string[];
|
|
1089
|
+
deadline?: string; context?: string; resolution?: string; reason?: string;
|
|
1090
|
+
}) {
|
|
1091
|
+
const cfg = getConfig(api);
|
|
1092
|
+
const base = baseUrl(cfg);
|
|
1093
|
+
|
|
1094
|
+
try {
|
|
1095
|
+
let data: any;
|
|
1096
|
+
|
|
1097
|
+
switch (params.action) {
|
|
1098
|
+
case 'list':
|
|
1099
|
+
data = await bbFetch(cfg, `${base}/decisions`);
|
|
1100
|
+
break;
|
|
1101
|
+
case 'create': {
|
|
1102
|
+
if (!params.content) {
|
|
1103
|
+
return { content: [{ type: 'text', text: 'create requires a "content" parameter.' }], isError: true };
|
|
1104
|
+
}
|
|
1105
|
+
const body: any = { content: params.content, agent: agentId(cfg, api) };
|
|
1106
|
+
if (params.options?.length) body.options = params.options;
|
|
1107
|
+
if (params.deadline) body.deadline = params.deadline;
|
|
1108
|
+
if (params.context) body.context = params.context;
|
|
1109
|
+
data = await bbFetch(cfg, `${base}/decisions`, {
|
|
1110
|
+
method: 'POST',
|
|
1111
|
+
body: JSON.stringify(body),
|
|
1112
|
+
});
|
|
1113
|
+
break;
|
|
1114
|
+
}
|
|
1115
|
+
case 'resolve': {
|
|
1116
|
+
if (!params.id || !params.resolution) {
|
|
1117
|
+
return { content: [{ type: 'text', text: 'resolve requires "id" and "resolution" parameters.' }], isError: true };
|
|
1118
|
+
}
|
|
1119
|
+
data = await bbFetch(cfg, `${base}/decisions/${encodeURIComponent(params.id)}/resolve`, {
|
|
1120
|
+
method: 'POST',
|
|
1121
|
+
body: JSON.stringify({ resolution: params.resolution }),
|
|
1122
|
+
});
|
|
1123
|
+
break;
|
|
1124
|
+
}
|
|
1125
|
+
case 'defer': {
|
|
1126
|
+
if (!params.id || !params.reason) {
|
|
1127
|
+
return { content: [{ type: 'text', text: 'defer requires "id" and "reason" parameters.' }], isError: true };
|
|
1128
|
+
}
|
|
1129
|
+
data = await bbFetch(cfg, `${base}/decisions/${encodeURIComponent(params.id)}/defer`, {
|
|
1130
|
+
method: 'POST',
|
|
1131
|
+
body: JSON.stringify({ reason: params.reason }),
|
|
1132
|
+
});
|
|
1133
|
+
break;
|
|
1134
|
+
}
|
|
1135
|
+
default:
|
|
1136
|
+
return { content: [{ type: 'text', text: `Unknown decide action: ${params.action}` }], isError: true };
|
|
1137
|
+
}
|
|
1138
|
+
|
|
1139
|
+
return { content: [{ type: 'text', text: JSON.stringify(data, null, 2) }] };
|
|
1140
|
+
} catch (err: any) {
|
|
1141
|
+
return { content: [{ type: 'text', text: `MemTap decide error: ${err.message}` }], isError: true };
|
|
1142
|
+
}
|
|
1143
|
+
},
|
|
1144
|
+
});
|
|
1145
|
+
|
|
1146
|
+
// ── Tool: memtap_memory ──────────────────────────────────────────────────
|
|
1147
|
+
|
|
1148
|
+
api.registerTool({
|
|
1149
|
+
name: 'memtap_memory',
|
|
1150
|
+
description:
|
|
1151
|
+
'Manage individual memories in MemTap. Actions:\n' +
|
|
1152
|
+
'- get: retrieve a memory by ID\n' +
|
|
1153
|
+
'- update: update a memory (content, summary, importance, tags, archived)\n' +
|
|
1154
|
+
'- delete: archive a memory (soft delete) or hard-delete with hard=true',
|
|
1155
|
+
parameters: {
|
|
1156
|
+
type: 'object',
|
|
1157
|
+
properties: {
|
|
1158
|
+
action: {
|
|
1159
|
+
type: 'string',
|
|
1160
|
+
enum: ['get', 'update', 'delete'],
|
|
1161
|
+
description: 'Memory action',
|
|
1162
|
+
},
|
|
1163
|
+
id: { type: 'string', description: 'Memory ID' },
|
|
1164
|
+
content: { type: 'string', description: 'Updated content (for update)' },
|
|
1165
|
+
summary: { type: 'string', description: 'Updated summary (for update)' },
|
|
1166
|
+
importance: { type: 'number', description: 'Updated importance 1-10 (for update)' },
|
|
1167
|
+
tags: { type: 'array', items: { type: 'string' }, description: 'Updated tags (for update)' },
|
|
1168
|
+
archived: { type: 'boolean', description: 'Archive status (for update)' },
|
|
1169
|
+
hard: { type: 'boolean', description: 'Hard delete instead of archive (for delete, default false)' },
|
|
1170
|
+
},
|
|
1171
|
+
required: ['action', 'id'],
|
|
1172
|
+
},
|
|
1173
|
+
async execute(_id: string, params: {
|
|
1174
|
+
action: string; id: string; content?: string; summary?: string;
|
|
1175
|
+
importance?: number; tags?: string[]; archived?: boolean; hard?: boolean;
|
|
1176
|
+
}) {
|
|
1177
|
+
const cfg = getConfig(api);
|
|
1178
|
+
const base = baseUrl(cfg);
|
|
1179
|
+
|
|
1180
|
+
try {
|
|
1181
|
+
let data: any;
|
|
1182
|
+
|
|
1183
|
+
switch (params.action) {
|
|
1184
|
+
case 'get':
|
|
1185
|
+
data = await bbFetch(cfg, `${base}/memories/${encodeURIComponent(params.id)}`);
|
|
1186
|
+
return { content: [{ type: 'text', text: `Memory [${data._key || params.id}]:\n Content: ${data.content}\n Type: ${data.type} | Importance: ${data.importance}/10 | Tags: ${(data.tags || []).join(', ')}\n Created: ${data.created} | Accessed: ${data.accessCount || 0} times` }] };
|
|
1187
|
+
|
|
1188
|
+
case 'update': {
|
|
1189
|
+
const body: Record<string, any> = {};
|
|
1190
|
+
if (params.content !== undefined) body.content = params.content;
|
|
1191
|
+
if (params.summary !== undefined) body.summary = params.summary;
|
|
1192
|
+
if (params.importance !== undefined) body.importance = storeImportance(params.importance);
|
|
1193
|
+
if (params.tags !== undefined) body.tags = params.tags;
|
|
1194
|
+
if (params.archived !== undefined) body.archived = params.archived;
|
|
1195
|
+
data = await bbFetch(cfg, `${base}/memories/${encodeURIComponent(params.id)}/update`, {
|
|
1196
|
+
method: 'POST',
|
|
1197
|
+
body: JSON.stringify(body),
|
|
1198
|
+
});
|
|
1199
|
+
return { content: [{ type: 'text', text: `Memory [${params.id}] updated.` }] };
|
|
1200
|
+
}
|
|
1201
|
+
|
|
1202
|
+
case 'delete': {
|
|
1203
|
+
const url = new URL(`/memories/${encodeURIComponent(params.id)}`, base);
|
|
1204
|
+
if (params.hard) url.searchParams.set('hard', 'true');
|
|
1205
|
+
data = await bbFetch(cfg, url.toString(), { method: 'DELETE' });
|
|
1206
|
+
const action = data.deleted ? 'permanently deleted' : 'archived';
|
|
1207
|
+
return { content: [{ type: 'text', text: `Memory [${params.id}] ${action}.` }] };
|
|
1208
|
+
}
|
|
1209
|
+
|
|
1210
|
+
default:
|
|
1211
|
+
return { content: [{ type: 'text', text: `Unknown memory action: ${params.action}` }], isError: true };
|
|
1212
|
+
}
|
|
1213
|
+
} catch (err: any) {
|
|
1214
|
+
return { content: [{ type: 'text', text: `MemTap memory error: ${err.message}` }], isError: true };
|
|
1215
|
+
}
|
|
1216
|
+
},
|
|
1217
|
+
});
|
|
1218
|
+
|
|
1219
|
+
// ── Tool: memtap_entities ─────────────────────────────────────────────────
|
|
1220
|
+
|
|
1221
|
+
api.registerTool({
|
|
1222
|
+
name: 'memtap_entities',
|
|
1223
|
+
description:
|
|
1224
|
+
'Manage entities in the MemTap knowledge graph. Actions:\n' +
|
|
1225
|
+
'- list: list all entities (optional type filter)\n' +
|
|
1226
|
+
'- memories: get memories linked to an entity (requires key)\n' +
|
|
1227
|
+
'- merge: merge duplicate entities (requires key, mergeFrom)',
|
|
1228
|
+
parameters: {
|
|
1229
|
+
type: 'object',
|
|
1230
|
+
properties: {
|
|
1231
|
+
action: {
|
|
1232
|
+
type: 'string',
|
|
1233
|
+
enum: ['list', 'memories', 'merge'],
|
|
1234
|
+
description: 'Entity action',
|
|
1235
|
+
},
|
|
1236
|
+
key: { type: 'string', description: 'Entity key (for memories/merge)' },
|
|
1237
|
+
type: { type: 'string', description: 'Filter by entity type (for list)' },
|
|
1238
|
+
mergeFrom: { type: 'string', description: 'Entity key to merge from (for merge)' },
|
|
1239
|
+
limit: { type: 'number', description: 'Max results (default 50)' },
|
|
1240
|
+
},
|
|
1241
|
+
required: ['action'],
|
|
1242
|
+
},
|
|
1243
|
+
async execute(_id: string, params: { action: string; key?: string; type?: string; mergeFrom?: string; limit?: number }) {
|
|
1244
|
+
const cfg = getConfig(api);
|
|
1245
|
+
const base = baseUrl(cfg);
|
|
1246
|
+
|
|
1247
|
+
try {
|
|
1248
|
+
let data: any;
|
|
1249
|
+
|
|
1250
|
+
switch (params.action) {
|
|
1251
|
+
case 'list': {
|
|
1252
|
+
const url = new URL('/entities', base);
|
|
1253
|
+
if (params.type) url.searchParams.set('type', params.type);
|
|
1254
|
+
if (params.limit) url.searchParams.set('limit', String(params.limit));
|
|
1255
|
+
data = await bbFetch(cfg, url.toString());
|
|
1256
|
+
const entities = Array.isArray(data) ? data : [];
|
|
1257
|
+
if (entities.length === 0) {
|
|
1258
|
+
return { content: [{ type: 'text', text: 'No entities found.' }] };
|
|
1259
|
+
}
|
|
1260
|
+
const formatted = entities.map((e: any) =>
|
|
1261
|
+
`- [${e._key}] ${e.name} (type: ${e.type || 'unknown'}${e.aliases?.length ? `, aliases: ${e.aliases.join(', ')}` : ''})`
|
|
1262
|
+
).join('\n');
|
|
1263
|
+
return { content: [{ type: 'text', text: `${entities.length} entities:\n${formatted}` }] };
|
|
1264
|
+
}
|
|
1265
|
+
|
|
1266
|
+
case 'memories': {
|
|
1267
|
+
if (!params.key) {
|
|
1268
|
+
return { content: [{ type: 'text', text: 'memories requires a "key" parameter.' }], isError: true };
|
|
1269
|
+
}
|
|
1270
|
+
const url = new URL(`/entities/${encodeURIComponent(params.key)}/memories`, base);
|
|
1271
|
+
if (params.limit) url.searchParams.set('limit', String(params.limit));
|
|
1272
|
+
data = await bbFetch(cfg, url.toString());
|
|
1273
|
+
const mems = Array.isArray(data) ? data : [];
|
|
1274
|
+
if (mems.length === 0) {
|
|
1275
|
+
return { content: [{ type: 'text', text: `No memories linked to entity "${params.key}".` }] };
|
|
1276
|
+
}
|
|
1277
|
+
const formatted = mems.map((m: any, i: number) =>
|
|
1278
|
+
`${i + 1}. [${m._key}] ${m.content?.substring(0, 120)} (type: ${m.type}, importance: ${m.importance}/10)`
|
|
1279
|
+
).join('\n');
|
|
1280
|
+
return { content: [{ type: 'text', text: `${mems.length} memories for entity "${params.key}":\n${formatted}` }] };
|
|
1281
|
+
}
|
|
1282
|
+
|
|
1283
|
+
case 'merge': {
|
|
1284
|
+
if (!params.key || !params.mergeFrom) {
|
|
1285
|
+
return { content: [{ type: 'text', text: 'merge requires "key" (target) and "mergeFrom" (source) parameters.' }], isError: true };
|
|
1286
|
+
}
|
|
1287
|
+
data = await bbFetch(cfg, `${base}/entities/${encodeURIComponent(params.key)}/merge`, {
|
|
1288
|
+
method: 'POST',
|
|
1289
|
+
body: JSON.stringify({ mergeFrom: params.mergeFrom }),
|
|
1290
|
+
});
|
|
1291
|
+
return { content: [{ type: 'text', text: `Entity "${params.mergeFrom}" merged into "${params.key}". Aliases: ${(data.aliases || []).join(', ')}` }] };
|
|
1292
|
+
}
|
|
1293
|
+
|
|
1294
|
+
default:
|
|
1295
|
+
return { content: [{ type: 'text', text: `Unknown entity action: ${params.action}` }], isError: true };
|
|
1296
|
+
}
|
|
1297
|
+
} catch (err: any) {
|
|
1298
|
+
return { content: [{ type: 'text', text: `MemTap entities error: ${err.message}` }], isError: true };
|
|
1299
|
+
}
|
|
1300
|
+
},
|
|
1301
|
+
});
|
|
1302
|
+
|
|
1303
|
+
// ── Tool: memtap_edges ────────────────────────────────────────────────────
|
|
1304
|
+
|
|
1305
|
+
api.registerTool({
|
|
1306
|
+
name: 'memtap_edges',
|
|
1307
|
+
description:
|
|
1308
|
+
'Create edges (relationships) between memories in the knowledge graph. ' +
|
|
1309
|
+
'Edge types: RELATED_TO, UPDATES, CONTRADICTS, CAUSED_BY, PART_OF, DEPENDS_ON, MENTIONS.',
|
|
1310
|
+
parameters: {
|
|
1311
|
+
type: 'object',
|
|
1312
|
+
properties: {
|
|
1313
|
+
from: { type: 'string', description: 'Source memory ID' },
|
|
1314
|
+
to: { type: 'string', description: 'Target memory ID' },
|
|
1315
|
+
type: {
|
|
1316
|
+
type: 'string',
|
|
1317
|
+
enum: ['RELATED_TO', 'UPDATES', 'CONTRADICTS', 'CAUSED_BY', 'PART_OF', 'DEPENDS_ON', 'MENTIONS'],
|
|
1318
|
+
description: 'Edge type',
|
|
1319
|
+
},
|
|
1320
|
+
weight: { type: 'number', description: 'Edge weight 0-1 (default 0.5)' },
|
|
1321
|
+
},
|
|
1322
|
+
required: ['from', 'to', 'type'],
|
|
1323
|
+
},
|
|
1324
|
+
async execute(_id: string, params: { from: string; to: string; type: string; weight?: number }) {
|
|
1325
|
+
const cfg = getConfig(api);
|
|
1326
|
+
|
|
1327
|
+
try {
|
|
1328
|
+
const data = await bbFetch(cfg, `${baseUrl(cfg)}/relate`, {
|
|
1329
|
+
method: 'POST',
|
|
1330
|
+
body: JSON.stringify({
|
|
1331
|
+
from: params.from,
|
|
1332
|
+
to: params.to,
|
|
1333
|
+
type: params.type,
|
|
1334
|
+
weight: params.weight ?? 0.5,
|
|
1335
|
+
}),
|
|
1336
|
+
});
|
|
1337
|
+
|
|
1338
|
+
return { content: [{ type: 'text', text: `Edge created: ${params.from} -[${params.type}]-> ${params.to} (weight: ${params.weight ?? 0.5})` }] };
|
|
1339
|
+
} catch (err: any) {
|
|
1340
|
+
return { content: [{ type: 'text', text: `MemTap edge error: ${err.message}` }], isError: true };
|
|
1341
|
+
}
|
|
1342
|
+
},
|
|
1343
|
+
});
|
|
1344
|
+
|
|
1345
|
+
// ── Tool: memtap_health (Enhanced Neural Monitoring) ────────────────────────
|
|
1346
|
+
|
|
1347
|
+
api.registerTool({
|
|
1348
|
+
name: 'memtap_health',
|
|
1349
|
+
description:
|
|
1350
|
+
'Check MemTap server health and get neural system statistics. Actions:\n' +
|
|
1351
|
+
'- health: server health check (ArangoDB version, counts)\n' +
|
|
1352
|
+
'- stats: detailed statistics (by type, by agent, entity/edge counts)\n' +
|
|
1353
|
+
'- neural: neuromimetic system status (working memory, attention, consolidation)\n' +
|
|
1354
|
+
'- performance: system performance metrics (cache hits, response times, memory usage)\n' +
|
|
1355
|
+
'- analytics: user behavior analytics (engagement, success rates, patterns)',
|
|
1356
|
+
parameters: {
|
|
1357
|
+
type: 'object',
|
|
1358
|
+
properties: {
|
|
1359
|
+
action: {
|
|
1360
|
+
type: 'string',
|
|
1361
|
+
enum: ['health', 'stats', 'neural', 'performance', 'analytics'],
|
|
1362
|
+
description: 'Which check to run (default: health)',
|
|
1363
|
+
},
|
|
1364
|
+
agent: { type: 'string', description: 'Specific agent for neural/performance analysis (optional)' },
|
|
1365
|
+
timeRange: { type: 'string', description: 'Time range for analytics (1h, 24h, 7d) default: 24h' }
|
|
1366
|
+
},
|
|
1367
|
+
},
|
|
1368
|
+
async execute(_id: string, params: { action?: string; agent?: string; timeRange?: string }) {
|
|
1369
|
+
const cfg = getConfig(api);
|
|
1370
|
+
const base = baseUrl(cfg);
|
|
1371
|
+
const action = params.action || 'health';
|
|
1372
|
+
|
|
1373
|
+
try {
|
|
1374
|
+
let data: any;
|
|
1375
|
+
|
|
1376
|
+
switch (action) {
|
|
1377
|
+
case 'health':
|
|
1378
|
+
data = await bbFetch(cfg, `${base}/health`);
|
|
1379
|
+
const counts = data.counts || {};
|
|
1380
|
+
return { content: [{ type: 'text', text: `MemTap: ${data.status}\nArangoDB: ${data.arango || 'unknown'}\nMemories: ${counts.memories ?? '?'} | Entities: ${counts.entities ?? '?'} | Edges: ${counts.edges ?? '?'}` }] };
|
|
1381
|
+
|
|
1382
|
+
case 'stats':
|
|
1383
|
+
data = await bbFetch(cfg, `${base}/stats`);
|
|
1384
|
+
const byType = (data.byType || []).map((t: any) => ` ${t.type}: ${t.count}`).join('\n');
|
|
1385
|
+
const byAgent = (data.byAgent || []).map((a: any) => ` ${a.agent}: ${a.count}`).join('\n');
|
|
1386
|
+
return { content: [{ type: 'text', text: `MemTap Stats:\nTotal: ${data.total ?? '?'} memories | ${data.entityCount ?? '?'} entities | ${data.edgeCount ?? '?'} edges\n\nBy type:\n${byType || ' (none)'}\n\nBy agent:\n${byAgent || ' (none)'}` }] };
|
|
1387
|
+
|
|
1388
|
+
case 'neural':
|
|
1389
|
+
return { content: [{ type: 'text', text: generateNeuralReport(params.agent) }] };
|
|
1390
|
+
|
|
1391
|
+
case 'performance':
|
|
1392
|
+
return { content: [{ type: 'text', text: generatePerformanceReport(params.agent) }] };
|
|
1393
|
+
|
|
1394
|
+
case 'analytics':
|
|
1395
|
+
return { content: [{ type: 'text', text: generateAnalyticsReport(params.agent, params.timeRange || '24h') }] };
|
|
1396
|
+
|
|
1397
|
+
default:
|
|
1398
|
+
return { content: [{ type: 'text', text: `Unknown health action: ${action}` }], isError: true };
|
|
1399
|
+
}
|
|
1400
|
+
} catch (err: any) {
|
|
1401
|
+
return { content: [{ type: 'text', text: `MemTap health error: ${err.message}` }], isError: true };
|
|
1402
|
+
}
|
|
1403
|
+
},
|
|
1404
|
+
});
|
|
1405
|
+
|
|
1406
|
+
// ── Tool: memtap_monitor (Real-time Neural Activity) ─────────────────────
|
|
1407
|
+
|
|
1408
|
+
api.registerTool({
|
|
1409
|
+
name: 'memtap_monitor',
|
|
1410
|
+
description:
|
|
1411
|
+
'Real-time monitoring of neural memory activity. Shows live working memory, attention patterns, ' +
|
|
1412
|
+
'and memory operations as they happen. Use for debugging and optimization.',
|
|
1413
|
+
parameters: {
|
|
1414
|
+
type: 'object',
|
|
1415
|
+
properties: {
|
|
1416
|
+
action: {
|
|
1417
|
+
type: 'string',
|
|
1418
|
+
enum: ['live', 'working-memory', 'attention', 'cache', 'consolidation'],
|
|
1419
|
+
description: 'Type of monitoring to display'
|
|
1420
|
+
},
|
|
1421
|
+
agent: { type: 'string', description: 'Specific agent to monitor (optional)' },
|
|
1422
|
+
duration: { type: 'number', description: 'Monitoring duration in seconds (default: 30)' }
|
|
1423
|
+
},
|
|
1424
|
+
required: ['action']
|
|
1425
|
+
},
|
|
1426
|
+
async execute(_id: string, params: { action: string; agent?: string; duration?: number }) {
|
|
1427
|
+
const duration = params.duration || 30;
|
|
1428
|
+
|
|
1429
|
+
try {
|
|
1430
|
+
switch (params.action) {
|
|
1431
|
+
case 'live':
|
|
1432
|
+
return { content: [{ type: 'text', text: generateLiveMonitoring(params.agent, duration) }] };
|
|
1433
|
+
|
|
1434
|
+
case 'working-memory':
|
|
1435
|
+
return { content: [{ type: 'text', text: generateWorkingMemoryReport(params.agent) }] };
|
|
1436
|
+
|
|
1437
|
+
case 'attention':
|
|
1438
|
+
return { content: [{ type: 'text', text: generateAttentionReport(params.agent) }] };
|
|
1439
|
+
|
|
1440
|
+
case 'cache':
|
|
1441
|
+
return { content: [{ type: 'text', text: generateCacheReport(params.agent) }] };
|
|
1442
|
+
|
|
1443
|
+
case 'consolidation':
|
|
1444
|
+
return { content: [{ type: 'text', text: generateConsolidationReport(params.agent) }] };
|
|
1445
|
+
|
|
1446
|
+
default:
|
|
1447
|
+
return { content: [{ type: 'text', text: `Unknown monitor action: ${params.action}` }], isError: true };
|
|
1448
|
+
}
|
|
1449
|
+
} catch (err: any) {
|
|
1450
|
+
return { content: [{ type: 'text', text: `MemTap monitor error: ${err.message}` }], isError: true };
|
|
1451
|
+
}
|
|
1452
|
+
},
|
|
1453
|
+
});
|
|
1454
|
+
|
|
1455
|
+
// ── Hook: Smart Memory Recall (preMessage) ──────────────────────────────
|
|
1456
|
+
|
|
1457
|
+
api.registerHook(
|
|
1458
|
+
'preMessage',
|
|
1459
|
+
async (event: any) => {
|
|
1460
|
+
const cfg = getConfig(api);
|
|
1461
|
+
|
|
1462
|
+
const message = event.context?.message || event.message || '';
|
|
1463
|
+
if (!message || message.length < 10) return;
|
|
1464
|
+
|
|
1465
|
+
// Neuromimetic Tiered Memory Recall with Working Memory Integration
|
|
1466
|
+
const currentAgent = agentId(cfg, api);
|
|
1467
|
+
const conversationCtx = analyzeConversationContext(currentAgent, message);
|
|
1468
|
+
const recallLevel = analyzeRecallLevel(message, currentAgent);
|
|
1469
|
+
|
|
1470
|
+
// Update Working Memory with current focus
|
|
1471
|
+
const topics = recallLevel.topics;
|
|
1472
|
+
updateWorkingMemory(currentAgent, topics);
|
|
1473
|
+
|
|
1474
|
+
// Check cache first for performance (with neuromimetic enhancements)
|
|
1475
|
+
const cacheKey = `${currentAgent}:${message.substring(0, 100)}`;
|
|
1476
|
+
const cached = memoryCache.get(cacheKey);
|
|
1477
|
+
const cacheValid = cached && (Date.now() - cached.timestamp) < 300000; // 5min cache
|
|
1478
|
+
|
|
1479
|
+
if (recallLevel.intensity > 0) {
|
|
1480
|
+
try {
|
|
1481
|
+
let memories: any[] = [];
|
|
1482
|
+
let contextTitle = '';
|
|
1483
|
+
let bulletinContext = '';
|
|
1484
|
+
let useCache = false;
|
|
1485
|
+
|
|
1486
|
+
// Use cache if available and confidence is low
|
|
1487
|
+
if (cacheValid && recallLevel.confidence < 0.7) {
|
|
1488
|
+
memories = cached!.data;
|
|
1489
|
+
contextTitle = `Cached Context (${recallLevel.reasoning})`;
|
|
1490
|
+
useCache = true;
|
|
1491
|
+
} else {
|
|
1492
|
+
|
|
1493
|
+
// Dynamic limits based on user profile
|
|
1494
|
+
const profile = getUserProfile(currentAgent);
|
|
1495
|
+
const baseLimit = profile.recallSensitivity === 'high' ? 1.5 :
|
|
1496
|
+
profile.recallSensitivity === 'low' ? 0.7 : 1.0;
|
|
1497
|
+
|
|
1498
|
+
if (recallLevel.intensity === 1) {
|
|
1499
|
+
// Light: Quick context
|
|
1500
|
+
const limit = Math.ceil(2 * baseLimit);
|
|
1501
|
+
const data = await bbFetch(cfg, `${baseUrl(cfg)}/recall?q=${encodeURIComponent(message)}&limit=${limit}&agent=${currentAgent}`);
|
|
1502
|
+
memories = data.results || data.memories || [];
|
|
1503
|
+
contextTitle = `Quick Context (confidence: ${Math.round(recallLevel.confidence * 100)}%)`;
|
|
1504
|
+
}
|
|
1505
|
+
else if (recallLevel.intensity === 2) {
|
|
1506
|
+
// Standard: Normal recall
|
|
1507
|
+
const limit = Math.ceil(4 * baseLimit);
|
|
1508
|
+
const data = await bbFetch(cfg, `${baseUrl(cfg)}/recall?q=${encodeURIComponent(message)}&limit=${limit}&agent=${currentAgent}`);
|
|
1509
|
+
memories = data.results || data.memories || [];
|
|
1510
|
+
contextTitle = `Relevant Memories (confidence: ${Math.round(recallLevel.confidence * 100)}%)`;
|
|
1511
|
+
}
|
|
1512
|
+
else if (recallLevel.intensity === 3) {
|
|
1513
|
+
// Deep: Full context with bulletin
|
|
1514
|
+
const limit = Math.ceil(6 * baseLimit);
|
|
1515
|
+
|
|
1516
|
+
const [recallData, bulletinData] = await Promise.all([
|
|
1517
|
+
bbFetch(cfg, `${baseUrl(cfg)}/recall?q=${encodeURIComponent(message)}&limit=${limit}&agent=${currentAgent}`),
|
|
1518
|
+
bbFetch(cfg, `${baseUrl(cfg)}/bulletin`, {
|
|
1519
|
+
method: 'POST',
|
|
1520
|
+
body: JSON.stringify({
|
|
1521
|
+
topics: recallLevel.topics,
|
|
1522
|
+
agent: currentAgent,
|
|
1523
|
+
limit: Math.ceil(2 * baseLimit)
|
|
1524
|
+
})
|
|
1525
|
+
}).catch(() => ({ sections: [] }))
|
|
1526
|
+
]);
|
|
1527
|
+
|
|
1528
|
+
memories = recallData.results || recallData.memories || [];
|
|
1529
|
+
const sections = bulletinData.sections || [];
|
|
1530
|
+
contextTitle = `Deep Memory Context (confidence: ${Math.round(recallLevel.confidence * 100)}%)`;
|
|
1531
|
+
|
|
1532
|
+
if (sections.length > 0) {
|
|
1533
|
+
bulletinContext = sections.map((s: any) => {
|
|
1534
|
+
const items = (s.memories || []).slice(0, 2).map((m: any) =>
|
|
1535
|
+
`- [${m.type}] ${m.summary} (${Math.round(m.importance)}/10)`
|
|
1536
|
+
).join('\n');
|
|
1537
|
+
const graphItems = (s.graphContext || []).slice(0, 1).map((m: any) =>
|
|
1538
|
+
`- [via graph] ${m.summary}`
|
|
1539
|
+
).join('\n');
|
|
1540
|
+
return `**${s.topic}:**\n${items}${graphItems ? '\n' + graphItems : ''}`;
|
|
1541
|
+
}).join('\n\n');
|
|
1542
|
+
}
|
|
1543
|
+
}
|
|
1544
|
+
|
|
1545
|
+
// Cache results for performance
|
|
1546
|
+
if (memories.length > 0) {
|
|
1547
|
+
memoryCache.set(cacheKey, {
|
|
1548
|
+
data: memories,
|
|
1549
|
+
timestamp: Date.now(),
|
|
1550
|
+
query: message
|
|
1551
|
+
});
|
|
1552
|
+
|
|
1553
|
+
// Limit cache size
|
|
1554
|
+
if (memoryCache.size > 100) {
|
|
1555
|
+
const oldestKey = Array.from(memoryCache.keys())[0];
|
|
1556
|
+
memoryCache.delete(oldestKey);
|
|
1557
|
+
}
|
|
1558
|
+
}
|
|
1559
|
+
}
|
|
1560
|
+
|
|
1561
|
+
if (memories.length > 0) {
|
|
1562
|
+
// Retrieval Practice Strengthening - memories get stronger when accessed
|
|
1563
|
+
memories.forEach(async (m: any) => {
|
|
1564
|
+
try {
|
|
1565
|
+
if (!useCache && m.id) {
|
|
1566
|
+
// Update retrieval count and strengthen memory
|
|
1567
|
+
const strengthBoost = FORGETTING_CURVE.retrievalStrengthening *
|
|
1568
|
+
(context?.attentionLevel === 'focused' ? 1.2 : 1.0);
|
|
1569
|
+
|
|
1570
|
+
// Background update to boost importance (fire and forget)
|
|
1571
|
+
bbFetch(cfg, `${baseUrl(cfg)}/memories/${encodeURIComponent(m.id)}/access`, {
|
|
1572
|
+
method: 'POST',
|
|
1573
|
+
body: JSON.stringify({
|
|
1574
|
+
boost: strengthBoost,
|
|
1575
|
+
retrievalCount: (m.retrievalCount || 0) + 1,
|
|
1576
|
+
lastAccess: Date.now(),
|
|
1577
|
+
attentionLevel: context?.attentionLevel || 'unknown'
|
|
1578
|
+
})
|
|
1579
|
+
}).catch(() => {}); // Silent fail
|
|
1580
|
+
|
|
1581
|
+
// Update local cache retrieval count
|
|
1582
|
+
if (memoryCache.has(cacheKey)) {
|
|
1583
|
+
const cached = memoryCache.get(cacheKey)!;
|
|
1584
|
+
cached.retrievalCount = (cached.retrievalCount || 0) + 1;
|
|
1585
|
+
}
|
|
1586
|
+
}
|
|
1587
|
+
} catch { /* ignore update failures */ }
|
|
1588
|
+
});
|
|
1589
|
+
|
|
1590
|
+
// Sort by relevance + importance + recency
|
|
1591
|
+
memories.sort((a, b) => {
|
|
1592
|
+
const scoreA = (a.importance || 0.5) * 0.6 + (a.relevanceScore || 0.5) * 0.3 + (a.recency || 0) * 0.1;
|
|
1593
|
+
const scoreB = (b.importance || 0.5) * 0.6 + (b.relevanceScore || 0.5) * 0.3 + (b.recency || 0) * 0.1;
|
|
1594
|
+
return scoreB - scoreA;
|
|
1595
|
+
});
|
|
1596
|
+
|
|
1597
|
+
// Format memories with enhanced context
|
|
1598
|
+
const memoryContext = memories.map((m: any, i: number) => {
|
|
1599
|
+
const entities = (m.entities || []).map((e: any) => e.name).join(', ');
|
|
1600
|
+
const age = m.created ? Math.floor((Date.now() - Date.parse(m.created)) / (1000 * 60 * 60 * 24)) : '?';
|
|
1601
|
+
const hotness = m.accessCount > 5 ? ' 🔥' : m.accessCount > 2 ? ' ⚡' : '';
|
|
1602
|
+
|
|
1603
|
+
return `${i + 1}. [${m.type}] ${m.content}${hotness}\n └─ Importance: ${displayImportance(m.importance)}/10${entities ? ` | Entities: ${entities}` : ''} | Age: ${age}d`;
|
|
1604
|
+
}).join('\n\n');
|
|
1605
|
+
|
|
1606
|
+
let injection = `
|
|
1607
|
+
## ${contextTitle}:
|
|
1608
|
+
${memoryContext}
|
|
1609
|
+
`;
|
|
1610
|
+
|
|
1611
|
+
// Add bulletin context if available
|
|
1612
|
+
if (bulletinContext) {
|
|
1613
|
+
injection += `\n## Topic Context:\n${bulletinContext}`;
|
|
1614
|
+
}
|
|
1615
|
+
|
|
1616
|
+
// Add conversation context hints
|
|
1617
|
+
const context = conversationState.get(currentAgent);
|
|
1618
|
+
if (context?.dominantTopic) {
|
|
1619
|
+
injection += `\n## Conversation Context:\nDominant topic: ${context.dominantTopic} | Engagement: ${context.userEngagement}`;
|
|
1620
|
+
}
|
|
1621
|
+
|
|
1622
|
+
// Enhanced instructions based on context
|
|
1623
|
+
let instructions = '\n\n**Memory Instructions:**\n';
|
|
1624
|
+
instructions += `- Use this context when relevant (confidence: ${Math.round(recallLevel.confidence * 100)}%)\n`;
|
|
1625
|
+
instructions += `- Reasoning: ${recallLevel.reasoning}\n`;
|
|
1626
|
+
if (recallLevel.confidence < 0.7) {
|
|
1627
|
+
instructions += '- Consider using memtap_recall() for more specific information\n';
|
|
1628
|
+
}
|
|
1629
|
+
if (useCache) {
|
|
1630
|
+
instructions += '- This is cached data - use memtap_recall() for fresh results if needed\n';
|
|
1631
|
+
}
|
|
1632
|
+
|
|
1633
|
+
injection += instructions;
|
|
1634
|
+
|
|
1635
|
+
// Append to existing system prompt
|
|
1636
|
+
event.context.systemPrompt = (event.context.systemPrompt || '') + injection;
|
|
1637
|
+
|
|
1638
|
+
// Update metrics
|
|
1639
|
+
updateUserProfile(currentAgent, recallLevel.intensity, true);
|
|
1640
|
+
|
|
1641
|
+
const logMsg = useCache ?
|
|
1642
|
+
`[memtap] ${contextTitle} injected: ${memories.length} cached memories` :
|
|
1643
|
+
`[memtap] ${contextTitle} injected: ${memories.length} memories (level ${recallLevel.intensity}, conf: ${Math.round(recallLevel.confidence * 100)}%)`;
|
|
1644
|
+
|
|
1645
|
+
logger.info?.(logMsg) ?? console.log(logMsg);
|
|
1646
|
+
} else {
|
|
1647
|
+
// No memories found - update metrics
|
|
1648
|
+
updateUserProfile(currentAgent, recallLevel.intensity, false);
|
|
1649
|
+
}
|
|
1650
|
+
} catch (err: any) {
|
|
1651
|
+
logger.warn?.(`[memtap] preMessage recall failed: ${err.message}`) ??
|
|
1652
|
+
console.warn(`[memtap] preMessage recall failed: ${err.message}`);
|
|
1653
|
+
}
|
|
1654
|
+
}
|
|
1655
|
+
},
|
|
1656
|
+
{
|
|
1657
|
+
name: 'memtap.smart-recall',
|
|
1658
|
+
description: 'Smart automatic memory recall based on message content analysis',
|
|
1659
|
+
}
|
|
1660
|
+
);
|
|
1661
|
+
|
|
1662
|
+
// ── Hook: Enhanced Auto-Capture with Learning (message_completed) ─────────
|
|
1663
|
+
|
|
1664
|
+
api.registerHook(
|
|
1665
|
+
'message_completed',
|
|
1666
|
+
async (event: any) => {
|
|
1667
|
+
const cfg = getConfig(api);
|
|
1668
|
+
if (!cfg.autoCapture) return;
|
|
1669
|
+
|
|
1670
|
+
const content = event.context?.content;
|
|
1671
|
+
if (!content || content === 'NO_REPLY' || content === 'HEARTBEAT_OK') return;
|
|
1672
|
+
if (content.length < 80) return;
|
|
1673
|
+
|
|
1674
|
+
const currentAgent = agentId(cfg, api);
|
|
1675
|
+
|
|
1676
|
+
(async () => {
|
|
1677
|
+
try {
|
|
1678
|
+
const context = conversationState.get(currentAgent);
|
|
1679
|
+
const workingMemory = workingMemoryState.get(currentAgent);
|
|
1680
|
+
|
|
1681
|
+
// Attention-gated encoding - only encode if attention allows
|
|
1682
|
+
if (context && !shouldEncodeMemory(
|
|
1683
|
+
content,
|
|
1684
|
+
context.attentionLevel,
|
|
1685
|
+
context.emotionalContext,
|
|
1686
|
+
workingMemory?.cognitiveLoad || 0
|
|
1687
|
+
)) {
|
|
1688
|
+
return; // Skip encoding due to low attention/high cognitive load
|
|
1689
|
+
}
|
|
1690
|
+
|
|
1691
|
+
const memories = await llmExtract(cfg, content);
|
|
1692
|
+
if (!memories.length) return;
|
|
1693
|
+
|
|
1694
|
+
let stored = 0;
|
|
1695
|
+
|
|
1696
|
+
for (const mem of memories) {
|
|
1697
|
+
if (!mem.content || mem.content.length < 10) continue;
|
|
1698
|
+
|
|
1699
|
+
try {
|
|
1700
|
+
// Calculate emotional weighting
|
|
1701
|
+
const memoryType = mem.type || 'fact';
|
|
1702
|
+
const emotionalWeight = EMOTIONAL_WEIGHTS[memoryType] || 1.0;
|
|
1703
|
+
const contextualWeight = context?.emotionalContext === 'excited' ? 1.2 :
|
|
1704
|
+
context?.emotionalContext === 'positive' ? 1.1 :
|
|
1705
|
+
context?.emotionalContext === 'negative' ? 1.15 : 1.0;
|
|
1706
|
+
|
|
1707
|
+
const finalImportance = (mem.importance ?? 5) * emotionalWeight * contextualWeight;
|
|
1708
|
+
|
|
1709
|
+
// Enhanced memory with neuromimetic features
|
|
1710
|
+
const enhancedMem = {
|
|
1711
|
+
content: mem.content,
|
|
1712
|
+
type: memoryType,
|
|
1713
|
+
agent: currentAgent,
|
|
1714
|
+
importance: storeImportance(Math.min(10, finalImportance)),
|
|
1715
|
+
tags: [
|
|
1716
|
+
...(mem.tags || []),
|
|
1717
|
+
'auto-captured',
|
|
1718
|
+
'neuromimetic',
|
|
1719
|
+
...(context?.dominantTopic ? [`topic:${context.dominantTopic}`] : []),
|
|
1720
|
+
`engagement:${context?.userEngagement || 'unknown'}`,
|
|
1721
|
+
`attention:${context?.attentionLevel || 'unknown'}`,
|
|
1722
|
+
`emotion:${context?.emotionalContext || 'neutral'}`
|
|
1723
|
+
],
|
|
1724
|
+
source: 'plugin:neuromimetic-capture-v2.1',
|
|
1725
|
+
conversationContext: {
|
|
1726
|
+
dominantTopic: context?.dominantTopic,
|
|
1727
|
+
engagement: context?.userEngagement,
|
|
1728
|
+
queryCount: context?.memoryQueryCount || 0,
|
|
1729
|
+
attentionLevel: context?.attentionLevel,
|
|
1730
|
+
emotionalContext: context?.emotionalContext
|
|
1731
|
+
},
|
|
1732
|
+
neuromimeticData: {
|
|
1733
|
+
emotionalIntensity: contextualWeight,
|
|
1734
|
+
attentionStrength: context?.attentionLevel === 'flow' ? 1.0 :
|
|
1735
|
+
context?.attentionLevel === 'focused' ? 0.8 : 0.5,
|
|
1736
|
+
consolidationScore: 0.1, // Will increase with dream cycles
|
|
1737
|
+
retrievalCount: 0,
|
|
1738
|
+
lastAccess: Date.now()
|
|
1739
|
+
}
|
|
1740
|
+
};
|
|
1741
|
+
|
|
1742
|
+
// Create episodic memory entry
|
|
1743
|
+
if (context && memoryType === 'event') {
|
|
1744
|
+
const episodic = createEpisodicMemory(currentAgent, mem.content, context);
|
|
1745
|
+
const episodics = episodicMemories.get(currentAgent) || [];
|
|
1746
|
+
episodics.push(episodic);
|
|
1747
|
+
episodicMemories.set(currentAgent, episodics.slice(-100)); // Keep last 100
|
|
1748
|
+
}
|
|
1749
|
+
|
|
1750
|
+
await bbFetch(cfg, `${baseUrl(cfg)}/memories`, {
|
|
1751
|
+
method: 'POST',
|
|
1752
|
+
body: JSON.stringify(enhancedMem),
|
|
1753
|
+
});
|
|
1754
|
+
stored++;
|
|
1755
|
+
|
|
1756
|
+
// Cross-session memory sharing for MemTap agents
|
|
1757
|
+
if (currentAgent.includes('memtap') && mem.type === 'decision') {
|
|
1758
|
+
try {
|
|
1759
|
+
// Share important decisions across MemTap agent ecosystem
|
|
1760
|
+
const crossSessionMem = {
|
|
1761
|
+
...enhancedMem,
|
|
1762
|
+
agent: 'memtap-shared',
|
|
1763
|
+
tags: [...enhancedMem.tags, 'cross-session', `from:${currentAgent}`]
|
|
1764
|
+
};
|
|
1765
|
+
|
|
1766
|
+
await bbFetch(cfg, `${baseUrl(cfg)}/memories`, {
|
|
1767
|
+
method: 'POST',
|
|
1768
|
+
body: JSON.stringify(crossSessionMem)
|
|
1769
|
+
});
|
|
1770
|
+
} catch { /* cross-session sharing failed, not critical */ }
|
|
1771
|
+
}
|
|
1772
|
+
|
|
1773
|
+
} catch { /* skip individual failures */ }
|
|
1774
|
+
}
|
|
1775
|
+
|
|
1776
|
+
if (stored > 0) {
|
|
1777
|
+
// Update user profile with successful capture
|
|
1778
|
+
const profile = getUserProfile(currentAgent);
|
|
1779
|
+
profile.successfulRecalls += stored; // Treat captures as successes
|
|
1780
|
+
userProfiles.set(currentAgent, profile);
|
|
1781
|
+
|
|
1782
|
+
logger.info?.(`[memtap] Auto-captured ${stored} enhanced memories with context`) ??
|
|
1783
|
+
console.log(`[memtap] Auto-captured ${stored} enhanced memories with context`);
|
|
1784
|
+
}
|
|
1785
|
+
|
|
1786
|
+
} catch (err: any) {
|
|
1787
|
+
logger.warn?.(`[memtap] Auto-capture failed: ${err.message}`) ??
|
|
1788
|
+
console.warn(`[memtap] Auto-capture failed: ${err.message}`);
|
|
1789
|
+
}
|
|
1790
|
+
})();
|
|
1791
|
+
},
|
|
1792
|
+
{
|
|
1793
|
+
name: 'memtap.auto-capture-enhanced',
|
|
1794
|
+
description: 'Enhanced LLM-based memory extraction with context awareness and cross-session sharing',
|
|
1795
|
+
}
|
|
1796
|
+
);
|
|
1797
|
+
|
|
1798
|
+
// ── Hook: Bulletin on Bootstrap (agent:bootstrap) ──────────────────────────
|
|
1799
|
+
|
|
1800
|
+
api.registerHook(
|
|
1801
|
+
'agent:bootstrap',
|
|
1802
|
+
async (event: any) => {
|
|
1803
|
+
const cfg = getConfig(api);
|
|
1804
|
+
if (!cfg.bulletinOnBoot) return;
|
|
1805
|
+
|
|
1806
|
+
const topics = cfg.bulletinTopics || ['active projects', 'recent decisions', 'pending tasks'];
|
|
1807
|
+
|
|
1808
|
+
try {
|
|
1809
|
+
const data = await bbFetch(cfg, `${baseUrl(cfg)}/bulletin`, {
|
|
1810
|
+
method: 'POST',
|
|
1811
|
+
body: JSON.stringify({
|
|
1812
|
+
topics,
|
|
1813
|
+
agent: agentId(cfg, api),
|
|
1814
|
+
limit: 3,
|
|
1815
|
+
}),
|
|
1816
|
+
});
|
|
1817
|
+
|
|
1818
|
+
const sections = (data.sections || []).map((s: any) => {
|
|
1819
|
+
const items = (s.memories || []).map((m: any) =>
|
|
1820
|
+
`- [${m.type}] ${m.summary}`
|
|
1821
|
+
).join('\n');
|
|
1822
|
+
const graphItems = (s.graphContext || []).slice(0, 3).map((m: any) =>
|
|
1823
|
+
`- [via graph] [${m.type}] ${m.summary}`
|
|
1824
|
+
).join('\n');
|
|
1825
|
+
return `### ${s.topic}\n${items || '(keine Erinnerungen)'}${graphItems ? '\n' + graphItems : ''}`;
|
|
1826
|
+
}).join('\n\n');
|
|
1827
|
+
|
|
1828
|
+
if (sections) {
|
|
1829
|
+
const bootstrapFiles = event.context?.bootstrapFiles || [];
|
|
1830
|
+
const fileEntry = {
|
|
1831
|
+
name: 'MEMTAP_BULLETIN.md',
|
|
1832
|
+
path: 'MEMTAP_BULLETIN.md',
|
|
1833
|
+
filePath: 'MEMTAP_BULLETIN.md',
|
|
1834
|
+
content: `# MemTap Memory Bulletin\n\n${sections}\n`,
|
|
1835
|
+
missing: false,
|
|
1836
|
+
source: 'memtap-plugin',
|
|
1837
|
+
};
|
|
1838
|
+
bootstrapFiles.push(fileEntry);
|
|
1839
|
+
}
|
|
1840
|
+
} catch (err: any) {
|
|
1841
|
+
logger.warn?.(`[memtap] Bulletin injection failed: ${err.message}`) ??
|
|
1842
|
+
console.warn(`[memtap] Bulletin injection failed: ${err.message}`);
|
|
1843
|
+
}
|
|
1844
|
+
},
|
|
1845
|
+
{
|
|
1846
|
+
name: 'memtap.bulletin-inject',
|
|
1847
|
+
description: 'Inject a memory bulletin into agent bootstrap context',
|
|
1848
|
+
}
|
|
1849
|
+
);
|
|
1850
|
+
|
|
1851
|
+
// ── Hook: Dream-Mode Consolidation & Neural Maintenance (periodic) ────────
|
|
1852
|
+
|
|
1853
|
+
api.registerHook(
|
|
1854
|
+
'periodic',
|
|
1855
|
+
async (event: any) => {
|
|
1856
|
+
const cfg = getConfig(api);
|
|
1857
|
+
|
|
1858
|
+
// Dream-mode consolidation (every ~10 calls, representing sleep cycles)
|
|
1859
|
+
if (Math.random() < 0.1) {
|
|
1860
|
+
await dreamModeConsolidation(cfg);
|
|
1861
|
+
}
|
|
1862
|
+
|
|
1863
|
+
// Neural maintenance and cleanup
|
|
1864
|
+
if (Math.random() < 0.05) {
|
|
1865
|
+
await neuralMaintenance();
|
|
1866
|
+
}
|
|
1867
|
+
},
|
|
1868
|
+
{
|
|
1869
|
+
name: 'memtap.dream-consolidation',
|
|
1870
|
+
description: 'Neuromimetic dream-mode consolidation and neural maintenance',
|
|
1871
|
+
}
|
|
1872
|
+
);
|
|
1873
|
+
|
|
1874
|
+
// ── Neural Monitoring & Analytics Functions ──────────────────────────────────
|
|
1875
|
+
|
|
1876
|
+
function generateNeuralReport(agentFilter?: string): string {
|
|
1877
|
+
const now = Date.now();
|
|
1878
|
+
let report = `# 🧠 MemTap Neural System Report\n\n`;
|
|
1879
|
+
|
|
1880
|
+
// Working Memory Analysis
|
|
1881
|
+
report += `## Working Memory States:\n`;
|
|
1882
|
+
let wmCount = 0;
|
|
1883
|
+
for (const [agentId, wm] of workingMemoryState.entries()) {
|
|
1884
|
+
if (agentFilter && agentId !== agentFilter) continue;
|
|
1885
|
+
wmCount++;
|
|
1886
|
+
|
|
1887
|
+
const age = Math.round((now - wm.lastUpdate) / (1000 * 60));
|
|
1888
|
+
const loadStatus = wm.cognitiveLoad > 0.8 ? '🔴 HIGH' :
|
|
1889
|
+
wm.cognitiveLoad > 0.5 ? '🟡 MEDIUM' : '🟢 LOW';
|
|
1890
|
+
|
|
1891
|
+
report += ` **${agentId}:**\n`;
|
|
1892
|
+
report += ` Focus: [${wm.currentFocus.join(', ')}]\n`;
|
|
1893
|
+
report += ` Spotlight: "${wm.attentionSpotlight}"\n`;
|
|
1894
|
+
report += ` Load: ${Math.round(wm.cognitiveLoad * 100)}% ${loadStatus}\n`;
|
|
1895
|
+
report += ` Active Memories: ${wm.activeMemories.length}\n`;
|
|
1896
|
+
report += ` Last Update: ${age}min ago\n\n`;
|
|
1897
|
+
}
|
|
1898
|
+
if (wmCount === 0) report += ` No active working memory states\n\n`;
|
|
1899
|
+
|
|
1900
|
+
// Conversation Context Analysis
|
|
1901
|
+
report += `## Conversation Contexts:\n`;
|
|
1902
|
+
let ctxCount = 0;
|
|
1903
|
+
for (const [agentId, ctx] of conversationState.entries()) {
|
|
1904
|
+
if (agentFilter && agentId !== agentFilter) continue;
|
|
1905
|
+
ctxCount++;
|
|
1906
|
+
|
|
1907
|
+
const lastAccess = ctx.lastMemoryAccess ?
|
|
1908
|
+
Math.round((now - ctx.lastMemoryAccess) / (1000 * 60)) + 'min ago' :
|
|
1909
|
+
'never';
|
|
1910
|
+
|
|
1911
|
+
report += ` **${agentId}:**\n`;
|
|
1912
|
+
report += ` Topics: [${ctx.recentTopics.join(', ')}]\n`;
|
|
1913
|
+
report += ` Dominant: "${ctx.dominantTopic || 'none'}"\n`;
|
|
1914
|
+
report += ` Attention: ${ctx.attentionLevel} | Emotion: ${ctx.emotionalContext}\n`;
|
|
1915
|
+
report += ` Engagement: ${ctx.userEngagement} | Queries: ${ctx.memoryQueryCount}\n`;
|
|
1916
|
+
report += ` Last Memory Access: ${lastAccess}\n\n`;
|
|
1917
|
+
}
|
|
1918
|
+
if (ctxCount === 0) report += ` No active conversation contexts\n\n`;
|
|
1919
|
+
|
|
1920
|
+
// Episodic Memory Summary
|
|
1921
|
+
report += `## Episodic Memories:\n`;
|
|
1922
|
+
let episodicCount = 0;
|
|
1923
|
+
for (const [agentId, episodes] of episodicMemories.entries()) {
|
|
1924
|
+
if (agentFilter && agentId !== agentFilter) continue;
|
|
1925
|
+
episodicCount += episodes.length;
|
|
1926
|
+
|
|
1927
|
+
const avgEmotion = episodes.length > 0 ?
|
|
1928
|
+
episodes.reduce((sum, e) => sum + e.emotionalIntensity, 0) / episodes.length : 0;
|
|
1929
|
+
const avgConsolidation = episodes.length > 0 ?
|
|
1930
|
+
episodes.reduce((sum, e) => sum + e.consolidationScore, 0) / episodes.length : 0;
|
|
1931
|
+
|
|
1932
|
+
report += ` **${agentId}:** ${episodes.length} episodes\n`;
|
|
1933
|
+
report += ` Avg Emotional Intensity: ${Math.round(avgEmotion * 100)}%\n`;
|
|
1934
|
+
report += ` Avg Consolidation: ${Math.round(avgConsolidation * 100)}%\n`;
|
|
1935
|
+
}
|
|
1936
|
+
report += ` Total Episodic Memories: ${episodicCount}\n\n`;
|
|
1937
|
+
|
|
1938
|
+
// Memory Chunks Analysis
|
|
1939
|
+
report += `## Memory Chunks:\n`;
|
|
1940
|
+
let chunkCount = 0;
|
|
1941
|
+
for (const [agentId, chunks] of memoryChunks.entries()) {
|
|
1942
|
+
if (agentFilter && agentId !== agentFilter) continue;
|
|
1943
|
+
chunkCount += chunks.length;
|
|
1944
|
+
|
|
1945
|
+
const strongChunks = chunks.filter(c => c.strength > 0.8).length;
|
|
1946
|
+
const concepts = chunks.map(c => c.abstractConcept).slice(0, 3).join(', ');
|
|
1947
|
+
|
|
1948
|
+
report += ` **${agentId}:** ${chunks.length} chunks (${strongChunks} strong)\n`;
|
|
1949
|
+
report += ` Top Concepts: ${concepts}\n`;
|
|
1950
|
+
}
|
|
1951
|
+
report += ` Total Memory Chunks: ${chunkCount}\n\n`;
|
|
1952
|
+
|
|
1953
|
+
return report;
|
|
1954
|
+
}
|
|
1955
|
+
|
|
1956
|
+
function generatePerformanceReport(agentFilter?: string): string {
|
|
1957
|
+
let report = `# ⚡ MemTap Performance Report\n\n`;
|
|
1958
|
+
|
|
1959
|
+
// Cache Performance
|
|
1960
|
+
report += `## Cache Performance:\n`;
|
|
1961
|
+
let totalCacheEntries = 0;
|
|
1962
|
+
let totalRetrievals = 0;
|
|
1963
|
+
let hitRate = 0;
|
|
1964
|
+
|
|
1965
|
+
for (const [key, cached] of memoryCache.entries()) {
|
|
1966
|
+
if (agentFilter && !key.startsWith(agentFilter + ':')) continue;
|
|
1967
|
+
totalCacheEntries++;
|
|
1968
|
+
totalRetrievals += cached.retrievalCount || 0;
|
|
1969
|
+
}
|
|
1970
|
+
|
|
1971
|
+
if (totalCacheEntries > 0) {
|
|
1972
|
+
hitRate = totalRetrievals / totalCacheEntries;
|
|
1973
|
+
}
|
|
1974
|
+
|
|
1975
|
+
report += ` Cache Entries: ${totalCacheEntries}\n`;
|
|
1976
|
+
report += ` Total Retrievals: ${totalRetrievals}\n`;
|
|
1977
|
+
report += ` Avg Hit Rate: ${Math.round(hitRate * 100)}%\n`;
|
|
1978
|
+
report += ` Cache Efficiency: ${hitRate > 1.5 ? '🟢 EXCELLENT' : hitRate > 0.8 ? '🟡 GOOD' : '🔴 POOR'}\n\n`;
|
|
1979
|
+
|
|
1980
|
+
// User Profile Performance
|
|
1981
|
+
report += `## User Profiles:\n`;
|
|
1982
|
+
let totalProfiles = 0;
|
|
1983
|
+
let totalQueries = 0;
|
|
1984
|
+
let totalSuccessful = 0;
|
|
1985
|
+
|
|
1986
|
+
for (const [agentId, profile] of userProfiles.entries()) {
|
|
1987
|
+
if (agentFilter && agentId !== agentFilter) continue;
|
|
1988
|
+
totalProfiles++;
|
|
1989
|
+
totalQueries += profile.totalQueries;
|
|
1990
|
+
totalSuccessful += profile.successfulRecalls;
|
|
1991
|
+
|
|
1992
|
+
if (!agentFilter) {
|
|
1993
|
+
const successRate = profile.totalQueries > 0 ?
|
|
1994
|
+
Math.round((profile.successfulRecalls / profile.totalQueries) * 100) : 0;
|
|
1995
|
+
|
|
1996
|
+
report += ` **${agentId}:**\n`;
|
|
1997
|
+
report += ` Queries: ${profile.totalQueries} | Success: ${successRate}%\n`;
|
|
1998
|
+
report += ` Sensitivity: ${profile.recallSensitivity} | Complexity: ${profile.averageQueryComplexity.toFixed(1)}\n`;
|
|
1999
|
+
report += ` Sleep Cycles: ${profile.sleepCycles} | Last Active: ${new Date(profile.lastActive).toLocaleString()}\n\n`;
|
|
2000
|
+
}
|
|
2001
|
+
}
|
|
2002
|
+
|
|
2003
|
+
const overallSuccessRate = totalQueries > 0 ?
|
|
2004
|
+
Math.round((totalSuccessful / totalQueries) * 100) : 0;
|
|
2005
|
+
|
|
2006
|
+
report += `## Overall Performance:\n`;
|
|
2007
|
+
report += ` Active Profiles: ${totalProfiles}\n`;
|
|
2008
|
+
report += ` Total Queries: ${totalQueries}\n`;
|
|
2009
|
+
report += ` Success Rate: ${overallSuccessRate}%\n`;
|
|
2010
|
+
report += ` System Health: ${overallSuccessRate > 80 ? '🟢 EXCELLENT' : overallSuccessRate > 60 ? '🟡 GOOD' : '🔴 NEEDS ATTENTION'}\n\n`;
|
|
2011
|
+
|
|
2012
|
+
return report;
|
|
2013
|
+
}
|
|
2014
|
+
|
|
2015
|
+
function generateAnalyticsReport(agentFilter?: string, timeRange: string = '24h'): string {
|
|
2016
|
+
const now = Date.now();
|
|
2017
|
+
const ranges = {
|
|
2018
|
+
'1h': 60 * 60 * 1000,
|
|
2019
|
+
'24h': 24 * 60 * 60 * 1000,
|
|
2020
|
+
'7d': 7 * 24 * 60 * 60 * 1000
|
|
2021
|
+
};
|
|
2022
|
+
const cutoff = now - (ranges[timeRange as keyof typeof ranges] || ranges['24h']);
|
|
2023
|
+
|
|
2024
|
+
let report = `# 📊 MemTap Analytics Report (${timeRange})\n\n`;
|
|
2025
|
+
|
|
2026
|
+
// Attention Patterns Analysis
|
|
2027
|
+
const recentAttention = attentionHistory.filter(a =>
|
|
2028
|
+
a.timestamp > cutoff && (!agentFilter || a.agent === agentFilter)
|
|
2029
|
+
);
|
|
2030
|
+
|
|
2031
|
+
if (recentAttention.length > 0) {
|
|
2032
|
+
const attentionCounts = recentAttention.reduce((acc, a) => {
|
|
2033
|
+
acc[a.level] = (acc[a.level] || 0) + 1;
|
|
2034
|
+
return acc;
|
|
2035
|
+
}, {} as Record<string, number>);
|
|
2036
|
+
|
|
2037
|
+
report += `## Attention Patterns (${recentAttention.length} events):\n`;
|
|
2038
|
+
for (const [level, count] of Object.entries(attentionCounts)) {
|
|
2039
|
+
const pct = Math.round((count / recentAttention.length) * 100);
|
|
2040
|
+
report += ` ${level}: ${count} events (${pct}%)\n`;
|
|
2041
|
+
}
|
|
2042
|
+
report += '\n';
|
|
2043
|
+
}
|
|
2044
|
+
|
|
2045
|
+
// Engagement Analysis
|
|
2046
|
+
report += `## User Engagement:\n`;
|
|
2047
|
+
for (const [agentId, ctx] of conversationState.entries()) {
|
|
2048
|
+
if (agentFilter && agentId !== agentFilter) continue;
|
|
2049
|
+
|
|
2050
|
+
const profile = userProfiles.get(agentId);
|
|
2051
|
+
if (!profile || profile.lastActive < cutoff) continue;
|
|
2052
|
+
|
|
2053
|
+
report += ` **${agentId}:**\n`;
|
|
2054
|
+
report += ` Current Engagement: ${ctx.userEngagement}\n`;
|
|
2055
|
+
report += ` Memory Queries: ${ctx.memoryQueryCount} in session\n`;
|
|
2056
|
+
report += ` Dominant Topic: ${ctx.dominantTopic || 'none'}\n`;
|
|
2057
|
+
report += ` Emotional Context: ${ctx.emotionalContext}\n\n`;
|
|
2058
|
+
}
|
|
2059
|
+
|
|
2060
|
+
// Memory Operation Trends
|
|
2061
|
+
const recentCacheOps = Array.from(memoryCache.entries()).filter(([key, cached]) =>
|
|
2062
|
+
cached.timestamp > cutoff && (!agentFilter || key.startsWith(agentFilter + ':'))
|
|
2063
|
+
);
|
|
2064
|
+
|
|
2065
|
+
report += `## Memory Operations:\n`;
|
|
2066
|
+
report += ` Cache Operations: ${recentCacheOps.length}\n`;
|
|
2067
|
+
|
|
2068
|
+
if (recentCacheOps.length > 0) {
|
|
2069
|
+
const avgRetrievals = recentCacheOps.reduce((sum, [, cached]) =>
|
|
2070
|
+
sum + (cached.retrievalCount || 0), 0) / recentCacheOps.length;
|
|
2071
|
+
report += ` Avg Retrievals per Entry: ${avgRetrievals.toFixed(1)}\n`;
|
|
2072
|
+
}
|
|
2073
|
+
|
|
2074
|
+
// Query Complexity Distribution
|
|
2075
|
+
const complexities = Array.from(userProfiles.values())
|
|
2076
|
+
.filter(p => !agentFilter || agentFilter === 'all' || p.lastActive > cutoff)
|
|
2077
|
+
.map(p => p.averageQueryComplexity);
|
|
2078
|
+
|
|
2079
|
+
if (complexities.length > 0) {
|
|
2080
|
+
const avgComplexity = complexities.reduce((sum, c) => sum + c, 0) / complexities.length;
|
|
2081
|
+
report += ` Avg Query Complexity: ${avgComplexity.toFixed(2)}\n`;
|
|
2082
|
+
}
|
|
2083
|
+
|
|
2084
|
+
return report;
|
|
2085
|
+
}
|
|
2086
|
+
|
|
2087
|
+
function generateLiveMonitoring(agentFilter?: string, duration: number = 30): string {
|
|
2088
|
+
let report = `# 🔴 LIVE Neural Monitoring (${duration}s)\n\n`;
|
|
2089
|
+
|
|
2090
|
+
report += `## Current System State:\n`;
|
|
2091
|
+
report += `- Working Memory States: ${workingMemoryState.size}\n`;
|
|
2092
|
+
report += `- Active Conversations: ${conversationState.size}\n`;
|
|
2093
|
+
report += `- Cache Entries: ${memoryCache.size}\n`;
|
|
2094
|
+
report += `- User Profiles: ${userProfiles.size}\n`;
|
|
2095
|
+
report += `- Memory Chunks: ${Array.from(memoryChunks.values()).reduce((sum, chunks) => sum + chunks.length, 0)}\n`;
|
|
2096
|
+
report += `- Episodic Memories: ${Array.from(episodicMemories.values()).reduce((sum, eps) => sum + eps.length, 0)}\n\n`;
|
|
2097
|
+
|
|
2098
|
+
if (agentFilter) {
|
|
2099
|
+
const wm = workingMemoryState.get(agentFilter);
|
|
2100
|
+
const ctx = conversationState.get(agentFilter);
|
|
2101
|
+
|
|
2102
|
+
if (wm) {
|
|
2103
|
+
report += `## Live Working Memory (${agentFilter}):\n`;
|
|
2104
|
+
report += `- Focus: [${wm.currentFocus.join(', ')}]\n`;
|
|
2105
|
+
report += `- Cognitive Load: ${Math.round(wm.cognitiveLoad * 100)}%\n`;
|
|
2106
|
+
report += `- Active Memories: ${wm.activeMemories.length}\n\n`;
|
|
2107
|
+
}
|
|
2108
|
+
|
|
2109
|
+
if (ctx) {
|
|
2110
|
+
report += `## Live Context (${agentFilter}):\n`;
|
|
2111
|
+
report += `- Attention: ${ctx.attentionLevel}\n`;
|
|
2112
|
+
report += `- Emotion: ${ctx.emotionalContext}\n`;
|
|
2113
|
+
report += `- Engagement: ${ctx.userEngagement}\n`;
|
|
2114
|
+
report += `- Recent Topics: [${ctx.recentTopics.join(', ')}]\n\n`;
|
|
2115
|
+
}
|
|
2116
|
+
}
|
|
2117
|
+
|
|
2118
|
+
report += `*Monitoring started. Use memtap_monitor again to refresh.*\n`;
|
|
2119
|
+
|
|
2120
|
+
return report;
|
|
2121
|
+
}
|
|
2122
|
+
|
|
2123
|
+
function generateWorkingMemoryReport(agentFilter?: string): string {
|
|
2124
|
+
let report = `# 🧠 Working Memory Analysis\n\n`;
|
|
2125
|
+
|
|
2126
|
+
for (const [agentId, wm] of workingMemoryState.entries()) {
|
|
2127
|
+
if (agentFilter && agentId !== agentFilter) continue;
|
|
2128
|
+
|
|
2129
|
+
const age = Math.round((Date.now() - wm.lastUpdate) / 1000);
|
|
2130
|
+
|
|
2131
|
+
report += `## Agent: ${agentId}\n`;
|
|
2132
|
+
report += `**Current Focus:** [${wm.currentFocus.join(', ')}]\n`;
|
|
2133
|
+
report += `**Attention Spotlight:** "${wm.attentionSpotlight}"\n`;
|
|
2134
|
+
report += `**Cognitive Load:** ${Math.round(wm.cognitiveLoad * 100)}% ${'▓'.repeat(Math.round(wm.cognitiveLoad * 10))}${'░'.repeat(10 - Math.round(wm.cognitiveLoad * 10))}\n`;
|
|
2135
|
+
report += `**Active Memories:** ${wm.activeMemories.length}\n`;
|
|
2136
|
+
report += `**Last Update:** ${age}s ago\n\n`;
|
|
2137
|
+
|
|
2138
|
+
if (wm.activeMemories.length > 0) {
|
|
2139
|
+
report += `**Preloaded Memories:**\n`;
|
|
2140
|
+
wm.activeMemories.slice(0, 3).forEach((mem, i) => {
|
|
2141
|
+
report += ` ${i + 1}. [${mem.type}] ${mem.content?.substring(0, 60)}...\n`;
|
|
2142
|
+
});
|
|
2143
|
+
report += '\n';
|
|
2144
|
+
}
|
|
2145
|
+
}
|
|
2146
|
+
|
|
2147
|
+
if (workingMemoryState.size === 0) {
|
|
2148
|
+
report += `No active working memory states found.\n`;
|
|
2149
|
+
}
|
|
2150
|
+
|
|
2151
|
+
return report;
|
|
2152
|
+
}
|
|
2153
|
+
|
|
2154
|
+
function generateAttentionReport(agentFilter?: string): string {
|
|
2155
|
+
const recentAttention = attentionHistory
|
|
2156
|
+
.filter(a => (!agentFilter || a.agent === agentFilter))
|
|
2157
|
+
.slice(-20);
|
|
2158
|
+
|
|
2159
|
+
let report = `# 👁️ Attention Pattern Analysis\n\n`;
|
|
2160
|
+
|
|
2161
|
+
if (recentAttention.length === 0) {
|
|
2162
|
+
return report + `No attention data available.\n`;
|
|
2163
|
+
}
|
|
2164
|
+
|
|
2165
|
+
// Attention distribution
|
|
2166
|
+
const attentionCounts = recentAttention.reduce((acc, a) => {
|
|
2167
|
+
acc[a.level] = (acc[a.level] || 0) + 1;
|
|
2168
|
+
return acc;
|
|
2169
|
+
}, {} as Record<string, number>);
|
|
2170
|
+
|
|
2171
|
+
report += `## Recent Attention Distribution (${recentAttention.length} events):\n`;
|
|
2172
|
+
for (const [level, count] of Object.entries(attentionCounts)) {
|
|
2173
|
+
const pct = Math.round((count / recentAttention.length) * 100);
|
|
2174
|
+
const bar = '█'.repeat(Math.round(pct / 5));
|
|
2175
|
+
report += `**${level}:** ${count} (${pct}%) ${bar}\n`;
|
|
2176
|
+
}
|
|
2177
|
+
report += '\n';
|
|
2178
|
+
|
|
2179
|
+
// Recent events
|
|
2180
|
+
report += `## Recent Attention Events:\n`;
|
|
2181
|
+
recentAttention.slice(-10).reverse().forEach((event, i) => {
|
|
2182
|
+
const timeAgo = Math.round((Date.now() - event.timestamp) / 1000);
|
|
2183
|
+
report += `${i + 1}. **${event.level}** - "${event.trigger}" (${timeAgo}s ago)\n`;
|
|
2184
|
+
});
|
|
2185
|
+
|
|
2186
|
+
return report;
|
|
2187
|
+
}
|
|
2188
|
+
|
|
2189
|
+
function generateCacheReport(agentFilter?: string): string {
|
|
2190
|
+
let report = `# 💾 Memory Cache Analysis\n\n`;
|
|
2191
|
+
|
|
2192
|
+
const relevantCache = Array.from(memoryCache.entries()).filter(([key]) =>
|
|
2193
|
+
!agentFilter || key.startsWith(agentFilter + ':')
|
|
2194
|
+
);
|
|
2195
|
+
|
|
2196
|
+
if (relevantCache.length === 0) {
|
|
2197
|
+
return report + `No cache entries found.\n`;
|
|
2198
|
+
}
|
|
2199
|
+
|
|
2200
|
+
// Cache statistics
|
|
2201
|
+
const totalRetrievals = relevantCache.reduce((sum, [, cached]) => sum + (cached.retrievalCount || 0), 0);
|
|
2202
|
+
const avgAge = relevantCache.reduce((sum, [, cached]) => sum + (Date.now() - cached.timestamp), 0) / relevantCache.length / 1000;
|
|
2203
|
+
const avgRetrievals = totalRetrievals / relevantCache.length;
|
|
2204
|
+
|
|
2205
|
+
report += `## Cache Statistics:\n`;
|
|
2206
|
+
report += `**Total Entries:** ${relevantCache.length}\n`;
|
|
2207
|
+
report += `**Total Retrievals:** ${totalRetrievals}\n`;
|
|
2208
|
+
report += `**Avg Retrievals/Entry:** ${avgRetrievals.toFixed(2)}\n`;
|
|
2209
|
+
report += `**Avg Age:** ${Math.round(avgAge)}s\n`;
|
|
2210
|
+
report += `**Hit Rate:** ${avgRetrievals > 1 ? '🟢 Good' : '🟡 Low'}\n\n`;
|
|
2211
|
+
|
|
2212
|
+
// Top cached queries
|
|
2213
|
+
const topCached = relevantCache
|
|
2214
|
+
.sort(([, a], [, b]) => (b.retrievalCount || 0) - (a.retrievalCount || 0))
|
|
2215
|
+
.slice(0, 5);
|
|
2216
|
+
|
|
2217
|
+
report += `## Most Accessed Cache Entries:\n`;
|
|
2218
|
+
topCached.forEach(([key, cached], i) => {
|
|
2219
|
+
const query = cached.query.substring(0, 50);
|
|
2220
|
+
const age = Math.round((Date.now() - cached.timestamp) / 1000);
|
|
2221
|
+
report += `${i + 1}. "${query}..." (${cached.retrievalCount || 0} hits, ${age}s old)\n`;
|
|
2222
|
+
});
|
|
2223
|
+
|
|
2224
|
+
return report;
|
|
2225
|
+
}
|
|
2226
|
+
|
|
2227
|
+
function generateConsolidationReport(agentFilter?: string): string {
|
|
2228
|
+
let report = `# 🌙 Dream-Mode Consolidation Report\n\n`;
|
|
2229
|
+
|
|
2230
|
+
// Memory chunks analysis
|
|
2231
|
+
const relevantChunks = Array.from(memoryChunks.entries()).filter(([agentId]) =>
|
|
2232
|
+
!agentFilter || agentId === agentFilter
|
|
2233
|
+
);
|
|
2234
|
+
|
|
2235
|
+
if (relevantChunks.length === 0) {
|
|
2236
|
+
return report + `No consolidation data available.\n`;
|
|
2237
|
+
}
|
|
2238
|
+
|
|
2239
|
+
const allChunks = relevantChunks.flatMap(([, chunks]) => chunks);
|
|
2240
|
+
const strongChunks = allChunks.filter(c => c.strength > 0.8);
|
|
2241
|
+
const avgStrength = allChunks.reduce((sum, c) => sum + c.strength, 0) / allChunks.length;
|
|
2242
|
+
|
|
2243
|
+
report += `## Consolidation Statistics:\n`;
|
|
2244
|
+
report += `**Total Chunks:** ${allChunks.length}\n`;
|
|
2245
|
+
report += `**Strong Chunks:** ${strongChunks.length} (${Math.round(strongChunks.length / allChunks.length * 100)}%)\n`;
|
|
2246
|
+
report += `**Avg Strength:** ${avgStrength.toFixed(2)}\n`;
|
|
2247
|
+
report += `**Consolidation Health:** ${avgStrength > 0.7 ? '🟢 Excellent' : avgStrength > 0.5 ? '🟡 Good' : '🔴 Poor'}\n\n`;
|
|
2248
|
+
|
|
2249
|
+
// Top consolidated concepts
|
|
2250
|
+
report += `## Top Consolidated Concepts:\n`;
|
|
2251
|
+
strongChunks.slice(0, 5).forEach((chunk, i) => {
|
|
2252
|
+
const age = Math.round((Date.now() - chunk.lastActivation) / (1000 * 60));
|
|
2253
|
+
report += `${i + 1}. **${chunk.abstractConcept}** (strength: ${chunk.strength.toFixed(2)}, ${chunk.relatedMemories.length} memories, ${age}min ago)\n`;
|
|
2254
|
+
});
|
|
2255
|
+
|
|
2256
|
+
// Episodic consolidation
|
|
2257
|
+
const episodicData = Array.from(episodicMemories.entries()).filter(([agentId]) =>
|
|
2258
|
+
!agentFilter || agentId === agentFilter
|
|
2259
|
+
);
|
|
2260
|
+
|
|
2261
|
+
if (episodicData.length > 0) {
|
|
2262
|
+
const allEpisodics = episodicData.flatMap(([, eps]) => eps);
|
|
2263
|
+
const avgConsolidation = allEpisodics.reduce((sum, e) => sum + e.consolidationScore, 0) / allEpisodics.length;
|
|
2264
|
+
|
|
2265
|
+
report += `\n## Episodic Memory Consolidation:\n`;
|
|
2266
|
+
report += `**Total Episodes:** ${allEpisodics.length}\n`;
|
|
2267
|
+
report += `**Avg Consolidation Score:** ${Math.round(avgConsolidation * 100)}%\n`;
|
|
2268
|
+
}
|
|
2269
|
+
|
|
2270
|
+
return report;
|
|
2271
|
+
}
|
|
2272
|
+
|
|
2273
|
+
// ── Alert System & Dashboard Functions ───────────────────────────────────────
|
|
2274
|
+
|
|
2275
|
+
/**
 * A single system alert produced by anomaly detection.
 *
 * Alerts are keyed by `id`: raising one with an id already present in
 * `activeAlerts` replaces the previous entry rather than duplicating it
 * (see `addAlert`).
 */
interface Alert {
  id: string;
  severity: 'info' | 'warning' | 'critical';
  message: string;
  /** Epoch milliseconds at which the alert was raised. */
  timestamp: number;
  /** Agent the alert concerns; system-wide alerts omit it. */
  agent?: string;
  /** Name of the measured metric (e.g. 'cognitive_load', 'cache_size'). */
  metric?: string;
  /** Observed value of the metric when the alert fired. */
  value?: number;
  /** Threshold whose crossing triggered the alert. */
  threshold?: number;
}

// Module-level alert store; `addAlert` trims it once it exceeds 100 entries.
const activeAlerts: Alert[] = [];
|
|
2287
|
+
|
|
2288
|
+
function performAnomalyDetection(minSeverity: string = 'warning'): string {
|
|
2289
|
+
const now = Date.now();
|
|
2290
|
+
let report = `# 🚨 Neural Anomaly Detection\n\n`;
|
|
2291
|
+
let alertCount = 0;
|
|
2292
|
+
|
|
2293
|
+
// Check working memory overload
|
|
2294
|
+
for (const [agentId, wm] of workingMemoryState.entries()) {
|
|
2295
|
+
if (wm.cognitiveLoad > 0.9) {
|
|
2296
|
+
const alert: Alert = {
|
|
2297
|
+
id: `cognitive_overload_${agentId}`,
|
|
2298
|
+
severity: 'warning',
|
|
2299
|
+
message: `High cognitive load detected for ${agentId}`,
|
|
2300
|
+
timestamp: now,
|
|
2301
|
+
agent: agentId,
|
|
2302
|
+
metric: 'cognitive_load',
|
|
2303
|
+
value: wm.cognitiveLoad,
|
|
2304
|
+
threshold: 0.9
|
|
2305
|
+
};
|
|
2306
|
+
addAlert(alert);
|
|
2307
|
+
alertCount++;
|
|
2308
|
+
}
|
|
2309
|
+
}
|
|
2310
|
+
|
|
2311
|
+
// Check for memory cache bloat
|
|
2312
|
+
if (memoryCache.size > 80) {
|
|
2313
|
+
const alert: Alert = {
|
|
2314
|
+
id: 'cache_bloat',
|
|
2315
|
+
severity: 'warning',
|
|
2316
|
+
message: 'Memory cache size approaching limit',
|
|
2317
|
+
timestamp: now,
|
|
2318
|
+
metric: 'cache_size',
|
|
2319
|
+
value: memoryCache.size,
|
|
2320
|
+
threshold: 80
|
|
2321
|
+
};
|
|
2322
|
+
addAlert(alert);
|
|
2323
|
+
alertCount++;
|
|
2324
|
+
}
|
|
2325
|
+
|
|
2326
|
+
// Check for inactive working memory states (memory leak detection)
|
|
2327
|
+
const oneHourAgo = now - 60 * 60 * 1000;
|
|
2328
|
+
for (const [agentId, wm] of workingMemoryState.entries()) {
|
|
2329
|
+
if (wm.lastUpdate < oneHourAgo) {
|
|
2330
|
+
const alert: Alert = {
|
|
2331
|
+
id: `stale_wm_${agentId}`,
|
|
2332
|
+
severity: 'info',
|
|
2333
|
+
message: `Stale working memory state for ${agentId}`,
|
|
2334
|
+
timestamp: now,
|
|
2335
|
+
agent: agentId,
|
|
2336
|
+
metric: 'last_update',
|
|
2337
|
+
value: (now - wm.lastUpdate) / (1000 * 60), // minutes
|
|
2338
|
+
threshold: 60
|
|
2339
|
+
};
|
|
2340
|
+
addAlert(alert);
|
|
2341
|
+
alertCount++;
|
|
2342
|
+
}
|
|
2343
|
+
}
|
|
2344
|
+
|
|
2345
|
+
// Check attention pattern anomalies
|
|
2346
|
+
const recentAttention = attentionHistory.filter(a => a.timestamp > now - 24 * 60 * 60 * 1000);
|
|
2347
|
+
if (recentAttention.length > 0) {
|
|
2348
|
+
const distractedCount = recentAttention.filter(a => a.level === 'distracted').length;
|
|
2349
|
+
const distractedRatio = distractedCount / recentAttention.length;
|
|
2350
|
+
|
|
2351
|
+
if (distractedRatio > 0.7) {
|
|
2352
|
+
const alert: Alert = {
|
|
2353
|
+
id: 'attention_degradation',
|
|
2354
|
+
severity: 'warning',
|
|
2355
|
+
message: 'High distraction rate detected across system',
|
|
2356
|
+
timestamp: now,
|
|
2357
|
+
metric: 'distracted_ratio',
|
|
2358
|
+
value: distractedRatio,
|
|
2359
|
+
threshold: 0.7
|
|
2360
|
+
};
|
|
2361
|
+
addAlert(alert);
|
|
2362
|
+
alertCount++;
|
|
2363
|
+
}
|
|
2364
|
+
}
|
|
2365
|
+
|
|
2366
|
+
// Check user profile health
|
|
2367
|
+
for (const [agentId, profile] of userProfiles.entries()) {
|
|
2368
|
+
if (profile.totalQueries > 10) {
|
|
2369
|
+
const successRate = profile.successfulRecalls / profile.totalQueries;
|
|
2370
|
+
if (successRate < 0.6) {
|
|
2371
|
+
const alert: Alert = {
|
|
2372
|
+
id: `low_success_rate_${agentId}`,
|
|
2373
|
+
severity: 'warning',
|
|
2374
|
+
message: `Low memory recall success rate for ${agentId}`,
|
|
2375
|
+
timestamp: now,
|
|
2376
|
+
agent: agentId,
|
|
2377
|
+
metric: 'success_rate',
|
|
2378
|
+
value: successRate,
|
|
2379
|
+
threshold: 0.6
|
|
2380
|
+
};
|
|
2381
|
+
addAlert(alert);
|
|
2382
|
+
alertCount++;
|
|
2383
|
+
}
|
|
2384
|
+
}
|
|
2385
|
+
}
|
|
2386
|
+
|
|
2387
|
+
// Filter alerts by severity
|
|
2388
|
+
const filteredAlerts = activeAlerts.filter(a => {
|
|
2389
|
+
const levels = { info: 0, warning: 1, critical: 2 };
|
|
2390
|
+
return levels[a.severity] >= levels[minSeverity as keyof typeof levels];
|
|
2391
|
+
});
|
|
2392
|
+
|
|
2393
|
+
report += `**Anomaly Detection Complete**\n`;
|
|
2394
|
+
report += `- Total Checks: 5\n`;
|
|
2395
|
+
report += `- New Alerts: ${alertCount}\n`;
|
|
2396
|
+
report += `- Active Alerts (${minSeverity}+): ${filteredAlerts.length}\n\n`;
|
|
2397
|
+
|
|
2398
|
+
if (filteredAlerts.length > 0) {
|
|
2399
|
+
report += `## Active Alerts:\n`;
|
|
2400
|
+
filteredAlerts.slice(-10).forEach((alert, i) => {
|
|
2401
|
+
const age = Math.round((now - alert.timestamp) / (1000 * 60));
|
|
2402
|
+
const icon = alert.severity === 'critical' ? '🔴' :
|
|
2403
|
+
alert.severity === 'warning' ? '🟡' : '🔵';
|
|
2404
|
+
|
|
2405
|
+
report += `${i + 1}. ${icon} **${alert.severity.toUpperCase()}**: ${alert.message}\n`;
|
|
2406
|
+
if (alert.value && alert.threshold) {
|
|
2407
|
+
report += ` Value: ${alert.value.toFixed(2)} | Threshold: ${alert.threshold} | ${age}min ago\n`;
|
|
2408
|
+
}
|
|
2409
|
+
report += '\n';
|
|
2410
|
+
});
|
|
2411
|
+
} else {
|
|
2412
|
+
report += `✅ **System Status: HEALTHY**\nNo anomalies detected at ${minSeverity} level or above.\n`;
|
|
2413
|
+
}
|
|
2414
|
+
|
|
2415
|
+
return report;
|
|
2416
|
+
}
|
|
2417
|
+
|
|
2418
|
+
function addAlert(alert: Alert) {
|
|
2419
|
+
// Remove existing alert with same ID
|
|
2420
|
+
const existingIndex = activeAlerts.findIndex(a => a.id === alert.id);
|
|
2421
|
+
if (existingIndex >= 0) {
|
|
2422
|
+
activeAlerts[existingIndex] = alert;
|
|
2423
|
+
} else {
|
|
2424
|
+
activeAlerts.push(alert);
|
|
2425
|
+
}
|
|
2426
|
+
|
|
2427
|
+
// Limit alert history
|
|
2428
|
+
if (activeAlerts.length > 100) {
|
|
2429
|
+
activeAlerts.splice(0, 50); // Keep most recent 50
|
|
2430
|
+
}
|
|
2431
|
+
}
|
|
2432
|
+
|
|
2433
|
+
function listActiveAlerts(minSeverity: string = 'warning'): string {
|
|
2434
|
+
const levels = { info: 0, warning: 1, critical: 2 };
|
|
2435
|
+
const filtered = activeAlerts.filter(a =>
|
|
2436
|
+
levels[a.severity] >= levels[minSeverity as keyof typeof levels]
|
|
2437
|
+
);
|
|
2438
|
+
|
|
2439
|
+
if (filtered.length === 0) {
|
|
2440
|
+
return `No active alerts at ${minSeverity} level or above.`;
|
|
2441
|
+
}
|
|
2442
|
+
|
|
2443
|
+
let report = `# 🚨 Active Alerts (${filtered.length})\n\n`;
|
|
2444
|
+
|
|
2445
|
+
const now = Date.now();
|
|
2446
|
+
filtered.slice(-20).reverse().forEach((alert, i) => {
|
|
2447
|
+
const age = Math.round((now - alert.timestamp) / (1000 * 60));
|
|
2448
|
+
const icon = alert.severity === 'critical' ? '🔴' :
|
|
2449
|
+
alert.severity === 'warning' ? '🟡' : '🔵';
|
|
2450
|
+
|
|
2451
|
+
report += `## ${i + 1}. ${icon} ${alert.severity.toUpperCase()}\n`;
|
|
2452
|
+
report += `**Message:** ${alert.message}\n`;
|
|
2453
|
+
report += `**Time:** ${age} minutes ago\n`;
|
|
2454
|
+
if (alert.agent) report += `**Agent:** ${alert.agent}\n`;
|
|
2455
|
+
if (alert.value && alert.threshold) {
|
|
2456
|
+
report += `**Value:** ${alert.value.toFixed(2)} (threshold: ${alert.threshold})\n`;
|
|
2457
|
+
}
|
|
2458
|
+
report += '\n';
|
|
2459
|
+
});
|
|
2460
|
+
|
|
2461
|
+
return report;
|
|
2462
|
+
}
|
|
2463
|
+
|
|
2464
|
+
function clearAlerts() {
|
|
2465
|
+
activeAlerts.length = 0;
|
|
2466
|
+
}
|
|
2467
|
+
|
|
2468
|
+
/**
 * Renders the top-level system dashboard as markdown.
 *
 * Section inclusion is driven by `view`:
 * - 'compact': health table only (plus alerts and quick actions)
 * - 'overview' (default): health table + performance metrics
 * - 'detailed': all of the above + neural activity breakdown
 *
 * @param view one of 'compact' | 'overview' | 'detailed'
 * @param agentFilter optional agent id used by the detailed sections
 * @returns a markdown-formatted dashboard string
 */
function generateDashboard(view: string = 'overview', agentFilter?: string): string {
  const now = Date.now();
  let dashboard = `# 🧠 MemTap Neural System Dashboard\n`;
  dashboard += `*Updated: ${new Date().toLocaleString()}*\n\n`;

  // System Health Overview — raw counters from the in-memory stores.
  const totalProfiles = userProfiles.size;
  const activeWM = workingMemoryState.size;
  const activeConversations = conversationState.size;
  const cacheSize = memoryCache.size;
  // Info-level alerts are excluded from the health count.
  const totalAlerts = activeAlerts.filter(a => a.severity !== 'info').length;

  dashboard += `## 🚦 System Health\n`;
  dashboard += `| Metric | Value | Status |\n`;
  dashboard += `|--------|-------|--------|\n`;
  dashboard += `| Active Users | ${totalProfiles} | ${totalProfiles > 0 ? '🟢' : '🟡'} |\n`;
  dashboard += `| Working Memory | ${activeWM} | ${activeWM > 0 ? '🟢' : '🟡'} |\n`;
  dashboard += `| Conversations | ${activeConversations} | ${activeConversations > 0 ? '🟢' : '🟡'} |\n`;
  dashboard += `| Cache Size | ${cacheSize}/100 | ${cacheSize < 80 ? '🟢' : cacheSize < 95 ? '🟡' : '🔴'} |\n`;
  dashboard += `| Alerts | ${totalAlerts} | ${totalAlerts === 0 ? '🟢' : totalAlerts < 5 ? '🟡' : '🔴'} |\n\n`;

  // Performance Metrics — aggregated over ALL profiles (agentFilter not applied here).
  if (view !== 'compact') {
    const totalQueries = Array.from(userProfiles.values()).reduce((sum, p) => sum + p.totalQueries, 0);
    const successfulRecalls = Array.from(userProfiles.values()).reduce((sum, p) => sum + p.successfulRecalls, 0);
    const overallSuccessRate = totalQueries > 0 ? (successfulRecalls / totalQueries) * 100 : 0;

    dashboard += `## 📊 Performance Metrics\n`;
    dashboard += `- **Total Queries:** ${totalQueries}\n`;
    dashboard += `- **Success Rate:** ${overallSuccessRate.toFixed(1)}%\n`;
    dashboard += `- **Cache Hit Rate:** ${cacheSize > 0 ? 'Available' : 'N/A'}\n`;
    dashboard += `- **Memory Efficiency:** ${overallSuccessRate > 80 ? '🟢 Excellent' : overallSuccessRate > 60 ? '🟡 Good' : '🔴 Poor'}\n\n`;
  }

  // Neural Activity Summary — only in the 'detailed' view.
  if (view === 'detailed') {
    dashboard += `## 🧠 Neural Activity\n`;

    // Working Memory Summary
    if (workingMemoryState.size > 0) {
      const avgCognitiveLoad = Array.from(workingMemoryState.values())
        .reduce((sum, wm) => sum + wm.cognitiveLoad, 0) / workingMemoryState.size;

      dashboard += `### Working Memory\n`;
      dashboard += `- **Active States:** ${workingMemoryState.size}\n`;
      dashboard += `- **Avg Cognitive Load:** ${Math.round(avgCognitiveLoad * 100)}%\n`;

      // Show individual agents if filtered
      if (agentFilter) {
        const wm = workingMemoryState.get(agentFilter);
        if (wm) {
          dashboard += `- **Focus (${agentFilter}):** [${wm.currentFocus.join(', ')}]\n`;
          dashboard += `- **Spotlight:** "${wm.attentionSpotlight}"\n`;
        }
      }
      dashboard += '\n';
    }

    // Attention Patterns — last hour only, optionally per agent.
    const recentAttention = attentionHistory.filter(a =>
      a.timestamp > now - 60 * 60 * 1000 && (!agentFilter || a.agent === agentFilter)
    );

    if (recentAttention.length > 0) {
      const attentionCounts = recentAttention.reduce((acc, a) => {
        acc[a.level] = (acc[a.level] || 0) + 1;
        return acc;
      }, {} as Record<string, number>);

      dashboard += `### Attention Patterns (Last Hour)\n`;
      for (const [level, count] of Object.entries(attentionCounts)) {
        const pct = Math.round((count / recentAttention.length) * 100);
        dashboard += `- **${level}:** ${count} (${pct}%)\n`;
      }
      dashboard += '\n';
    }

    // Memory Consolidation — global totals (agentFilter not applied here).
    const totalChunks = Array.from(memoryChunks.values()).reduce((sum, chunks) => sum + chunks.length, 0);
    const totalEpisodics = Array.from(episodicMemories.values()).reduce((sum, eps) => sum + eps.length, 0);

    dashboard += `### Memory Consolidation\n`;
    dashboard += `- **Memory Chunks:** ${totalChunks}\n`;
    dashboard += `- **Episodic Memories:** ${totalEpisodics}\n`;
    dashboard += `- **Consolidation Status:** ${totalChunks > 0 ? '🟢 Active' : '🟡 Minimal'}\n\n`;
  }

  // Recent Alerts — up to the three newest raised within the last hour.
  const recentAlerts = activeAlerts.filter(a => a.timestamp > now - 60 * 60 * 1000);
  if (recentAlerts.length > 0) {
    dashboard += `## 🚨 Recent Alerts (${recentAlerts.length})\n`;
    recentAlerts.slice(-3).forEach((alert, i) => {
      const age = Math.round((now - alert.timestamp) / (1000 * 60));
      const icon = alert.severity === 'critical' ? '🔴' : alert.severity === 'warning' ? '🟡' : '🔵';
      dashboard += `${i + 1}. ${icon} ${alert.message} (${age}min ago)\n`;
    });
    dashboard += '\n';
  }

  // Quick Actions
  dashboard += `## ⚡ Quick Actions\n`;
  dashboard += `- \`memtap_monitor live\` - Real-time neural monitoring\n`;
  dashboard += `- \`memtap_alerts check\` - Run anomaly detection\n`;
  dashboard += `- \`memtap_health neural\` - Detailed neural system report\n`;
  dashboard += `- \`memtap_maintenance run-all\` - Full system maintenance\n`;

  return dashboard;
}
|
|
2576
|
+
|
|
2577
|
+
// ── Dream-Mode Consolidation Function ────────────────────────────────────────
|
|
2578
|
+
|
|
2579
|
+
async function dreamModeConsolidation(cfg: MemTapConfig) {
|
|
2580
|
+
try {
|
|
2581
|
+
// Simulate sleep consolidation - strengthen important memories, weaken unused ones
|
|
2582
|
+
for (const [agentId, chunks] of memoryChunks.entries()) {
|
|
2583
|
+
|
|
2584
|
+
// Strengthen frequently accessed chunks
|
|
2585
|
+
for (const chunk of chunks) {
|
|
2586
|
+
if (chunk.lastActivation > Date.now() - 86400000) { // Active in last 24h
|
|
2587
|
+
chunk.strength += FORGETTING_CURVE.consolidationBonus;
|
|
2588
|
+
} else {
|
|
2589
|
+
chunk.strength *= 0.98; // Slight decay for unused chunks
|
|
2590
|
+
}
|
|
2591
|
+
}
|
|
2592
|
+
|
|
2593
|
+
// Remove very weak chunks (forgotten)
|
|
2594
|
+
const activeChunks = chunks.filter(c => c.strength > 0.1);
|
|
2595
|
+
memoryChunks.set(agentId, activeChunks);
|
|
2596
|
+
|
|
2597
|
+
// Update consolidation scores for episodic memories
|
|
2598
|
+
const episodics = episodicMemories.get(agentId) || [];
|
|
2599
|
+
for (const episodic of episodics) {
|
|
2600
|
+
episodic.consolidationScore = Math.min(1.0,
|
|
2601
|
+
episodic.consolidationScore +
|
|
2602
|
+
(episodic.emotionalIntensity * FORGETTING_CURVE.consolidationBonus)
|
|
2603
|
+
);
|
|
2604
|
+
}
|
|
2605
|
+
}
|
|
2606
|
+
|
|
2607
|
+
// Pattern recognition and abstraction (simplified)
|
|
2608
|
+
await backgroundPatternRecognition(cfg);
|
|
2609
|
+
|
|
2610
|
+
console.log('[memtap] Dream-mode consolidation completed');
|
|
2611
|
+
|
|
2612
|
+
} catch (err: any) {
|
|
2613
|
+
console.warn(`[memtap] Dream-mode consolidation failed: ${err.message}`);
|
|
2614
|
+
}
|
|
2615
|
+
}
|
|
2616
|
+
|
|
2617
|
+
async function backgroundPatternRecognition(cfg: MemTapConfig) {
|
|
2618
|
+
// Analyze patterns in memory access and create abstract connections
|
|
2619
|
+
try {
|
|
2620
|
+
// Find frequently co-accessed memories
|
|
2621
|
+
const cachePatterns = new Map<string, string[]>();
|
|
2622
|
+
|
|
2623
|
+
for (const [key, cached] of memoryCache.entries()) {
|
|
2624
|
+
if (cached.retrievalCount > 3) { // Frequently accessed
|
|
2625
|
+
const agent = key.split(':')[0];
|
|
2626
|
+
const patterns = cachePatterns.get(agent) || [];
|
|
2627
|
+
patterns.push(cached.query);
|
|
2628
|
+
cachePatterns.set(agent, patterns);
|
|
2629
|
+
}
|
|
2630
|
+
}
|
|
2631
|
+
|
|
2632
|
+
// Create abstract pattern memories (very simplified)
|
|
2633
|
+
for (const [agent, patterns] of cachePatterns.entries()) {
|
|
2634
|
+
if (patterns.length > 5) {
|
|
2635
|
+
const abstractPattern = `Pattern detected: Frequent queries about ${extractCommonThemes(patterns)}`;
|
|
2636
|
+
|
|
2637
|
+
// Store as a meta-memory
|
|
2638
|
+
await bbFetch(cfg, `${baseUrl(cfg)}/memories`, {
|
|
2639
|
+
method: 'POST',
|
|
2640
|
+
body: JSON.stringify({
|
|
2641
|
+
content: abstractPattern,
|
|
2642
|
+
type: 'pattern',
|
|
2643
|
+
agent: agent,
|
|
2644
|
+
importance: 0.6,
|
|
2645
|
+
tags: ['dream-extracted', 'pattern', 'meta-memory'],
|
|
2646
|
+
source: 'plugin:dream-mode-pattern-recognition'
|
|
2647
|
+
})
|
|
2648
|
+
}).catch(() => {}); // Silent fail
|
|
2649
|
+
}
|
|
2650
|
+
}
|
|
2651
|
+
|
|
2652
|
+
} catch { /* Pattern recognition failed - not critical */ }
|
|
2653
|
+
}
|
|
2654
|
+
|
|
2655
|
+
function extractCommonThemes(queries: string[]): string {
|
|
2656
|
+
const allText = queries.join(' ').toLowerCase();
|
|
2657
|
+
|
|
2658
|
+
if (/memtap.*development/i.test(allText)) return 'MemTap development patterns';
|
|
2659
|
+
if (/business.*strategy/i.test(allText)) return 'business strategy concerns';
|
|
2660
|
+
if (/infrastructure.*server/i.test(allText)) return 'infrastructure management';
|
|
2661
|
+
|
|
2662
|
+
return 'general usage patterns';
|
|
2663
|
+
}
|
|
2664
|
+
|
|
2665
|
+
async function neuralMaintenance() {
|
|
2666
|
+
try {
|
|
2667
|
+
const now = Date.now();
|
|
2668
|
+
const oneDayAgo = now - 24 * 60 * 60 * 1000;
|
|
2669
|
+
const oneWeekAgo = now - 7 * 24 * 60 * 60 * 1000;
|
|
2670
|
+
|
|
2671
|
+
// Clean up old conversation state (older than 1 day)
|
|
2672
|
+
for (const [agentId, context] of conversationState.entries()) {
|
|
2673
|
+
if ((context.lastMemoryAccess || 0) < oneDayAgo) {
|
|
2674
|
+
conversationState.delete(agentId);
|
|
2675
|
+
}
|
|
2676
|
+
}
|
|
2677
|
+
|
|
2678
|
+
// Clean up old cache entries and update retrieval counts
|
|
2679
|
+
for (const [key, cached] of memoryCache.entries()) {
|
|
2680
|
+
if (cached.timestamp < oneDayAgo) {
|
|
2681
|
+
memoryCache.delete(key);
|
|
2682
|
+
}
|
|
2683
|
+
}
|
|
2684
|
+
|
|
2685
|
+
// Clean up old working memory states
|
|
2686
|
+
for (const [agentId, wm] of workingMemoryState.entries()) {
|
|
2687
|
+
if (wm.lastUpdate < oneDayAgo) {
|
|
2688
|
+
workingMemoryState.delete(agentId);
|
|
2689
|
+
}
|
|
2690
|
+
}
|
|
2691
|
+
|
|
2692
|
+
// Clean up old episodic memories (keep only last 30 days)
|
|
2693
|
+
for (const [agentId, episodics] of episodicMemories.entries()) {
|
|
2694
|
+
const recentEpisodics = episodics.filter(e => e.timestamp > oneWeekAgo);
|
|
2695
|
+
episodicMemories.set(agentId, recentEpisodics);
|
|
2696
|
+
}
|
|
2697
|
+
|
|
2698
|
+
// Update user profiles sleep cycles (for consolidation tracking)
|
|
2699
|
+
for (const [agentId, profile] of userProfiles.entries()) {
|
|
2700
|
+
if (profile.lastActive < oneDayAgo) {
|
|
2701
|
+
profile.sleepCycles++;
|
|
2702
|
+
}
|
|
2703
|
+
}
|
|
2704
|
+
|
|
2705
|
+
// Clean up attention history
|
|
2706
|
+
attentionHistory = attentionHistory.filter(a => a.timestamp > oneWeekAgo);
|
|
2707
|
+
|
|
2708
|
+
console.log('[memtap] Neural maintenance completed');
|
|
2709
|
+
|
|
2710
|
+
} catch (err: any) {
|
|
2711
|
+
console.warn(`[memtap] Neural maintenance failed: ${err.message}`);
|
|
2712
|
+
}
|
|
2713
|
+
}
|
|
2714
|
+
|
|
2715
|
+
// ── Performance Monitoring Hook ──────────────────────────────────────────
|
|
2716
|
+
|
|
2717
|
+
api.registerHook(
|
|
2718
|
+
'session_end',
|
|
2719
|
+
async (event: any) => {
|
|
2720
|
+
const currentAgent = agentId(getConfig(api), api);
|
|
2721
|
+
const profile = userProfiles.get(currentAgent);
|
|
2722
|
+
const context = conversationState.get(currentAgent);
|
|
2723
|
+
|
|
2724
|
+
if (profile && profile.totalQueries > 0) {
|
|
2725
|
+
const successRate = profile.successfulRecalls / profile.totalQueries;
|
|
2726
|
+
const avgComplexity = profile.averageQueryComplexity;
|
|
2727
|
+
|
|
2728
|
+
logger.info?.(`[memtap] Session stats for ${currentAgent}: ${profile.totalQueries} queries, ${Math.round(successRate * 100)}% success rate, avg complexity ${avgComplexity.toFixed(1)}`) ??
|
|
2729
|
+
console.log(`[memtap] Session stats for ${currentAgent}: ${profile.totalQueries} queries, ${Math.round(successRate * 100)}% success rate, avg complexity ${avgComplexity.toFixed(1)}`);
|
|
2730
|
+
}
|
|
2731
|
+
},
|
|
2732
|
+
{
|
|
2733
|
+
name: 'memtap.performance-monitor',
|
|
2734
|
+
description: 'Monitor and log memory system performance metrics',
|
|
2735
|
+
}
|
|
2736
|
+
);
|
|
2737
|
+
|
|
2738
|
+
// ── Tool: memtap_alerts (Anomaly Detection & Alerting) ──────────────────────
|
|
2739
|
+
|
|
2740
|
+
api.registerTool({
|
|
2741
|
+
name: 'memtap_alerts',
|
|
2742
|
+
description:
|
|
2743
|
+
'Neural system anomaly detection and alerting. Monitors for performance issues, ' +
|
|
2744
|
+
'memory leaks, attention anomalies, and system health problems.',
|
|
2745
|
+
parameters: {
|
|
2746
|
+
type: 'object',
|
|
2747
|
+
properties: {
|
|
2748
|
+
action: {
|
|
2749
|
+
type: 'string',
|
|
2750
|
+
enum: ['check', 'list', 'clear'],
|
|
2751
|
+
description: 'Alert action to perform'
|
|
2752
|
+
},
|
|
2753
|
+
severity: {
|
|
2754
|
+
type: 'string',
|
|
2755
|
+
enum: ['info', 'warning', 'critical'],
|
|
2756
|
+
description: 'Minimum severity level to show (default: warning)'
|
|
2757
|
+
}
|
|
2758
|
+
},
|
|
2759
|
+
required: ['action']
|
|
2760
|
+
},
|
|
2761
|
+
async execute(_id: string, params: { action: string; severity?: string }) {
|
|
2762
|
+
const minSeverity = params.severity || 'warning';
|
|
2763
|
+
|
|
2764
|
+
try {
|
|
2765
|
+
switch (params.action) {
|
|
2766
|
+
case 'check':
|
|
2767
|
+
return { content: [{ type: 'text', text: performAnomalyDetection(minSeverity) }] };
|
|
2768
|
+
|
|
2769
|
+
case 'list':
|
|
2770
|
+
return { content: [{ type: 'text', text: listActiveAlerts(minSeverity) }] };
|
|
2771
|
+
|
|
2772
|
+
case 'clear':
|
|
2773
|
+
clearAlerts();
|
|
2774
|
+
return { content: [{ type: 'text', text: 'All alerts cleared.' }] };
|
|
2775
|
+
|
|
2776
|
+
default:
|
|
2777
|
+
return { content: [{ type: 'text', text: `Unknown alert action: ${params.action}` }], isError: true };
|
|
2778
|
+
}
|
|
2779
|
+
} catch (err: any) {
|
|
2780
|
+
return { content: [{ type: 'text', text: `MemTap alerts error: ${err.message}` }], isError: true };
|
|
2781
|
+
}
|
|
2782
|
+
},
|
|
2783
|
+
});
|
|
2784
|
+
|
|
2785
|
+
// ── Tool: memtap_dashboard (Comprehensive System Overview) ─────────────────
|
|
2786
|
+
|
|
2787
|
+
api.registerTool({
|
|
2788
|
+
name: 'memtap_dashboard',
|
|
2789
|
+
description:
|
|
2790
|
+
'Comprehensive MemTap neural system dashboard. Shows all key metrics, system health, ' +
|
|
2791
|
+
'performance indicators, and neural activity in a single overview.',
|
|
2792
|
+
parameters: {
|
|
2793
|
+
type: 'object',
|
|
2794
|
+
properties: {
|
|
2795
|
+
view: {
|
|
2796
|
+
type: 'string',
|
|
2797
|
+
enum: ['overview', 'detailed', 'compact'],
|
|
2798
|
+
description: 'Dashboard view mode (default: overview)'
|
|
2799
|
+
},
|
|
2800
|
+
agent: { type: 'string', description: 'Focus on specific agent (optional)' }
|
|
2801
|
+
}
|
|
2802
|
+
},
|
|
2803
|
+
async execute(_id: string, params: { view?: string; agent?: string }) {
|
|
2804
|
+
const view = params.view || 'overview';
|
|
2805
|
+
|
|
2806
|
+
try {
|
|
2807
|
+
return { content: [{ type: 'text', text: generateDashboard(view, params.agent) }] };
|
|
2808
|
+
} catch (err: any) {
|
|
2809
|
+
return { content: [{ type: 'text', text: `MemTap dashboard error: ${err.message}` }], isError: true };
|
|
2810
|
+
}
|
|
2811
|
+
},
|
|
2812
|
+
});
|
|
2813
|
+
|
|
2814
|
+
logger.info?.('[memtap] Plugin v2.1.0 "The Neuron" registered: 13 tools + 5 neuromimetic hooks') ??
|
|
2815
|
+
console.log('[memtap] Plugin v2.1.0 "The Neuron" registered: 13 tools + 5 neuromimetic hooks');
|
|
2816
|
+
}
|