superlocalmemory 2.7.2 → 2.7.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +30 -1
- package/README.md +1 -1
- package/docs/ARCHITECTURE.md +8 -8
- package/docs/COMPRESSION-README.md +1 -1
- package/docs/SEARCH-ENGINE-V2.2.0.md +1 -0
- package/hooks/post-recall-hook.js +53 -0
- package/mcp_server.py +425 -17
- package/package.json +1 -1
- package/skills/slm-recall/SKILL.md +1 -0
- package/src/agent_registry.py +3 -3
- package/src/auto_backup.py +64 -31
- package/src/graph_engine.py +15 -11
- package/src/learning/adaptive_ranker.py +70 -1
- package/src/learning/feature_extractor.py +131 -16
- package/src/learning/feedback_collector.py +114 -0
- package/src/learning/learning_db.py +158 -34
- package/src/learning/tests/test_adaptive_ranker.py +5 -4
- package/src/learning/tests/test_aggregator.py +4 -3
- package/src/learning/tests/test_feedback_collector.py +7 -4
- package/src/learning/tests/test_signal_inference.py +399 -0
- package/src/learning/tests/test_synthetic_bootstrap.py +1 -1
- package/src/trust_scorer.py +288 -74
- package/ui/app.js +4 -4
- package/ui/index.html +38 -0
- package/ui/js/agents.js +4 -4
- package/ui/js/feedback.js +333 -0
- package/ui/js/learning.js +117 -0
- package/ui/js/modal.js +22 -1
- package/ui/js/profiles.js +8 -0
- package/ui/js/settings.js +58 -1
package/mcp_server.py
CHANGED
|
@@ -25,14 +25,16 @@ Usage:
|
|
|
25
25
|
python3 mcp_server.py --transport http --port 8001
|
|
26
26
|
"""
|
|
27
27
|
|
|
28
|
-
from mcp.server.fastmcp import FastMCP
|
|
28
|
+
from mcp.server.fastmcp import FastMCP, Context
|
|
29
29
|
from mcp.types import ToolAnnotations
|
|
30
30
|
import sys
|
|
31
31
|
import os
|
|
32
32
|
import json
|
|
33
33
|
import re
|
|
34
|
+
import time
|
|
35
|
+
import threading
|
|
34
36
|
from pathlib import Path
|
|
35
|
-
from typing import Optional
|
|
37
|
+
from typing import Optional, Dict, List, Any
|
|
36
38
|
|
|
37
39
|
# Add src directory to path (use existing code!)
|
|
38
40
|
MEMORY_DIR = Path.home() / ".claude-memory"
|
|
@@ -72,6 +74,70 @@ try:
|
|
|
72
74
|
except ImportError:
|
|
73
75
|
LEARNING_AVAILABLE = False
|
|
74
76
|
|
|
77
|
+
# ============================================================================
|
|
78
|
+
# Synthetic Bootstrap Auto-Trigger (v2.7 — P1-12)
|
|
79
|
+
# Runs ONCE on first recall if: memory count > 50, no model, LightGBM available.
|
|
80
|
+
# Spawns in background thread — never blocks recall. All errors swallowed.
|
|
81
|
+
# ============================================================================
|
|
82
|
+
|
|
83
|
+
_bootstrap_checked = False
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
def _maybe_bootstrap():
|
|
87
|
+
"""Check if synthetic bootstrap is needed and run it in a background thread.
|
|
88
|
+
|
|
89
|
+
Called once from the first recall invocation. Sets _bootstrap_checked = True
|
|
90
|
+
immediately to prevent re-entry. The actual bootstrap runs in a daemon thread
|
|
91
|
+
so it never blocks the recall response.
|
|
92
|
+
|
|
93
|
+
Conditions for bootstrap:
|
|
94
|
+
1. LEARNING_AVAILABLE and ML_RANKING_AVAILABLE flags are True
|
|
95
|
+
2. SyntheticBootstrapper.should_bootstrap() returns True (checks:
|
|
96
|
+
- LightGBM + NumPy installed
|
|
97
|
+
- No existing model file at ~/.claude-memory/models/ranker.txt
|
|
98
|
+
- Memory count > 50)
|
|
99
|
+
|
|
100
|
+
CRITICAL: This function wraps everything in try/except. Bootstrap failure
|
|
101
|
+
must NEVER break recall. It is purely an optimization — first-time ML
|
|
102
|
+
model creation so users don't have to wait 200+ recalls for personalization.
|
|
103
|
+
"""
|
|
104
|
+
global _bootstrap_checked
|
|
105
|
+
_bootstrap_checked = True # Set immediately to prevent re-entry
|
|
106
|
+
|
|
107
|
+
try:
|
|
108
|
+
if not LEARNING_AVAILABLE:
|
|
109
|
+
return
|
|
110
|
+
if not ML_RANKING_AVAILABLE:
|
|
111
|
+
return
|
|
112
|
+
|
|
113
|
+
from learning.synthetic_bootstrap import SyntheticBootstrapper
|
|
114
|
+
bootstrapper = SyntheticBootstrapper(memory_db_path=DB_PATH)
|
|
115
|
+
|
|
116
|
+
if not bootstrapper.should_bootstrap():
|
|
117
|
+
return
|
|
118
|
+
|
|
119
|
+
# Run bootstrap in background thread — never block recall
|
|
120
|
+
import threading
|
|
121
|
+
|
|
122
|
+
def _run_bootstrap():
|
|
123
|
+
try:
|
|
124
|
+
result = bootstrapper.bootstrap_model()
|
|
125
|
+
if result:
|
|
126
|
+
import logging
|
|
127
|
+
logging.getLogger("superlocalmemory.mcp").info(
|
|
128
|
+
"Synthetic bootstrap complete: %d samples",
|
|
129
|
+
result.get('training_samples', 0)
|
|
130
|
+
)
|
|
131
|
+
except Exception:
|
|
132
|
+
pass # Bootstrap failure is never critical
|
|
133
|
+
|
|
134
|
+
thread = threading.Thread(target=_run_bootstrap, daemon=True)
|
|
135
|
+
thread.start()
|
|
136
|
+
|
|
137
|
+
except Exception:
|
|
138
|
+
pass # Any failure in bootstrap setup is swallowed silently
|
|
139
|
+
|
|
140
|
+
|
|
75
141
|
def _sanitize_error(error: Exception) -> str:
|
|
76
142
|
"""Strip internal paths and structure from error messages."""
|
|
77
143
|
msg = str(error)
|
|
@@ -186,8 +252,48 @@ def get_learning_components():
|
|
|
186
252
|
}
|
|
187
253
|
|
|
188
254
|
|
|
189
|
-
def _register_mcp_agent(agent_name: str = "mcp-client"):
|
|
190
|
-
"""
|
|
255
|
+
def _get_client_name(ctx: Optional[Context] = None) -> str:
|
|
256
|
+
"""Extract client name from MCP context, or return default.
|
|
257
|
+
|
|
258
|
+
Reads clientInfo.name from the MCP initialize handshake via
|
|
259
|
+
ctx.session.client_params. This identifies Perplexity, Codex,
|
|
260
|
+
Claude Desktop, etc. as distinct agents.
|
|
261
|
+
"""
|
|
262
|
+
if ctx:
|
|
263
|
+
try:
|
|
264
|
+
# Primary: session.client_params.clientInfo.name (from initialize handshake)
|
|
265
|
+
session = getattr(ctx, 'session', None)
|
|
266
|
+
if session:
|
|
267
|
+
params = getattr(session, 'client_params', None)
|
|
268
|
+
if params:
|
|
269
|
+
client_info = getattr(params, 'clientInfo', None)
|
|
270
|
+
if client_info:
|
|
271
|
+
name = getattr(client_info, 'name', None)
|
|
272
|
+
if name:
|
|
273
|
+
return str(name)
|
|
274
|
+
except Exception:
|
|
275
|
+
pass
|
|
276
|
+
try:
|
|
277
|
+
# Fallback: ctx.client_id (per-request, may be null)
|
|
278
|
+
client_id = ctx.client_id
|
|
279
|
+
if client_id:
|
|
280
|
+
return str(client_id)
|
|
281
|
+
except Exception:
|
|
282
|
+
pass
|
|
283
|
+
return "mcp-client"
|
|
284
|
+
|
|
285
|
+
|
|
286
|
+
def _register_mcp_agent(agent_name: str = "mcp-client", ctx: Optional[Context] = None):
|
|
287
|
+
"""Register the calling MCP agent and record activity. Non-blocking.
|
|
288
|
+
|
|
289
|
+
v2.7.4: Extracts real client name from MCP context when available,
|
|
290
|
+
so Perplexity, Codex, Claude Desktop show as distinct agents.
|
|
291
|
+
"""
|
|
292
|
+
if ctx:
|
|
293
|
+
detected = _get_client_name(ctx)
|
|
294
|
+
if detected != "mcp-client":
|
|
295
|
+
agent_name = detected
|
|
296
|
+
|
|
191
297
|
registry = get_agent_registry()
|
|
192
298
|
if registry:
|
|
193
299
|
try:
|
|
@@ -200,6 +306,264 @@ def _register_mcp_agent(agent_name: str = "mcp-client"):
|
|
|
200
306
|
pass
|
|
201
307
|
|
|
202
308
|
|
|
309
|
+
# ============================================================================
|
|
310
|
+
# RECALL BUFFER & SIGNAL INFERENCE ENGINE (v2.7.4 — Silent Learning)
|
|
311
|
+
# ============================================================================
|
|
312
|
+
# Tracks recall operations and infers implicit feedback signals from user
|
|
313
|
+
# behavior patterns. Zero user effort — all signals auto-collected.
|
|
314
|
+
#
|
|
315
|
+
# Signal Types:
|
|
316
|
+
# implicit_positive_timegap — long pause (>5min) after recall = satisfied
|
|
317
|
+
# implicit_negative_requick — quick re-query (<30s) = dissatisfied
|
|
318
|
+
# implicit_positive_reaccess — same memory in consecutive recalls
|
|
319
|
+
# implicit_positive_cross_tool — same memory recalled by different agents
|
|
320
|
+
# implicit_positive_post_update — memory updated after being recalled
|
|
321
|
+
# implicit_negative_post_delete — memory deleted after being recalled
|
|
322
|
+
#
|
|
323
|
+
# Research: Hu et al. 2008 (implicit feedback), BPR Rendle 2009 (pairwise)
|
|
324
|
+
# ============================================================================
|
|
325
|
+
|
|
326
|
+
class _RecallBuffer:
|
|
327
|
+
"""Thread-safe buffer tracking recent recall operations for signal inference.
|
|
328
|
+
|
|
329
|
+
Stores the last recall per agent_id so we can compare consecutive recalls
|
|
330
|
+
and infer whether the user found results useful.
|
|
331
|
+
|
|
332
|
+
Rate limiting: max 5 implicit signals per agent per minute to prevent gaming.
|
|
333
|
+
"""
|
|
334
|
+
|
|
335
|
+
def __init__(self):
|
|
336
|
+
self._lock = threading.Lock()
|
|
337
|
+
# {agent_id: {query, result_ids, timestamp, result_id_set}}
|
|
338
|
+
self._last_recall: Dict[str, Dict[str, Any]] = {}
|
|
339
|
+
# Global last recall (for cross-agent comparison)
|
|
340
|
+
self._global_last: Optional[Dict[str, Any]] = None
|
|
341
|
+
# Rate limiter: {agent_id: [timestamp, timestamp, ...]}
|
|
342
|
+
self._signal_timestamps: Dict[str, List[float]] = {}
|
|
343
|
+
# Set of memory_ids from the most recent recall (for post-action tracking)
|
|
344
|
+
self._recent_result_ids: set = set()
|
|
345
|
+
# Recall counter for passive decay auto-trigger
|
|
346
|
+
self._recall_count: int = 0
|
|
347
|
+
# Adaptive threshold: starts at 300s (5min), adjusts based on user patterns
|
|
348
|
+
self._positive_threshold: float = 300.0
|
|
349
|
+
self._inter_recall_times: List[float] = []
|
|
350
|
+
|
|
351
|
+
def record_recall(
|
|
352
|
+
self,
|
|
353
|
+
query: str,
|
|
354
|
+
result_ids: List[int],
|
|
355
|
+
agent_id: str = "mcp-client",
|
|
356
|
+
) -> List[Dict[str, Any]]:
|
|
357
|
+
"""Record a recall and infer signals from previous recall comparison.
|
|
358
|
+
|
|
359
|
+
Returns a list of inferred signal dicts: [{memory_id, signal_type, query}]
|
|
360
|
+
"""
|
|
361
|
+
now = time.time()
|
|
362
|
+
signals: List[Dict[str, Any]] = []
|
|
363
|
+
|
|
364
|
+
with self._lock:
|
|
365
|
+
self._recall_count += 1
|
|
366
|
+
result_id_set = set(result_ids)
|
|
367
|
+
self._recent_result_ids = result_id_set
|
|
368
|
+
|
|
369
|
+
current = {
|
|
370
|
+
"query": query,
|
|
371
|
+
"result_ids": result_ids,
|
|
372
|
+
"result_id_set": result_id_set,
|
|
373
|
+
"timestamp": now,
|
|
374
|
+
"agent_id": agent_id,
|
|
375
|
+
}
|
|
376
|
+
|
|
377
|
+
# --- Compare with previous recall from SAME agent ---
|
|
378
|
+
prev = self._last_recall.get(agent_id)
|
|
379
|
+
if prev:
|
|
380
|
+
time_gap = now - prev["timestamp"]
|
|
381
|
+
|
|
382
|
+
# Track inter-recall times for adaptive threshold
|
|
383
|
+
self._inter_recall_times.append(time_gap)
|
|
384
|
+
if len(self._inter_recall_times) > 100:
|
|
385
|
+
self._inter_recall_times = self._inter_recall_times[-100:]
|
|
386
|
+
|
|
387
|
+
# Update adaptive threshold (median of recent times, min 60s, max 1800s)
|
|
388
|
+
if len(self._inter_recall_times) >= 10:
|
|
389
|
+
sorted_times = sorted(self._inter_recall_times)
|
|
390
|
+
median = sorted_times[len(sorted_times) // 2]
|
|
391
|
+
self._positive_threshold = max(60.0, min(median * 0.8, 1800.0))
|
|
392
|
+
|
|
393
|
+
# Signal: Quick re-query with different query = negative
|
|
394
|
+
if time_gap < 30.0 and query != prev["query"]:
|
|
395
|
+
for mid in prev["result_ids"][:5]: # Top 5 only
|
|
396
|
+
signals.append({
|
|
397
|
+
"memory_id": mid,
|
|
398
|
+
"signal_type": "implicit_negative_requick",
|
|
399
|
+
"query": prev["query"],
|
|
400
|
+
"rank_position": prev["result_ids"].index(mid) + 1,
|
|
401
|
+
})
|
|
402
|
+
|
|
403
|
+
# Signal: Long pause = positive for previous results
|
|
404
|
+
elif time_gap > self._positive_threshold:
|
|
405
|
+
for mid in prev["result_ids"][:3]: # Top 3 only
|
|
406
|
+
signals.append({
|
|
407
|
+
"memory_id": mid,
|
|
408
|
+
"signal_type": "implicit_positive_timegap",
|
|
409
|
+
"query": prev["query"],
|
|
410
|
+
"rank_position": prev["result_ids"].index(mid) + 1,
|
|
411
|
+
})
|
|
412
|
+
|
|
413
|
+
# Signal: Same memory re-accessed = positive
|
|
414
|
+
overlap = result_id_set & prev["result_id_set"]
|
|
415
|
+
for mid in overlap:
|
|
416
|
+
signals.append({
|
|
417
|
+
"memory_id": mid,
|
|
418
|
+
"signal_type": "implicit_positive_reaccess",
|
|
419
|
+
"query": query,
|
|
420
|
+
})
|
|
421
|
+
|
|
422
|
+
# --- Compare with previous recall from DIFFERENT agent (cross-tool) ---
|
|
423
|
+
global_prev = self._global_last
|
|
424
|
+
if global_prev and global_prev["agent_id"] != agent_id:
|
|
425
|
+
cross_overlap = result_id_set & global_prev["result_id_set"]
|
|
426
|
+
for mid in cross_overlap:
|
|
427
|
+
signals.append({
|
|
428
|
+
"memory_id": mid,
|
|
429
|
+
"signal_type": "implicit_positive_cross_tool",
|
|
430
|
+
"query": query,
|
|
431
|
+
})
|
|
432
|
+
|
|
433
|
+
# Update buffers
|
|
434
|
+
self._last_recall[agent_id] = current
|
|
435
|
+
self._global_last = current
|
|
436
|
+
|
|
437
|
+
return signals
|
|
438
|
+
|
|
439
|
+
def check_post_action(self, memory_id: int, action: str) -> Optional[Dict[str, Any]]:
|
|
440
|
+
"""Check if a memory action (update/delete) follows a recent recall.
|
|
441
|
+
|
|
442
|
+
Returns signal dict if the memory was in recent results, else None.
|
|
443
|
+
"""
|
|
444
|
+
with self._lock:
|
|
445
|
+
if memory_id not in self._recent_result_ids:
|
|
446
|
+
return None
|
|
447
|
+
|
|
448
|
+
if action == "update":
|
|
449
|
+
return {
|
|
450
|
+
"memory_id": memory_id,
|
|
451
|
+
"signal_type": "implicit_positive_post_update",
|
|
452
|
+
"query": self._global_last["query"] if self._global_last else "",
|
|
453
|
+
}
|
|
454
|
+
elif action == "delete":
|
|
455
|
+
return {
|
|
456
|
+
"memory_id": memory_id,
|
|
457
|
+
"signal_type": "implicit_negative_post_delete",
|
|
458
|
+
"query": self._global_last["query"] if self._global_last else "",
|
|
459
|
+
}
|
|
460
|
+
return None
|
|
461
|
+
|
|
462
|
+
def check_rate_limit(self, agent_id: str, max_per_minute: int = 5) -> bool:
|
|
463
|
+
"""Return True if agent is within rate limit, False if exceeded."""
|
|
464
|
+
now = time.time()
|
|
465
|
+
with self._lock:
|
|
466
|
+
if agent_id not in self._signal_timestamps:
|
|
467
|
+
self._signal_timestamps[agent_id] = []
|
|
468
|
+
|
|
469
|
+
# Clean old timestamps (older than 60s)
|
|
470
|
+
self._signal_timestamps[agent_id] = [
|
|
471
|
+
ts for ts in self._signal_timestamps[agent_id]
|
|
472
|
+
if now - ts < 60.0
|
|
473
|
+
]
|
|
474
|
+
|
|
475
|
+
if len(self._signal_timestamps[agent_id]) >= max_per_minute:
|
|
476
|
+
return False
|
|
477
|
+
|
|
478
|
+
self._signal_timestamps[agent_id].append(now)
|
|
479
|
+
return True
|
|
480
|
+
|
|
481
|
+
def get_recall_count(self) -> int:
|
|
482
|
+
"""Get total recall count (for passive decay trigger)."""
|
|
483
|
+
with self._lock:
|
|
484
|
+
return self._recall_count
|
|
485
|
+
|
|
486
|
+
def get_stats(self) -> Dict[str, Any]:
|
|
487
|
+
"""Get buffer statistics for diagnostics."""
|
|
488
|
+
with self._lock:
|
|
489
|
+
return {
|
|
490
|
+
"recall_count": self._recall_count,
|
|
491
|
+
"tracked_agents": len(self._last_recall),
|
|
492
|
+
"positive_threshold_s": round(self._positive_threshold, 1),
|
|
493
|
+
"recent_results_count": len(self._recent_result_ids),
|
|
494
|
+
}
|
|
495
|
+
|
|
496
|
+
|
|
497
|
+
# Module-level singleton
|
|
498
|
+
_recall_buffer = _RecallBuffer()
|
|
499
|
+
|
|
500
|
+
|
|
501
|
+
def _emit_implicit_signals(signals: List[Dict[str, Any]], agent_id: str = "mcp-client") -> int:
|
|
502
|
+
"""Emit inferred implicit signals to the feedback collector.
|
|
503
|
+
|
|
504
|
+
Rate-limited: max 5 signals per agent per minute.
|
|
505
|
+
All errors swallowed — signal collection must NEVER break operations.
|
|
506
|
+
|
|
507
|
+
Returns number of signals actually stored.
|
|
508
|
+
"""
|
|
509
|
+
if not LEARNING_AVAILABLE or not signals:
|
|
510
|
+
return 0
|
|
511
|
+
|
|
512
|
+
stored = 0
|
|
513
|
+
try:
|
|
514
|
+
feedback = get_feedback_collector()
|
|
515
|
+
if not feedback:
|
|
516
|
+
return 0
|
|
517
|
+
|
|
518
|
+
for sig in signals:
|
|
519
|
+
if not _recall_buffer.check_rate_limit(agent_id):
|
|
520
|
+
break # Rate limit exceeded for this agent
|
|
521
|
+
try:
|
|
522
|
+
feedback.record_implicit_signal(
|
|
523
|
+
memory_id=sig["memory_id"],
|
|
524
|
+
query=sig.get("query", ""),
|
|
525
|
+
signal_type=sig["signal_type"],
|
|
526
|
+
source_tool=agent_id,
|
|
527
|
+
rank_position=sig.get("rank_position"),
|
|
528
|
+
)
|
|
529
|
+
stored += 1
|
|
530
|
+
except Exception:
|
|
531
|
+
pass # Individual signal failure is fine
|
|
532
|
+
except Exception:
|
|
533
|
+
pass # Never break the caller
|
|
534
|
+
|
|
535
|
+
return stored
|
|
536
|
+
|
|
537
|
+
|
|
538
|
+
def _maybe_passive_decay() -> None:
|
|
539
|
+
"""Auto-trigger passive decay every 10 recalls in a background thread."""
|
|
540
|
+
try:
|
|
541
|
+
if not LEARNING_AVAILABLE:
|
|
542
|
+
return
|
|
543
|
+
if _recall_buffer.get_recall_count() % 10 != 0:
|
|
544
|
+
return
|
|
545
|
+
|
|
546
|
+
feedback = get_feedback_collector()
|
|
547
|
+
if not feedback:
|
|
548
|
+
return
|
|
549
|
+
|
|
550
|
+
def _run_decay():
|
|
551
|
+
try:
|
|
552
|
+
count = feedback.compute_passive_decay(threshold=5)
|
|
553
|
+
if count > 0:
|
|
554
|
+
import logging
|
|
555
|
+
logging.getLogger("superlocalmemory.mcp").info(
|
|
556
|
+
"Passive decay: %d signals emitted", count
|
|
557
|
+
)
|
|
558
|
+
except Exception:
|
|
559
|
+
pass
|
|
560
|
+
|
|
561
|
+
thread = threading.Thread(target=_run_decay, daemon=True)
|
|
562
|
+
thread.start()
|
|
563
|
+
except Exception:
|
|
564
|
+
pass
|
|
565
|
+
|
|
566
|
+
|
|
203
567
|
# ============================================================================
|
|
204
568
|
# MCP TOOLS (Functions callable by AI)
|
|
205
569
|
# ============================================================================
|
|
@@ -213,7 +577,8 @@ async def remember(
|
|
|
213
577
|
content: str,
|
|
214
578
|
tags: str = "",
|
|
215
579
|
project: str = "",
|
|
216
|
-
importance: int = 5
|
|
580
|
+
importance: int = 5,
|
|
581
|
+
ctx: Context = None,
|
|
217
582
|
) -> dict:
|
|
218
583
|
"""
|
|
219
584
|
Save content to SuperLocalMemory with intelligent indexing.
|
|
@@ -240,8 +605,8 @@ async def remember(
|
|
|
240
605
|
remember("JWT auth with refresh tokens", tags="security,auth", importance=8)
|
|
241
606
|
"""
|
|
242
607
|
try:
|
|
243
|
-
# Register MCP agent (v2.5 — agent tracking)
|
|
244
|
-
_register_mcp_agent()
|
|
608
|
+
# Register MCP agent (v2.5 — agent tracking, v2.7.4 — client detection)
|
|
609
|
+
_register_mcp_agent(ctx=ctx)
|
|
245
610
|
|
|
246
611
|
# Trust enforcement (v2.6) — block untrusted agents from writing
|
|
247
612
|
try:
|
|
@@ -308,12 +673,16 @@ async def remember(
|
|
|
308
673
|
async def recall(
|
|
309
674
|
query: str,
|
|
310
675
|
limit: int = 10,
|
|
311
|
-
min_score: float = 0.3
|
|
676
|
+
min_score: float = 0.3,
|
|
677
|
+
ctx: Context = None,
|
|
312
678
|
) -> dict:
|
|
313
679
|
"""
|
|
314
680
|
Search memories using semantic similarity and knowledge graph.
|
|
681
|
+
Results are personalized based on your usage patterns — the more you
|
|
682
|
+
use SuperLocalMemory, the better results get. All learning is local.
|
|
315
683
|
|
|
316
|
-
|
|
684
|
+
After using results, call memory_used(memory_id) for memories you
|
|
685
|
+
referenced to help improve future recall quality.
|
|
317
686
|
|
|
318
687
|
Args:
|
|
319
688
|
query: Search query (required)
|
|
@@ -341,6 +710,18 @@ async def recall(
|
|
|
341
710
|
recall("FastAPI", limit=5, min_score=0.5)
|
|
342
711
|
"""
|
|
343
712
|
try:
|
|
713
|
+
# Register MCP agent (v2.7.4 — client detection for agent tab)
|
|
714
|
+
_register_mcp_agent(ctx=ctx)
|
|
715
|
+
|
|
716
|
+
# Track recall in agent registry
|
|
717
|
+
registry = get_agent_registry()
|
|
718
|
+
if registry:
|
|
719
|
+
try:
|
|
720
|
+
agent_name = _get_client_name(ctx)
|
|
721
|
+
registry.record_recall(f"mcp:{agent_name}")
|
|
722
|
+
except Exception:
|
|
723
|
+
pass
|
|
724
|
+
|
|
344
725
|
# Use existing MemoryStoreV2 class
|
|
345
726
|
store = get_store()
|
|
346
727
|
|
|
@@ -356,6 +737,10 @@ async def recall(
|
|
|
356
737
|
else:
|
|
357
738
|
results = store.search(query, limit=limit)
|
|
358
739
|
|
|
740
|
+
# v2.7: Auto-trigger synthetic bootstrap on first recall (P1-12)
|
|
741
|
+
if not _bootstrap_checked:
|
|
742
|
+
_maybe_bootstrap()
|
|
743
|
+
|
|
359
744
|
# v2.7: Learning-based re-ranking (optional, graceful fallback)
|
|
360
745
|
if LEARNING_AVAILABLE:
|
|
361
746
|
try:
|
|
@@ -377,6 +762,17 @@ async def recall(
|
|
|
377
762
|
except Exception:
|
|
378
763
|
pass # Tracking failure must never break recall
|
|
379
764
|
|
|
765
|
+
# v2.7.4: Implicit signal inference from recall patterns
|
|
766
|
+
try:
|
|
767
|
+
result_ids = [r.get('id') for r in results if r.get('id')]
|
|
768
|
+
signals = _recall_buffer.record_recall(query, result_ids)
|
|
769
|
+
if signals:
|
|
770
|
+
_emit_implicit_signals(signals)
|
|
771
|
+
# Auto-trigger passive decay every 10 recalls
|
|
772
|
+
_maybe_passive_decay()
|
|
773
|
+
except Exception:
|
|
774
|
+
pass # Signal inference must NEVER break recall
|
|
775
|
+
|
|
380
776
|
# Filter by minimum score
|
|
381
777
|
filtered_results = [
|
|
382
778
|
r for r in results
|
|
@@ -640,7 +1036,7 @@ async def backup_status() -> dict:
|
|
|
640
1036
|
@mcp.tool(annotations=ToolAnnotations(
|
|
641
1037
|
readOnlyHint=False,
|
|
642
1038
|
destructiveHint=False,
|
|
643
|
-
openWorldHint=
|
|
1039
|
+
openWorldHint=True,
|
|
644
1040
|
))
|
|
645
1041
|
async def memory_used(
|
|
646
1042
|
memory_id: int,
|
|
@@ -648,15 +1044,18 @@ async def memory_used(
|
|
|
648
1044
|
usefulness: str = "high"
|
|
649
1045
|
) -> dict:
|
|
650
1046
|
"""
|
|
651
|
-
|
|
652
|
-
|
|
1047
|
+
Call this tool whenever you use information from a recalled memory in
|
|
1048
|
+
your response. This is the most important feedback signal — it teaches
|
|
1049
|
+
SuperLocalMemory which memories are truly useful and dramatically
|
|
1050
|
+
improves future recall quality. All data stays 100% local.
|
|
653
1051
|
|
|
654
|
-
|
|
655
|
-
|
|
1052
|
+
Best practice: After using recall() results, call memory_used() for
|
|
1053
|
+
each memory ID you referenced. This takes <1ms and helps the system
|
|
1054
|
+
learn your preferences.
|
|
656
1055
|
|
|
657
1056
|
Args:
|
|
658
|
-
memory_id: ID of the useful memory
|
|
659
|
-
query: The recall query that found it (optional)
|
|
1057
|
+
memory_id: ID of the useful memory (from recall results)
|
|
1058
|
+
query: The recall query that found it (optional but recommended)
|
|
660
1059
|
usefulness: How useful - "high", "medium", or "low" (default "high")
|
|
661
1060
|
|
|
662
1061
|
Returns:
|
|
@@ -868,6 +1267,15 @@ async def search(query: str) -> dict:
|
|
|
868
1267
|
store = get_store()
|
|
869
1268
|
raw_results = store.search(query, limit=20)
|
|
870
1269
|
|
|
1270
|
+
# v2.7: Learning-based re-ranking (optional, graceful fallback)
|
|
1271
|
+
if LEARNING_AVAILABLE:
|
|
1272
|
+
try:
|
|
1273
|
+
ranker = get_adaptive_ranker()
|
|
1274
|
+
if ranker:
|
|
1275
|
+
raw_results = ranker.rerank(raw_results, query)
|
|
1276
|
+
except Exception:
|
|
1277
|
+
pass # Re-ranking failure must never break search
|
|
1278
|
+
|
|
871
1279
|
results = []
|
|
872
1280
|
for r in raw_results:
|
|
873
1281
|
if r.get('score', 0) < 0.2:
|
|
@@ -1150,7 +1558,7 @@ if __name__ == "__main__":
|
|
|
1150
1558
|
# Print startup message to stderr (stdout is used for MCP protocol)
|
|
1151
1559
|
print("=" * 60, file=sys.stderr)
|
|
1152
1560
|
print("SuperLocalMemory V2 - MCP Server", file=sys.stderr)
|
|
1153
|
-
print("Version: 2.7.2", file=sys.stderr)
|
|
1561
|
+
print("Version: 2.7.4", file=sys.stderr)
|
|
1154
1562
|
print("=" * 60, file=sys.stderr)
|
|
1155
1563
|
print("Created by: Varun Pratap Bhardwaj (Solution Architect)", file=sys.stderr)
|
|
1156
1564
|
print("Repository: https://github.com/varun369/SuperLocalMemoryV2", file=sys.stderr)
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "superlocalmemory",
|
|
3
|
-
"version": "2.7.2",
|
|
3
|
+
"version": "2.7.4",
|
|
4
4
|
"description": "Your AI Finally Remembers You - Local-first intelligent memory system for AI assistants. Works with Claude, Cursor, Windsurf, VS Code/Copilot, Codex, and 17+ AI tools. 100% local, zero cloud dependencies.",
|
|
5
5
|
"keywords": [
|
|
6
6
|
"ai-memory",
|
package/src/agent_registry.py
CHANGED
|
@@ -98,7 +98,7 @@ class AgentRegistry:
|
|
|
98
98
|
last_seen TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
|
99
99
|
memories_written INTEGER DEFAULT 0,
|
|
100
100
|
memories_recalled INTEGER DEFAULT 0,
|
|
101
|
-
trust_score REAL DEFAULT 1.0,
|
|
101
|
+
trust_score REAL DEFAULT 0.667,
|
|
102
102
|
metadata TEXT DEFAULT '{}'
|
|
103
103
|
)
|
|
104
104
|
''')
|
|
@@ -126,7 +126,7 @@ class AgentRegistry:
|
|
|
126
126
|
last_seen TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
|
|
127
127
|
memories_written INTEGER DEFAULT 0,
|
|
128
128
|
memories_recalled INTEGER DEFAULT 0,
|
|
129
|
-
trust_score REAL DEFAULT 1.0,
|
|
129
|
+
trust_score REAL DEFAULT 0.667,
|
|
130
130
|
metadata TEXT DEFAULT '{}'
|
|
131
131
|
)
|
|
132
132
|
''')
|
|
@@ -150,7 +150,7 @@ class AgentRegistry:
|
|
|
150
150
|
Register or update an agent in the registry.
|
|
151
151
|
|
|
152
152
|
If the agent already exists, updates last_seen and metadata.
|
|
153
|
-
If new, creates the entry with trust_score=1.
|
|
153
|
+
If new, creates the entry with trust_score=0.667 (Beta(2,1) prior).
|
|
154
154
|
|
|
155
155
|
Args:
|
|
156
156
|
agent_id: Unique identifier (e.g., "mcp:claude-desktop")
|