superlocalmemory 2.6.0 → 2.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +167 -1803
- package/README.md +212 -397
- package/bin/slm +179 -3
- package/bin/superlocalmemoryv2:learning +4 -0
- package/bin/superlocalmemoryv2:patterns +4 -0
- package/docs/ACCESSIBILITY.md +291 -0
- package/docs/ARCHITECTURE.md +12 -6
- package/docs/FRAMEWORK-INTEGRATIONS.md +300 -0
- package/docs/MCP-MANUAL-SETUP.md +14 -4
- package/install.sh +99 -3
- package/mcp_server.py +291 -1
- package/package.json +2 -1
- package/requirements-learning.txt +12 -0
- package/scripts/verify-v27.sh +233 -0
- package/skills/slm-show-patterns/SKILL.md +224 -0
- package/src/learning/__init__.py +201 -0
- package/src/learning/adaptive_ranker.py +826 -0
- package/src/learning/cross_project_aggregator.py +866 -0
- package/src/learning/engagement_tracker.py +638 -0
- package/src/learning/feature_extractor.py +461 -0
- package/src/learning/feedback_collector.py +690 -0
- package/src/learning/learning_db.py +842 -0
- package/src/learning/project_context_manager.py +582 -0
- package/src/learning/source_quality_scorer.py +685 -0
- package/src/learning/synthetic_bootstrap.py +1047 -0
- package/src/learning/tests/__init__.py +0 -0
- package/src/learning/tests/test_adaptive_ranker.py +328 -0
- package/src/learning/tests/test_aggregator.py +309 -0
- package/src/learning/tests/test_feedback_collector.py +295 -0
- package/src/learning/tests/test_learning_db.py +606 -0
- package/src/learning/tests/test_project_context.py +296 -0
- package/src/learning/tests/test_source_quality.py +355 -0
- package/src/learning/tests/test_synthetic_bootstrap.py +433 -0
- package/src/learning/tests/test_workflow_miner.py +322 -0
- package/src/learning/workflow_pattern_miner.py +665 -0
- package/ui/index.html +346 -13
- package/ui/js/clusters.js +90 -1
- package/ui/js/graph-core.js +445 -0
- package/ui/js/graph-cytoscape-monolithic-backup.js +1168 -0
- package/ui/js/graph-cytoscape.js +1168 -0
- package/ui/js/graph-d3-backup.js +32 -0
- package/ui/js/graph-filters.js +220 -0
- package/ui/js/graph-interactions.js +354 -0
- package/ui/js/graph-ui.js +214 -0
- package/ui/js/memories.js +52 -0
- package/ui/js/modal.js +104 -1
package/mcp_server.py
CHANGED
|
@@ -63,6 +63,15 @@ try:
|
|
|
63
63
|
except ImportError:
|
|
64
64
|
TRUST_AVAILABLE = False
|
|
65
65
|
|
|
66
|
+
# Learning System (v2.7+)
|
|
67
|
+
try:
|
|
68
|
+
sys.path.insert(0, str(Path(__file__).parent / "src"))
|
|
69
|
+
from learning import get_learning_db, get_adaptive_ranker, get_feedback_collector, get_engagement_tracker, get_status as get_learning_status
|
|
70
|
+
from learning import FULL_LEARNING_AVAILABLE, ML_RANKING_AVAILABLE
|
|
71
|
+
LEARNING_AVAILABLE = True
|
|
72
|
+
except ImportError:
|
|
73
|
+
LEARNING_AVAILABLE = False
|
|
74
|
+
|
|
66
75
|
def _sanitize_error(error: Exception) -> str:
|
|
67
76
|
"""Strip internal paths and structure from error messages."""
|
|
68
77
|
msg = str(error)
|
|
@@ -165,6 +174,18 @@ def get_trust_scorer():
|
|
|
165
174
|
return _trust_scorer
|
|
166
175
|
|
|
167
176
|
|
|
177
|
+
def get_learning_components():
    """Return the learning system components as a dict, or None if unavailable.

    Keys: 'db', 'ranker', 'feedback', 'engagement' — each produced by the
    corresponding lazy singleton getter from the learning package.
    """
    if not LEARNING_AVAILABLE:
        return None
    components = {}
    for name, getter in (
        ('db', get_learning_db),
        ('ranker', get_adaptive_ranker),
        ('feedback', get_feedback_collector),
        ('engagement', get_engagement_tracker),
    ):
        components[name] = getter()
    return components
|
|
187
|
+
|
|
188
|
+
|
|
168
189
|
def _register_mcp_agent(agent_name: str = "mcp-client"):
|
|
169
190
|
"""Register the calling MCP agent and record activity. Non-blocking."""
|
|
170
191
|
registry = get_agent_registry()
|
|
@@ -335,6 +356,27 @@ async def recall(
|
|
|
335
356
|
else:
|
|
336
357
|
results = store.search(query, limit=limit)
|
|
337
358
|
|
|
359
|
+
# v2.7: Learning-based re-ranking (optional, graceful fallback)
|
|
360
|
+
if LEARNING_AVAILABLE:
|
|
361
|
+
try:
|
|
362
|
+
ranker = get_adaptive_ranker()
|
|
363
|
+
if ranker:
|
|
364
|
+
results = ranker.rerank(results, query)
|
|
365
|
+
except Exception:
|
|
366
|
+
pass # Re-ranking failure must never break recall
|
|
367
|
+
|
|
368
|
+
# Track recall for passive feedback decay
|
|
369
|
+
if LEARNING_AVAILABLE:
|
|
370
|
+
try:
|
|
371
|
+
feedback = get_feedback_collector()
|
|
372
|
+
if feedback:
|
|
373
|
+
feedback.record_recall_results(query, [r.get('id') for r in results if r.get('id')])
|
|
374
|
+
tracker = get_engagement_tracker()
|
|
375
|
+
if tracker:
|
|
376
|
+
tracker.record_activity('recall_performed', source='mcp')
|
|
377
|
+
except Exception:
|
|
378
|
+
pass # Tracking failure must never break recall
|
|
379
|
+
|
|
338
380
|
# Filter by minimum score
|
|
339
381
|
filtered_results = [
|
|
340
382
|
r for r in results
|
|
@@ -591,6 +633,212 @@ async def backup_status() -> dict:
|
|
|
591
633
|
}
|
|
592
634
|
|
|
593
635
|
|
|
636
|
+
# ============================================================================
|
|
637
|
+
# LEARNING TOOLS (v2.7 — feedback, transparency, user control)
|
|
638
|
+
# ============================================================================
|
|
639
|
+
|
|
640
|
+
@mcp.tool(annotations=ToolAnnotations(
    readOnlyHint=False,
    destructiveHint=False,
    openWorldHint=False,
))
async def memory_used(
    memory_id: int,
    query: str = "",
    usefulness: str = "high"
) -> dict:
    """
    Record that a recalled memory was actually useful.

    Call this after referencing or applying a memory from recall results;
    the explicit signal feeds SuperLocalMemory's learning system so future
    recall ranking improves.

    Args:
        memory_id: ID of the useful memory.
        query: The recall query that surfaced it (optional).
        usefulness: "high", "medium", or "low" (default "high").

    Returns:
        {"success": bool, "message": str}
    """
    try:
        if not LEARNING_AVAILABLE:
            return {"success": False, "message": "Learning features not available. Install: pip3 install lightgbm scipy"}

        collector = get_feedback_collector()
        if collector is None:
            return {"success": False, "message": "Feedback collector not initialized"}

        # Forward the explicit usefulness signal to the feedback store.
        collector.record_memory_used(
            memory_id=memory_id,
            query=query,
            usefulness=usefulness,
            source_tool="mcp-client",
        )
        return {
            "success": True,
            "message": f"Feedback recorded for memory #{memory_id} (usefulness: {usefulness})"
        }
    except Exception as e:
        return {"success": False, "error": _sanitize_error(e)}
|
|
686
|
+
|
|
687
|
+
|
|
688
|
+
@mcp.tool(annotations=ToolAnnotations(
    readOnlyHint=True,
    destructiveHint=False,
    openWorldHint=False,
))
async def get_learned_patterns(
    min_confidence: float = 0.6,
    category: str = "all"
) -> dict:
    """
    See what SuperLocalMemory has learned about your preferences,
    projects, and workflow patterns.

    Args:
        min_confidence: Minimum confidence threshold 0.0-1.0 (default 0.6)
        category: Filter by "tech", "workflow", "project", or "all" (default "all")

    Returns:
        {
            "success": bool,
            "patterns": {
                "tech_preferences": [...],
                "workflow_patterns": [...],
            },
            "ranking_phase": str,
            "feedback_count": int,
            "stats": dict
        }

    Note:
        "ranking_phase" and "feedback_count" are only present when an
        adaptive ranker is available — callers should treat them as optional.
        "patterns" sub-keys are also omitted when filtered out by `category`.
    """
    try:
        if not LEARNING_AVAILABLE:
            return {"success": False, "message": "Learning features not available. Install: pip3 install lightgbm scipy", "patterns": {}}

        ldb = get_learning_db()
        if ldb is None:
            return {"success": False, "message": "Learning database not initialized", "patterns": {}}

        result = {"success": True, "patterns": {}}

        # Tech preferences (Layer 1)
        if category in ("all", "tech"):
            patterns = ldb.get_transferable_patterns(min_confidence=min_confidence)
            # NOTE(review): assumes each row is dict-like with these exact keys —
            # confirm against learning_db's row schema.
            result["patterns"]["tech_preferences"] = [
                {
                    "id": p["id"],
                    "type": p["pattern_type"],
                    "key": p["key"],
                    "value": p["value"],
                    "confidence": round(p["confidence"], 2),
                    "evidence": p["evidence_count"],
                    "profiles_seen": p["profiles_seen"],
                }
                for p in patterns
            ]

        # Workflow patterns (Layer 3)
        if category in ("all", "workflow"):
            workflows = ldb.get_workflow_patterns(min_confidence=min_confidence)
            result["patterns"]["workflow_patterns"] = [
                {
                    "id": p["id"],
                    "type": p["pattern_type"],
                    # Workflow rows use pattern_key/pattern_value column names,
                    # unlike the transferable-pattern rows above.
                    "key": p["pattern_key"],
                    "value": p["pattern_value"],
                    "confidence": round(p["confidence"], 2),
                }
                for p in workflows
            ]

        # Ranking phase info — only attached when a ranker exists.
        ranker = get_adaptive_ranker()
        if ranker:
            result["ranking_phase"] = ranker.get_phase()
            result["feedback_count"] = ldb.get_feedback_count()

        # Learning stats
        result["stats"] = ldb.get_stats()

        return result
    except Exception as e:
        return {"success": False, "error": _sanitize_error(e), "patterns": {}}
|
|
768
|
+
|
|
769
|
+
|
|
770
|
+
@mcp.tool(annotations=ToolAnnotations(
    readOnlyHint=False,
    destructiveHint=False,
    openWorldHint=False,
))
async def correct_pattern(
    pattern_id: int,
    correct_value: str,
    reason: str = ""
) -> dict:
    """
    Correct a learned pattern that is wrong. Use get_learned_patterns first
    to see pattern IDs.

    Args:
        pattern_id: ID of the pattern to correct
        correct_value: The correct value (e.g., "Vue" instead of "React")
        reason: Why the correction (optional)

    Returns:
        {"success": bool, "message": str}
    """
    try:
        if not LEARNING_AVAILABLE:
            return {"success": False, "message": "Learning features not available"}

        ldb = get_learning_db()
        if ldb is None:
            return {"success": False, "message": "Learning database not initialized"}

        # Get existing pattern.
        # NOTE(review): reaches into a private connection helper — confirm the
        # learning DB does not pool/share this connection before it is closed
        # in the finally block below.
        conn = ldb._get_connection()
        try:
            cursor = conn.cursor()
            cursor.execute('SELECT * FROM transferable_patterns WHERE id = ?', (pattern_id,))
            pattern = cursor.fetchone()
            if not pattern:
                return {"success": False, "message": f"Pattern #{pattern_id} not found"}

            # assumes a mapping row factory (e.g. sqlite3.Row) — confirm
            old_value = pattern['value']

            # Update the pattern with correction
            ldb.upsert_transferable_pattern(
                pattern_type=pattern['pattern_type'],
                key=pattern['key'],
                value=correct_value,
                confidence=1.0,  # User correction = maximum confidence
                evidence_count=pattern['evidence_count'] + 1,
                profiles_seen=pattern['profiles_seen'],
                contradictions=[f"Corrected from '{old_value}' to '{correct_value}': {reason}"],
            )

            # Record as negative feedback for the old value.
            # NOTE(review): memory_id=0 is used as a "no specific memory"
            # sentinel — verify the feedback store treats 0 that way.
            feedback = get_feedback_collector()
            if feedback:
                feedback.record_memory_used(
                    memory_id=0,  # No specific memory
                    query=f"correction:{pattern['key']}",
                    usefulness="low",
                    source_tool="mcp-correction",
                )

            return {
                "success": True,
                "message": f"Pattern '{pattern['key']}' corrected: '{old_value}' → '{correct_value}'"
            }
        finally:
            conn.close()
    except Exception as e:
        return {"success": False, "error": _sanitize_error(e)}
|
|
840
|
+
|
|
841
|
+
|
|
594
842
|
# ============================================================================
|
|
595
843
|
# CHATGPT CONNECTOR TOOLS (search + fetch — required by OpenAI MCP spec)
|
|
596
844
|
# These two tools are required for ChatGPT Connectors and Deep Research.
|
|
@@ -758,6 +1006,41 @@ async def get_coding_identity_resource() -> str:
|
|
|
758
1006
|
return json.dumps({"error": _sanitize_error(e)}, indent=2)
|
|
759
1007
|
|
|
760
1008
|
|
|
1009
|
+
@mcp.resource("memory://learning/status")
async def get_learning_status_resource() -> str:
    """
    Resource: Get learning system status.

    Usage: memory://learning/status
    """
    try:
        if LEARNING_AVAILABLE:
            return json.dumps(get_learning_status(), indent=2)
        return json.dumps({"available": False, "message": "Learning deps not installed"}, indent=2)
    except Exception as e:
        return json.dumps({"error": _sanitize_error(e)}, indent=2)
|
|
1023
|
+
|
|
1024
|
+
|
|
1025
|
+
@mcp.resource("memory://engagement")
async def get_engagement_resource() -> str:
    """
    Resource: Get engagement metrics.

    Usage: memory://engagement
    """
    unavailable = json.dumps({"available": False}, indent=2)
    try:
        if not LEARNING_AVAILABLE:
            return unavailable
        tracker = get_engagement_tracker()
        if not tracker:
            return unavailable
        return json.dumps(tracker.get_engagement_stats(), indent=2)
    except Exception as e:
        return json.dumps({"error": _sanitize_error(e)}, indent=2)
|
|
1042
|
+
|
|
1043
|
+
|
|
761
1044
|
# ============================================================================
|
|
762
1045
|
# MCP PROMPTS (Template injection)
|
|
763
1046
|
# ============================================================================
|
|
@@ -867,7 +1150,7 @@ if __name__ == "__main__":
|
|
|
867
1150
|
# Print startup message to stderr (stdout is used for MCP protocol)
|
|
868
1151
|
print("=" * 60, file=sys.stderr)
|
|
869
1152
|
print("SuperLocalMemory V2 - MCP Server", file=sys.stderr)
|
|
870
|
-
print("Version: 2.
|
|
1153
|
+
print("Version: 2.7.0", file=sys.stderr)
|
|
871
1154
|
print("=" * 60, file=sys.stderr)
|
|
872
1155
|
print("Created by: Varun Pratap Bhardwaj (Solution Architect)", file=sys.stderr)
|
|
873
1156
|
print("Repository: https://github.com/varun369/SuperLocalMemoryV2", file=sys.stderr)
|
|
@@ -891,12 +1174,19 @@ if __name__ == "__main__":
|
|
|
891
1174
|
print(" - build_graph()", file=sys.stderr)
|
|
892
1175
|
print(" - switch_profile(name)", file=sys.stderr)
|
|
893
1176
|
print(" - backup_status() [Auto-Backup]", file=sys.stderr)
|
|
1177
|
+
if LEARNING_AVAILABLE:
|
|
1178
|
+
print(" - memory_used(memory_id, query, usefulness) [v2.7 Learning]", file=sys.stderr)
|
|
1179
|
+
print(" - get_learned_patterns(min_confidence, category) [v2.7 Learning]", file=sys.stderr)
|
|
1180
|
+
print(" - correct_pattern(pattern_id, correct_value) [v2.7 Learning]", file=sys.stderr)
|
|
894
1181
|
print("", file=sys.stderr)
|
|
895
1182
|
print("MCP Resources Available:", file=sys.stderr)
|
|
896
1183
|
print(" - memory://recent/{limit}", file=sys.stderr)
|
|
897
1184
|
print(" - memory://stats", file=sys.stderr)
|
|
898
1185
|
print(" - memory://graph/clusters", file=sys.stderr)
|
|
899
1186
|
print(" - memory://patterns/identity", file=sys.stderr)
|
|
1187
|
+
if LEARNING_AVAILABLE:
|
|
1188
|
+
print(" - memory://learning/status", file=sys.stderr)
|
|
1189
|
+
print(" - memory://engagement", file=sys.stderr)
|
|
900
1190
|
print("", file=sys.stderr)
|
|
901
1191
|
print("MCP Prompts Available:", file=sys.stderr)
|
|
902
1192
|
print(" - coding_identity_prompt()", file=sys.stderr)
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "superlocalmemory",
|
|
3
|
-
"version": "2.
|
|
3
|
+
"version": "2.7.0",
|
|
4
4
|
"description": "Your AI Finally Remembers You - Local-first intelligent memory system for AI assistants. Works with Claude, Cursor, Windsurf, VS Code/Copilot, Codex, and 17+ AI tools. 100% local, zero cloud dependencies.",
|
|
5
5
|
"keywords": [
|
|
6
6
|
"ai-memory",
|
|
@@ -76,6 +76,7 @@
|
|
|
76
76
|
"docs/",
|
|
77
77
|
"requirements.txt",
|
|
78
78
|
"requirements-core.txt",
|
|
79
|
+
"requirements-learning.txt",
|
|
79
80
|
"ui/",
|
|
80
81
|
"ui_server.py",
|
|
81
82
|
"api_server.py"
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
# SuperLocalMemory v2.7 - Learning Dependencies
|
|
2
|
+
# ============================================================================
|
|
3
|
+
# Optional but recommended. Enables intelligent pattern learning and
|
|
4
|
+
# personalized recall ranking.
|
|
5
|
+
#
|
|
6
|
+
# If installation fails, core features work normally (v2.6 behavior).
|
|
7
|
+
# To install: pip3 install -r requirements-learning.txt
|
|
8
|
+
#
|
|
9
|
+
# Download size: ~30MB
|
|
10
|
+
# ============================================================================
|
|
11
|
+
lightgbm>=4.0.0
|
|
12
|
+
scipy>=1.9.0
|
|
@@ -0,0 +1,233 @@
|
|
|
1
|
+
#!/bin/bash
# ============================================================================
# SuperLocalMemory V2.7 — Quick Verification Script
# Copyright (c) 2026 Varun Pratap Bhardwaj
# Licensed under MIT License
# Repository: https://github.com/varun369/SuperLocalMemoryV2
#
# Run this after installation to verify everything works:
#   bash scripts/verify-v27.sh
#
# Exit status: 0 when no check FAILs (warnings allowed), 1 otherwise.
# ============================================================================

INSTALL_DIR="${HOME}/.claude-memory"
PASS=0
WARN=0
FAIL=0

echo ""
echo "SuperLocalMemory v2.7 Verification"
echo "==================================="
echo ""

# ── Check 1: Installation directory ──────────────────────────────────────────
# Hard prerequisite: bail out immediately if the install dir is missing.
if [ -d "$INSTALL_DIR" ]; then
  echo "[PASS] Installation directory exists: $INSTALL_DIR"
  PASS=$((PASS + 1))
else
  echo "[FAIL] Installation directory missing. Run install.sh first."
  FAIL=$((FAIL + 1))
  echo ""
  echo "==================================="
  echo "Result: FAIL — SuperLocalMemory is not installed."
  echo "Run: bash install.sh"
  exit 1
fi

# ── Check 2: Core modules ────────────────────────────────────────────────────
# Missing core modules are hard failures.
echo ""
echo "Core Modules:"
for mod in memory_store_v2.py graph_engine.py pattern_learner.py mcp_server.py tree_manager.py; do
  if [ -f "$INSTALL_DIR/$mod" ]; then
    echo " [PASS] $mod"
    PASS=$((PASS + 1))
  else
    echo " [FAIL] Missing: $mod"
    FAIL=$((FAIL + 1))
  fi
done

# ── Check 3: v2.5 modules ────────────────────────────────────────────────────
# Event-system modules are optional (v2.5 feature) — warn only.
echo ""
echo "Event System (v2.5):"
for mod in event_bus.py subscription_manager.py webhook_dispatcher.py agent_registry.py provenance_tracker.py trust_scorer.py db_connection_manager.py; do
  if [ -f "$INSTALL_DIR/$mod" ]; then
    echo " [PASS] $mod"
    PASS=$((PASS + 1))
  else
    echo " [WARN] Missing: $mod (v2.5 feature)"
    WARN=$((WARN + 1))
  fi
done

# ── Check 4: Learning modules (v2.7) ─────────────────────────────────────────
# The learning/ directory itself is required; individual modules warn only.
echo ""
echo "Learning System (v2.7):"
if [ -d "$INSTALL_DIR/learning" ]; then
  echo " [PASS] learning/ directory exists"
  PASS=$((PASS + 1))

  for mod in __init__.py learning_db.py adaptive_ranker.py feedback_collector.py \
      engagement_tracker.py cross_project_aggregator.py project_context_manager.py \
      workflow_pattern_miner.py source_quality_scorer.py synthetic_bootstrap.py \
      feature_extractor.py; do
    if [ -f "$INSTALL_DIR/learning/$mod" ]; then
      echo " [PASS] learning/$mod"
      PASS=$((PASS + 1))
    else
      echo " [WARN] Missing: learning/$mod"
      WARN=$((WARN + 1))
    fi
  done
else
  echo " [FAIL] learning/ directory missing (v2.7 not fully installed)"
  FAIL=$((FAIL + 1))
fi

# ── Check 5: Learning dependencies ───────────────────────────────────────────
# NOTE(review): these are printed as [INFO] but still increment WARN.
echo ""
echo "Learning Dependencies:"
python3 -c "import lightgbm; print(f' [PASS] LightGBM {lightgbm.__version__}')" 2>/dev/null || {
  echo " [INFO] LightGBM not installed (optional — rule-based ranking will be used)"
  WARN=$((WARN + 1))
}
python3 -c "import scipy; print(f' [PASS] SciPy {scipy.__version__}')" 2>/dev/null || {
  echo " [INFO] SciPy not installed (optional — install for full learning features)"
  WARN=$((WARN + 1))
}

# ── Check 6: Core dependencies ───────────────────────────────────────────────
echo ""
echo "Core Dependencies:"
python3 -c "import sklearn; print(f' [PASS] scikit-learn {sklearn.__version__}')" 2>/dev/null || {
  echo " [WARN] scikit-learn not installed (needed for knowledge graph)"
  WARN=$((WARN + 1))
}
python3 -c "import numpy; print(f' [PASS] numpy {numpy.__version__}')" 2>/dev/null || {
  echo " [WARN] numpy not installed"
  WARN=$((WARN + 1))
}
python3 -c "import igraph; print(f' [PASS] python-igraph {igraph.__version__}')" 2>/dev/null || {
  echo " [WARN] python-igraph not installed (needed for graph clustering)"
  WARN=$((WARN + 1))
}

# ── Check 7: Database ────────────────────────────────────────────────────────
# Databases are created lazily, so absence is informational only.
echo ""
echo "Databases:"
if [ -f "$INSTALL_DIR/memory.db" ]; then
  MEMORY_COUNT=$(sqlite3 "$INSTALL_DIR/memory.db" "SELECT COUNT(*) FROM memories;" 2>/dev/null || echo "0")
  DB_SIZE=$(du -h "$INSTALL_DIR/memory.db" 2>/dev/null | cut -f1)
  echo " [PASS] memory.db exists ($MEMORY_COUNT memories, $DB_SIZE)"
  PASS=$((PASS + 1))
else
  echo " [INFO] memory.db not yet created (will auto-create on first use)"
fi

if [ -f "$INSTALL_DIR/learning.db" ]; then
  FEEDBACK_COUNT=$(sqlite3 "$INSTALL_DIR/learning.db" "SELECT COUNT(*) FROM ranking_feedback;" 2>/dev/null || echo "0")
  echo " [PASS] learning.db exists ($FEEDBACK_COUNT feedback signals)"
  PASS=$((PASS + 1))
else
  echo " [INFO] learning.db not yet created (will auto-create on first recall)"
fi

# ── Check 8: CLI ──────────────────────────────────────────────────────────────
echo ""
echo "CLI:"
if command -v slm &> /dev/null; then
  echo " [PASS] slm command available in PATH"
  PASS=$((PASS + 1))
else
  if [ -f "$INSTALL_DIR/bin/slm" ]; then
    echo " [WARN] slm exists at $INSTALL_DIR/bin/slm but not in PATH"
    echo " Add to PATH: export PATH=\"\$HOME/.claude-memory/bin:\$PATH\""
    WARN=$((WARN + 1))
  else
    echo " [FAIL] slm command not found"
    FAIL=$((FAIL + 1))
  fi
fi

# ── Check 9: MCP server ──────────────────────────────────────────────────────
echo ""
echo "MCP Server:"
if [ -f "$INSTALL_DIR/mcp_server.py" ]; then
  echo " [PASS] mcp_server.py installed"
  PASS=$((PASS + 1))
else
  echo " [FAIL] mcp_server.py missing"
  FAIL=$((FAIL + 1))
fi

if python3 -c "from mcp.server.fastmcp import FastMCP" 2>/dev/null; then
  echo " [PASS] MCP SDK installed"
  PASS=$((PASS + 1))
else
  echo " [WARN] MCP SDK not installed (install: pip3 install mcp)"
  WARN=$((WARN + 1))
fi

# ── Check 10: Import chain verification ───────────────────────────────────────
# Run the actual import the MCP server performs; classify the outcome by the
# printed prefix (OK / IMPORT_ERROR / ERROR). $INSTALL_DIR is interpolated by
# the shell into the embedded Python.
echo ""
echo "Import Chain:"
IMPORT_RESULT=$(python3 -c "
import sys
sys.path.insert(0, '$INSTALL_DIR')
try:
    from learning import get_learning_db, get_status, FULL_LEARNING_AVAILABLE, ML_RANKING_AVAILABLE
    status = get_status()
    ml = 'yes' if status['ml_ranking_available'] else 'no'
    full = 'yes' if status['learning_available'] else 'no'
    print(f'OK ml_ranking={ml} full_learning={full}')
except ImportError as e:
    print(f'IMPORT_ERROR {e}')
except Exception as e:
    print(f'ERROR {e}')
" 2>&1)

if [[ "$IMPORT_RESULT" == OK* ]]; then
  echo " [PASS] Learning system imports successfully"
  echo " $IMPORT_RESULT"
  PASS=$((PASS + 1))
elif [[ "$IMPORT_RESULT" == IMPORT_ERROR* ]]; then
  echo " [WARN] Learning import failed: ${IMPORT_RESULT#IMPORT_ERROR }"
  echo " This may be normal if learning modules are not yet installed."
  WARN=$((WARN + 1))
else
  echo " [WARN] Learning check: $IMPORT_RESULT"
  WARN=$((WARN + 1))
fi

# ── Summary ───────────────────────────────────────────────────────────────────
echo ""
echo "==================================="
echo "Verification Summary"
echo " Passed: $PASS"
echo " Warnings: $WARN"
echo " Failed: $FAIL"
echo "==================================="
echo ""

if [ $FAIL -eq 0 ]; then
  echo "Status: READY"
  echo ""
  echo "Quick start:"
  echo " slm remember \"My first memory\""
  echo " slm recall \"first\""
  echo " slm status"
  echo ""
  if [ $WARN -gt 0 ]; then
    echo "Some optional features may not be available."
    echo "Install missing dependencies to enable them:"
    echo " pip3 install lightgbm scipy # Learning system"
    echo " pip3 install scikit-learn igraph # Knowledge graph"
    echo ""
  fi
else
  echo "Status: INCOMPLETE"
  echo ""
  echo "Fix the failed checks above, then re-run:"
  echo " bash scripts/verify-v27.sh"
  echo ""
  exit 1
fi
|