superlocalmemory 2.6.5 → 2.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/install.sh CHANGED
@@ -57,18 +57,87 @@ if [ "$NON_INTERACTIVE" = true ]; then
57
57
  echo ""
58
58
  fi
59
59
 
60
- # Check Python version
60
+ # Check Python version — install if missing (non-tech user friendly)
61
61
  echo "Checking Python version..."
62
+
63
+ install_python() {
64
+ echo ""
65
+ echo "Python 3 not found. Attempting automatic installation..."
66
+ if [ "$(uname)" = "Darwin" ]; then
67
+ # macOS: try Homebrew first, then Xcode CLI tools
68
+ if command -v brew &> /dev/null; then
69
+ echo "Installing Python via Homebrew..."
70
+ brew install python3 && return 0
71
+ fi
72
+ # Try installing Xcode Command Line Tools (includes Python 3)
73
+ echo "Installing Xcode Command Line Tools (includes Python 3)..."
74
+ echo "A system dialog may appear — click 'Install' to continue."
75
+ xcode-select --install 2>/dev/null
76
+ # Wait for user to complete the install dialog
77
+ echo "Waiting for Xcode CLI tools installation to complete..."
78
+ echo "Press Enter after the installation finishes."
79
+ if [ "$NON_INTERACTIVE" = false ]; then
80
+ read -r
81
+ else
82
+ # In non-interactive mode, wait and retry
83
+ sleep 30
84
+ fi
85
+ if command -v python3 &> /dev/null; then
86
+ return 0
87
+ fi
88
+ # Last resort: direct Python.org installer
89
+ echo ""
90
+ echo "Automatic installation could not complete."
91
+ echo "Please install Python 3.8+ from: https://www.python.org/downloads/"
92
+ echo "Then re-run this installer."
93
+ return 1
94
+ elif [ -f /etc/debian_version ]; then
95
+ # Debian/Ubuntu
96
+ echo "Installing Python via apt..."
97
+ sudo apt-get update -qq && sudo apt-get install -y python3 python3-pip && return 0
98
+ elif [ -f /etc/redhat-release ]; then
99
+ # RHEL/CentOS/Fedora
100
+ echo "Installing Python via dnf..."
101
+ sudo dnf install -y python3 python3-pip && return 0
102
+ elif [ -f /etc/arch-release ]; then
103
+ # Arch Linux
104
+ sudo pacman -S --noconfirm python python-pip && return 0
105
+ fi
106
+ echo "Could not auto-install Python. Please install Python 3.8+ manually."
107
+ echo " macOS: brew install python3"
108
+ echo " Ubuntu: sudo apt install python3 python3-pip"
109
+ echo " Fedora: sudo dnf install python3 python3-pip"
110
+ return 1
111
+ }
112
+
113
+ if ! command -v python3 &> /dev/null; then
114
+ install_python || exit 1
115
+ fi
116
+
62
117
  PYTHON_VERSION=$(python3 -c 'import sys; print(f"{sys.version_info.major}.{sys.version_info.minor}")')
63
118
  PYTHON_MAJOR=$(python3 -c 'import sys; print(sys.version_info.major)')
64
119
  PYTHON_MINOR=$(python3 -c 'import sys; print(sys.version_info.minor)')
65
120
 
66
121
  if [ "$PYTHON_MAJOR" -lt 3 ] || ([ "$PYTHON_MAJOR" -eq 3 ] && [ "$PYTHON_MINOR" -lt 8 ]); then
67
- echo " Error: Python 3.8+ required (found $PYTHON_VERSION)"
68
- exit 1
122
+ echo "Python $PYTHON_VERSION found but 3.8+ required."
123
+ install_python || exit 1
124
+ # Re-check after install
125
+ PYTHON_VERSION=$(python3 -c 'import sys; print(f"{sys.version_info.major}.{sys.version_info.minor}")')
126
+ PYTHON_MAJOR=$(python3 -c 'import sys; print(sys.version_info.major)')
127
+ PYTHON_MINOR=$(python3 -c 'import sys; print(sys.version_info.minor)')
128
+ if [ "$PYTHON_MAJOR" -lt 3 ] || ([ "$PYTHON_MAJOR" -eq 3 ] && [ "$PYTHON_MINOR" -lt 8 ]); then
129
+ echo "✗ Error: Python 3.8+ still not available after install attempt"
130
+ exit 1
131
+ fi
69
132
  fi
70
133
  echo "✓ Python $PYTHON_VERSION"
71
134
 
135
+ # Ensure pip3 is available
136
+ if ! command -v pip3 &> /dev/null; then
137
+ echo "Installing pip..."
138
+ python3 -m ensurepip --upgrade 2>/dev/null || python3 -c "import urllib.request; urllib.request.urlretrieve('https://bootstrap.pypa.io/get-pip.py', '/tmp/get-pip.py')" && python3 /tmp/get-pip.py 2>/dev/null || true
139
+ fi
140
+
72
141
  # Create installation directory
73
142
  echo ""
74
143
  echo "Creating installation directory..."
@@ -88,6 +157,13 @@ echo "Copying source files..."
88
157
  cp -r "${REPO_DIR}/src/"* "${INSTALL_DIR}/"
89
158
  echo "✓ Source files copied"
90
159
 
160
+ # Copy learning modules explicitly (v2.7+ — ensures nested dir is handled)
161
+ if [ -d "${REPO_DIR}/src/learning" ]; then
162
+ mkdir -p "${INSTALL_DIR}/learning"
163
+ cp -r "${REPO_DIR}/src/learning/"* "${INSTALL_DIR}/learning/"
164
+ echo "✓ Learning modules copied"
165
+ fi
166
+
91
167
  # Copy hooks
92
168
  echo "Copying hooks..."
93
169
  mkdir -p "${INSTALL_DIR}/hooks"
@@ -205,6 +281,22 @@ else
205
281
  echo "⚠️ requirements-core.txt not found, skipping dependency installation"
206
282
  fi
207
283
 
284
+ # Install learning dependencies (v2.7+)
285
+ echo ""
286
+ echo "Installing learning dependencies..."
287
+ echo " Enables intelligent pattern learning and personalized recall"
288
+
289
+ if [ -f "${REPO_DIR}/requirements-learning.txt" ]; then
290
+ if pip3 install $PIP_FLAGS -q -r "${REPO_DIR}/requirements-learning.txt" 2>/dev/null; then
291
+ echo "✓ Learning dependencies installed (personalized ranking enabled)"
292
+ else
293
+ echo "○ Learning dependencies skipped (core features unaffected)"
294
+ echo " To install later: pip3 install lightgbm scipy"
295
+ fi
296
+ else
297
+ echo "○ requirements-learning.txt not found (learning features will use rule-based ranking)"
298
+ fi
299
+
208
300
  # Initialize knowledge graph and pattern learning
209
301
  echo ""
210
302
  echo "Initializing advanced features..."
@@ -677,6 +769,10 @@ echo " slm status"
677
769
  echo " slm remember 'My first memory'"
678
770
  echo " slm recall 'first'"
679
771
  echo ""
772
+ echo "Learning System (v2.7+):"
773
+ echo " slm learning status - Check learning system"
774
+ echo " slm engagement - View engagement metrics"
775
+ echo ""
680
776
  # Optional: Offer to install optional features
681
777
  if [ "$NON_INTERACTIVE" = true ]; then
682
778
  INSTALL_CHOICE="N"
package/mcp_server.py CHANGED
@@ -63,6 +63,15 @@ try:
63
63
  except ImportError:
64
64
  TRUST_AVAILABLE = False
65
65
 
66
+ # Learning System (v2.7+)
67
+ try:
68
+ sys.path.insert(0, str(Path(__file__).parent / "src"))
69
+ from learning import get_learning_db, get_adaptive_ranker, get_feedback_collector, get_engagement_tracker, get_status as get_learning_status
70
+ from learning import FULL_LEARNING_AVAILABLE, ML_RANKING_AVAILABLE
71
+ LEARNING_AVAILABLE = True
72
+ except ImportError:
73
+ LEARNING_AVAILABLE = False
74
+
66
75
  def _sanitize_error(error: Exception) -> str:
67
76
  """Strip internal paths and structure from error messages."""
68
77
  msg = str(error)
@@ -165,6 +174,18 @@ def get_trust_scorer():
165
174
  return _trust_scorer
166
175
 
167
176
 
177
+ def get_learning_components():
178
+ """Get learning system components. Returns None if unavailable."""
179
+ if not LEARNING_AVAILABLE:
180
+ return None
181
+ return {
182
+ 'db': get_learning_db(),
183
+ 'ranker': get_adaptive_ranker(),
184
+ 'feedback': get_feedback_collector(),
185
+ 'engagement': get_engagement_tracker(),
186
+ }
187
+
188
+
168
189
  def _register_mcp_agent(agent_name: str = "mcp-client"):
169
190
  """Register the calling MCP agent and record activity. Non-blocking."""
170
191
  registry = get_agent_registry()
@@ -335,6 +356,27 @@ async def recall(
335
356
  else:
336
357
  results = store.search(query, limit=limit)
337
358
 
359
+ # v2.7: Learning-based re-ranking (optional, graceful fallback)
360
+ if LEARNING_AVAILABLE:
361
+ try:
362
+ ranker = get_adaptive_ranker()
363
+ if ranker:
364
+ results = ranker.rerank(results, query)
365
+ except Exception:
366
+ pass # Re-ranking failure must never break recall
367
+
368
+ # Track recall for passive feedback decay
369
+ if LEARNING_AVAILABLE:
370
+ try:
371
+ feedback = get_feedback_collector()
372
+ if feedback:
373
+ feedback.record_recall_results(query, [r.get('id') for r in results if r.get('id')])
374
+ tracker = get_engagement_tracker()
375
+ if tracker:
376
+ tracker.record_activity('recall_performed', source='mcp')
377
+ except Exception:
378
+ pass # Tracking failure must never break recall
379
+
338
380
  # Filter by minimum score
339
381
  filtered_results = [
340
382
  r for r in results
@@ -591,6 +633,212 @@ async def backup_status() -> dict:
591
633
  }
592
634
 
593
635
 
636
+ # ============================================================================
637
+ # LEARNING TOOLS (v2.7 — feedback, transparency, user control)
638
+ # ============================================================================
639
+
640
+ @mcp.tool(annotations=ToolAnnotations(
641
+ readOnlyHint=False,
642
+ destructiveHint=False,
643
+ openWorldHint=False,
644
+ ))
645
+ async def memory_used(
646
+ memory_id: int,
647
+ query: str = "",
648
+ usefulness: str = "high"
649
+ ) -> dict:
650
+ """
651
+ Signal that a recalled memory was useful. Call this when you reference
652
+ or apply a memory from recall results in your response.
653
+
654
+ This helps SuperLocalMemory learn which memories are most relevant
655
+ and improves future recall results.
656
+
657
+ Args:
658
+ memory_id: ID of the useful memory
659
+ query: The recall query that found it (optional)
660
+ usefulness: How useful - "high", "medium", or "low" (default "high")
661
+
662
+ Returns:
663
+ {"success": bool, "message": str}
664
+ """
665
+ try:
666
+ if not LEARNING_AVAILABLE:
667
+ return {"success": False, "message": "Learning features not available. Install: pip3 install lightgbm scipy"}
668
+
669
+ feedback = get_feedback_collector()
670
+ if feedback is None:
671
+ return {"success": False, "message": "Feedback collector not initialized"}
672
+
673
+ feedback.record_memory_used(
674
+ memory_id=memory_id,
675
+ query=query,
676
+ usefulness=usefulness,
677
+ source_tool="mcp-client",
678
+ )
679
+
680
+ return {
681
+ "success": True,
682
+ "message": f"Feedback recorded for memory #{memory_id} (usefulness: {usefulness})"
683
+ }
684
+ except Exception as e:
685
+ return {"success": False, "error": _sanitize_error(e)}
686
+
687
+
688
+ @mcp.tool(annotations=ToolAnnotations(
689
+ readOnlyHint=True,
690
+ destructiveHint=False,
691
+ openWorldHint=False,
692
+ ))
693
+ async def get_learned_patterns(
694
+ min_confidence: float = 0.6,
695
+ category: str = "all"
696
+ ) -> dict:
697
+ """
698
+ See what SuperLocalMemory has learned about your preferences,
699
+ projects, and workflow patterns.
700
+
701
+ Args:
702
+ min_confidence: Minimum confidence threshold 0.0-1.0 (default 0.6)
703
+ category: Filter by "tech", "workflow", "project", or "all" (default "all")
704
+
705
+ Returns:
706
+ {
707
+ "success": bool,
708
+ "patterns": {
709
+ "tech_preferences": [...],
710
+ "workflow_patterns": [...],
711
+ },
712
+ "ranking_phase": str,
713
+ "feedback_count": int
714
+ }
715
+ """
716
+ try:
717
+ if not LEARNING_AVAILABLE:
718
+ return {"success": False, "message": "Learning features not available. Install: pip3 install lightgbm scipy", "patterns": {}}
719
+
720
+ ldb = get_learning_db()
721
+ if ldb is None:
722
+ return {"success": False, "message": "Learning database not initialized", "patterns": {}}
723
+
724
+ result = {"success": True, "patterns": {}}
725
+
726
+ # Tech preferences (Layer 1)
727
+ if category in ("all", "tech"):
728
+ patterns = ldb.get_transferable_patterns(min_confidence=min_confidence)
729
+ result["patterns"]["tech_preferences"] = [
730
+ {
731
+ "id": p["id"],
732
+ "type": p["pattern_type"],
733
+ "key": p["key"],
734
+ "value": p["value"],
735
+ "confidence": round(p["confidence"], 2),
736
+ "evidence": p["evidence_count"],
737
+ "profiles_seen": p["profiles_seen"],
738
+ }
739
+ for p in patterns
740
+ ]
741
+
742
+ # Workflow patterns (Layer 3)
743
+ if category in ("all", "workflow"):
744
+ workflows = ldb.get_workflow_patterns(min_confidence=min_confidence)
745
+ result["patterns"]["workflow_patterns"] = [
746
+ {
747
+ "id": p["id"],
748
+ "type": p["pattern_type"],
749
+ "key": p["pattern_key"],
750
+ "value": p["pattern_value"],
751
+ "confidence": round(p["confidence"], 2),
752
+ }
753
+ for p in workflows
754
+ ]
755
+
756
+ # Ranking phase info
757
+ ranker = get_adaptive_ranker()
758
+ if ranker:
759
+ result["ranking_phase"] = ranker.get_phase()
760
+ result["feedback_count"] = ldb.get_feedback_count()
761
+
762
+ # Learning stats
763
+ result["stats"] = ldb.get_stats()
764
+
765
+ return result
766
+ except Exception as e:
767
+ return {"success": False, "error": _sanitize_error(e), "patterns": {}}
768
+
769
+
770
+ @mcp.tool(annotations=ToolAnnotations(
771
+ readOnlyHint=False,
772
+ destructiveHint=False,
773
+ openWorldHint=False,
774
+ ))
775
+ async def correct_pattern(
776
+ pattern_id: int,
777
+ correct_value: str,
778
+ reason: str = ""
779
+ ) -> dict:
780
+ """
781
+ Correct a learned pattern that is wrong. Use get_learned_patterns first
782
+ to see pattern IDs.
783
+
784
+ Args:
785
+ pattern_id: ID of the pattern to correct
786
+ correct_value: The correct value (e.g., "Vue" instead of "React")
787
+ reason: Why the correction (optional)
788
+
789
+ Returns:
790
+ {"success": bool, "message": str}
791
+ """
792
+ try:
793
+ if not LEARNING_AVAILABLE:
794
+ return {"success": False, "message": "Learning features not available"}
795
+
796
+ ldb = get_learning_db()
797
+ if ldb is None:
798
+ return {"success": False, "message": "Learning database not initialized"}
799
+
800
+ # Get existing pattern
801
+ conn = ldb._get_connection()
802
+ try:
803
+ cursor = conn.cursor()
804
+ cursor.execute('SELECT * FROM transferable_patterns WHERE id = ?', (pattern_id,))
805
+ pattern = cursor.fetchone()
806
+ if not pattern:
807
+ return {"success": False, "message": f"Pattern #{pattern_id} not found"}
808
+
809
+ old_value = pattern['value']
810
+
811
+ # Update the pattern with correction
812
+ ldb.upsert_transferable_pattern(
813
+ pattern_type=pattern['pattern_type'],
814
+ key=pattern['key'],
815
+ value=correct_value,
816
+ confidence=1.0, # User correction = maximum confidence
817
+ evidence_count=pattern['evidence_count'] + 1,
818
+ profiles_seen=pattern['profiles_seen'],
819
+ contradictions=[f"Corrected from '{old_value}' to '{correct_value}': {reason}"],
820
+ )
821
+
822
+ # Record as negative feedback for the old value
823
+ feedback = get_feedback_collector()
824
+ if feedback:
825
+ feedback.record_memory_used(
826
+ memory_id=0, # No specific memory
827
+ query=f"correction:{pattern['key']}",
828
+ usefulness="low",
829
+ source_tool="mcp-correction",
830
+ )
831
+
832
+ return {
833
+ "success": True,
834
+ "message": f"Pattern '{pattern['key']}' corrected: '{old_value}' → '{correct_value}'"
835
+ }
836
+ finally:
837
+ conn.close()
838
+ except Exception as e:
839
+ return {"success": False, "error": _sanitize_error(e)}
840
+
841
+
594
842
  # ============================================================================
595
843
  # CHATGPT CONNECTOR TOOLS (search + fetch — required by OpenAI MCP spec)
596
844
  # These two tools are required for ChatGPT Connectors and Deep Research.
@@ -758,6 +1006,41 @@ async def get_coding_identity_resource() -> str:
758
1006
  return json.dumps({"error": _sanitize_error(e)}, indent=2)
759
1007
 
760
1008
 
1009
+ @mcp.resource("memory://learning/status")
1010
+ async def get_learning_status_resource() -> str:
1011
+ """
1012
+ Resource: Get learning system status.
1013
+
1014
+ Usage: memory://learning/status
1015
+ """
1016
+ try:
1017
+ if not LEARNING_AVAILABLE:
1018
+ return json.dumps({"available": False, "message": "Learning deps not installed"}, indent=2)
1019
+ status = get_learning_status()
1020
+ return json.dumps(status, indent=2)
1021
+ except Exception as e:
1022
+ return json.dumps({"error": _sanitize_error(e)}, indent=2)
1023
+
1024
+
1025
+ @mcp.resource("memory://engagement")
1026
+ async def get_engagement_resource() -> str:
1027
+ """
1028
+ Resource: Get engagement metrics.
1029
+
1030
+ Usage: memory://engagement
1031
+ """
1032
+ try:
1033
+ if not LEARNING_AVAILABLE:
1034
+ return json.dumps({"available": False}, indent=2)
1035
+ tracker = get_engagement_tracker()
1036
+ if tracker:
1037
+ stats = tracker.get_engagement_stats()
1038
+ return json.dumps(stats, indent=2)
1039
+ return json.dumps({"available": False}, indent=2)
1040
+ except Exception as e:
1041
+ return json.dumps({"error": _sanitize_error(e)}, indent=2)
1042
+
1043
+
761
1044
  # ============================================================================
762
1045
  # MCP PROMPTS (Template injection)
763
1046
  # ============================================================================
@@ -867,7 +1150,7 @@ if __name__ == "__main__":
867
1150
  # Print startup message to stderr (stdout is used for MCP protocol)
868
1151
  print("=" * 60, file=sys.stderr)
869
1152
  print("SuperLocalMemory V2 - MCP Server", file=sys.stderr)
870
- print("Version: 2.5.0", file=sys.stderr)
1153
+ print("Version: 2.7.0", file=sys.stderr)
871
1154
  print("=" * 60, file=sys.stderr)
872
1155
  print("Created by: Varun Pratap Bhardwaj (Solution Architect)", file=sys.stderr)
873
1156
  print("Repository: https://github.com/varun369/SuperLocalMemoryV2", file=sys.stderr)
@@ -891,12 +1174,19 @@ if __name__ == "__main__":
891
1174
  print(" - build_graph()", file=sys.stderr)
892
1175
  print(" - switch_profile(name)", file=sys.stderr)
893
1176
  print(" - backup_status() [Auto-Backup]", file=sys.stderr)
1177
+ if LEARNING_AVAILABLE:
1178
+ print(" - memory_used(memory_id, query, usefulness) [v2.7 Learning]", file=sys.stderr)
1179
+ print(" - get_learned_patterns(min_confidence, category) [v2.7 Learning]", file=sys.stderr)
1180
+ print(" - correct_pattern(pattern_id, correct_value) [v2.7 Learning]", file=sys.stderr)
894
1181
  print("", file=sys.stderr)
895
1182
  print("MCP Resources Available:", file=sys.stderr)
896
1183
  print(" - memory://recent/{limit}", file=sys.stderr)
897
1184
  print(" - memory://stats", file=sys.stderr)
898
1185
  print(" - memory://graph/clusters", file=sys.stderr)
899
1186
  print(" - memory://patterns/identity", file=sys.stderr)
1187
+ if LEARNING_AVAILABLE:
1188
+ print(" - memory://learning/status", file=sys.stderr)
1189
+ print(" - memory://engagement", file=sys.stderr)
900
1190
  print("", file=sys.stderr)
901
1191
  print("MCP Prompts Available:", file=sys.stderr)
902
1192
  print(" - coding_identity_prompt()", file=sys.stderr)
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "superlocalmemory",
3
- "version": "2.6.5",
3
+ "version": "2.7.0",
4
4
  "description": "Your AI Finally Remembers You - Local-first intelligent memory system for AI assistants. Works with Claude, Cursor, Windsurf, VS Code/Copilot, Codex, and 17+ AI tools. 100% local, zero cloud dependencies.",
5
5
  "keywords": [
6
6
  "ai-memory",
@@ -76,6 +76,7 @@
76
76
  "docs/",
77
77
  "requirements.txt",
78
78
  "requirements-core.txt",
79
+ "requirements-learning.txt",
79
80
  "ui/",
80
81
  "ui_server.py",
81
82
  "api_server.py"
@@ -0,0 +1,12 @@
1
+ # SuperLocalMemory v2.7 - Learning Dependencies
2
+ # ============================================================================
3
+ # Optional but recommended. Enables intelligent pattern learning and
4
+ # personalized recall ranking.
5
+ #
6
+ # If installation fails, core features work normally (v2.6 behavior).
7
+ # To install: pip3 install -r requirements-learning.txt
8
+ #
9
+ # Download size: ~30MB
10
+ # ============================================================================
11
+ lightgbm>=4.0.0
12
+ scipy>=1.9.0