superlocalmemory 3.0.37 → 3.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -84,7 +84,7 @@ slm status
84
84
  }
85
85
  ```
86
86
 
87
- 24 MCP tools available. Works with Claude Code, Cursor, Windsurf, VS Code Copilot, Continue, Cody, ChatGPT Desktop, Gemini CLI, JetBrains, Zed, and 17+ AI tools.
87
+ 27 MCP tools + 7 resources available. Works with Claude Code, Cursor, Windsurf, VS Code Copilot, Continue, Cody, ChatGPT Desktop, Gemini CLI, JetBrains, Zed, and 17+ AI tools. **V3.1: Active Memory tools auto-learn your patterns.**
88
88
 
89
89
  ### Dual Interface: MCP + CLI
90
90
 
@@ -247,6 +247,42 @@ slm dashboard # Opens at http://localhost:8765
247
247
 
248
248
  ---
249
249
 
250
+ ## Active Memory (V3.1) — Memory That Learns
251
+
252
+ Most AI memory systems are passive databases — you store, you search, you get results. **SuperLocalMemory learns.**
253
+
254
+ Every recall you make generates learning signals. Over time, the system adapts to your patterns:
255
+
256
+ | Phase | Signals | What Happens |
257
+ |-------|---------|-------------|
258
+ | **Baseline** | 0-49 | Cross-encoder ranking (default behavior) |
259
+ | **Rule-Based** | 50+ | Heuristic boosts: recency, access count, trust score |
260
+ | **ML Model** | 200+ | LightGBM model trained on YOUR usage patterns |
261
+
262
+ ### Zero-Cost Learning Signals
263
+ No LLM tokens spent. Four mathematical signals computed locally:
264
+ - **Co-Retrieval** — memories retrieved together strengthen their connections
265
+ - **Confidence Lifecycle** — accessed facts get boosted, unused facts decay
266
+ - **Channel Performance** — tracks which retrieval channel works best for your queries
267
+ - **Entropy Gap** — surprising content gets prioritized for deeper indexing
268
+
269
+ ### Auto-Capture & Auto-Recall
270
+ ```bash
271
+ slm hooks install # Install Claude Code hooks for invisible injection
272
+ slm observe "We decided to use PostgreSQL" # Auto-detects decisions, bugs, preferences
273
+ slm session-context # Get relevant context at session start
274
+ ```
275
+
276
+ ### MCP Active Memory Tools
277
+ Three new tools for AI assistants:
278
+ - `session_init` — call at session start, get relevant project context automatically
279
+ - `observe` — send conversation content, auto-captures decisions/bugs/preferences
280
+ - `report_feedback` — explicit feedback for faster learning
281
+
282
+ **No competitor learns at zero token cost.** Mem0, Zep, and Letta all require cloud LLM calls for their learning loops. SLM learns through mathematics.
283
+
284
+ ---
285
+
250
286
  ## Features
251
287
 
252
288
  ### Retrieval
@@ -2,7 +2,7 @@
2
2
  > SuperLocalMemory V3 Documentation
3
3
  > https://superlocalmemory.com | Part of Qualixar
4
4
 
5
- Get your AI's memory system running in under 5 minutes.
5
+ Get your AI's memory system running in under 5 minutes. **V3.1: Now with Active Memory — your memory learns from your usage and gets smarter over time, at zero token cost.**
6
6
 
7
7
  ---
8
8
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "superlocalmemory",
3
- "version": "3.0.37",
3
+ "version": "3.1.1",
4
4
  "description": "Information-geometric agent memory with mathematical guarantees. 4-channel retrieval, Fisher-Rao similarity, zero-LLM mode, EU AI Act compliant. Works with Claude, Cursor, Windsurf, and 17+ AI tools.",
5
5
  "keywords": [
6
6
  "ai-memory",
package/pyproject.toml CHANGED
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "superlocalmemory"
3
- version = "3.0.37"
3
+ version = "3.1.1"
4
4
  description = "Information-geometric agent memory with mathematical guarantees"
5
5
  readme = "README.md"
6
6
  license = {text = "MIT"}
@@ -26,6 +26,7 @@ dependencies = [
26
26
  "fastapi[all]>=0.135.1",
27
27
  "uvicorn>=0.42.0",
28
28
  "websockets>=16.0",
29
+ "lightgbm>=4.0.0",
29
30
  ]
30
31
 
31
32
  [project.optional-dependencies]
@@ -37,6 +37,9 @@ def dispatch(args: Namespace) -> None:
37
37
  "warmup": cmd_warmup,
38
38
  "dashboard": cmd_dashboard,
39
39
  "profile": cmd_profile,
40
+ "hooks": cmd_hooks,
41
+ "session-context": cmd_session_context,
42
+ "observe": cmd_observe,
40
43
  }
41
44
  handler = handlers.get(args.command)
42
45
  if handler:
@@ -288,6 +291,12 @@ def cmd_recall(args: Namespace) -> None:
288
291
  ])
289
292
  return
290
293
 
294
+ # Record learning signals (CLI path — works without MCP)
295
+ try:
296
+ _cli_record_signals(config, args.query, response.results)
297
+ except Exception:
298
+ pass
299
+
291
300
  if not response.results:
292
301
  print("No memories found.")
293
302
  return
@@ -295,6 +304,26 @@ def cmd_recall(args: Namespace) -> None:
295
304
  print(f" {i}. [{r.score:.2f}] {r.fact.content[:120]}")
296
305
 
297
306
 
307
+ def _cli_record_signals(config, query, results):
308
+ """Record learning signals from CLI recall (no MCP dependency)."""
309
+ from pathlib import Path
310
+ from superlocalmemory.learning.feedback import FeedbackCollector
311
+ from superlocalmemory.learning.signals import LearningSignals
312
+ slm_dir = Path.home() / ".superlocalmemory"
313
+ pid = config.active_profile
314
+ fact_ids = [r.fact.fact_id for r in results[:10]]
315
+ if not fact_ids:
316
+ return
317
+ FeedbackCollector(slm_dir / "learning.db").record_implicit(
318
+ profile_id=pid, query=query,
319
+ fact_ids_returned=fact_ids, fact_ids_available=fact_ids,
320
+ )
321
+ signals = LearningSignals(slm_dir / "learning.db")
322
+ signals.record_co_retrieval(pid, fact_ids)
323
+ for fid in fact_ids[:5]:
324
+ LearningSignals.boost_confidence(str(slm_dir / "memory.db"), fid)
325
+
326
+
298
327
  def cmd_forget(args: Namespace) -> None:
299
328
  """Delete memories matching a query."""
300
329
  from superlocalmemory.core.engine import MemoryEngine
@@ -763,3 +792,96 @@ def cmd_profile(args: Namespace) -> None:
763
792
  )
764
793
  ensure_profile_in_json(args.name)
765
794
  print(f"Created profile: {args.name}")
795
+
796
+
797
+ # -- Active Memory commands (V3.1) ------------------------------------------
798
+
799
+
800
+ def cmd_hooks(args: Namespace) -> None:
801
+ """Manage Claude Code hooks for invisible memory injection."""
802
+ from superlocalmemory.hooks.claude_code_hooks import (
803
+ install_hooks, remove_hooks, check_status,
804
+ )
805
+
806
+ action = getattr(args, "action", "status")
807
+ if action == "install":
808
+ result = install_hooks()
809
+ if result["scripts"] and result["settings"]:
810
+ print("SLM hooks installed in Claude Code.")
811
+ print("Memory context will auto-inject on every new session.")
812
+ else:
813
+ print(f"Installation incomplete: {result['errors']}")
814
+ elif action == "remove":
815
+ result = remove_hooks()
816
+ if result["scripts"] and result["settings"]:
817
+ print("SLM hooks removed from Claude Code.")
818
+ else:
819
+ print(f"Removal incomplete: {result['errors']}")
820
+ else:
821
+ result = check_status()
822
+ if result["installed"]:
823
+ print("SLM hooks: INSTALLED")
824
+ print(f" Scripts: {result['hooks_dir']}")
825
+ print(" Claude Code settings: configured")
826
+ else:
827
+ print("SLM hooks: NOT INSTALLED")
828
+ print(" Run: slm hooks install")
829
+
830
+
831
+ def cmd_session_context(args: Namespace) -> None:
832
+ """Print session context (for hook scripts and piping)."""
833
+ from superlocalmemory.hooks.auto_recall import AutoRecall
834
+ from superlocalmemory.core.config import SLMConfig
835
+ from superlocalmemory.core.engine import MemoryEngine
836
+
837
+ try:
838
+ config = SLMConfig.load()
839
+ engine = MemoryEngine(config)
840
+ engine.initialize()
841
+
842
+ auto = AutoRecall(
843
+ engine=engine,
844
+ config={"enabled": True, "max_memories_injected": 10, "relevance_threshold": 0.3},
845
+ )
846
+ context = auto.get_session_context(
847
+ query=getattr(args, "query", "") or "recent decisions and important context",
848
+ )
849
+ if context:
850
+ print(context)
851
+ except Exception as exc:
852
+ logger.debug("session-context failed: %s", exc)
853
+
854
+
855
+ def cmd_observe(args: Namespace) -> None:
856
+ """Evaluate and auto-capture content from stdin or argument."""
857
+ import sys
858
+ from superlocalmemory.hooks.auto_capture import AutoCapture
859
+ from superlocalmemory.core.config import SLMConfig
860
+ from superlocalmemory.core.engine import MemoryEngine
861
+
862
+ content = getattr(args, "content", "") or ""
863
+ if not content and not sys.stdin.isatty():
864
+ content = sys.stdin.read().strip()
865
+
866
+ if not content:
867
+ print("No content to observe.")
868
+ return
869
+
870
+ try:
871
+ config = SLMConfig.load()
872
+ engine = MemoryEngine(config)
873
+ engine.initialize()
874
+
875
+ auto = AutoCapture(engine=engine)
876
+ decision = auto.evaluate(content)
877
+
878
+ if decision.capture:
879
+ stored = auto.capture(content, category=decision.category)
880
+ if stored:
881
+ print(f"Auto-captured: {decision.category} (confidence: {decision.confidence:.2f})")
882
+ else:
883
+ print(f"Detected {decision.category} but store failed.")
884
+ else:
885
+ print(f"Not captured: {decision.reason}")
886
+ except Exception as exc:
887
+ logger.debug("observe failed: %s", exc)
@@ -170,6 +170,19 @@ def main() -> None:
170
170
  profile_p.add_argument("name", nargs="?", help="Profile name")
171
171
  profile_p.add_argument("--json", action="store_true", help="Output structured JSON (agent-native)")
172
172
 
173
+ # -- Active Memory (V3.1) ------------------------------------------
174
+ hooks_p = sub.add_parser("hooks", help="Manage Claude Code hooks for auto memory injection")
175
+ hooks_p.add_argument(
176
+ "action", nargs="?", default="status",
177
+ choices=["install", "remove", "status"], help="Action (default: status)",
178
+ )
179
+
180
+ ctx_p = sub.add_parser("session-context", help="Print session context (for hooks)")
181
+ ctx_p.add_argument("query", nargs="?", default="", help="Optional context query")
182
+
183
+ obs_p = sub.add_parser("observe", help="Auto-capture content (pipe or argument)")
184
+ obs_p.add_argument("content", nargs="?", default="", help="Content to evaluate")
185
+
173
186
  args = parser.parse_args()
174
187
 
175
188
  if not args.command:
@@ -460,6 +460,15 @@ class MemoryEngine:
460
460
  except Exception as exc:
461
461
  logger.debug("Agentic sufficiency skipped: %s", exc)
462
462
 
463
+ # Adaptive re-ranking (V3.1 Active Memory)
464
+ # Phase 1 (< 50 signals): no change (cross-encoder order preserved)
465
+ # Phase 2 (50+): heuristic boosts (recency, access, trust)
466
+ # Phase 3 (200+): LightGBM ML ranking
467
+ try:
468
+ response = self._apply_adaptive_ranking(response, query, pid)
469
+ except Exception as exc:
470
+ logger.debug("Adaptive ranking skipped: %s", exc)
471
+
463
472
  # Reconsolidation: access updates trust + count (neuroscience principle)
464
473
  if self._trust_scorer:
465
474
  for r in response.results:
@@ -614,6 +623,60 @@ class MemoryEngine:
614
623
  if not self._initialized:
615
624
  self.initialize()
616
625
 
626
+ def _apply_adaptive_ranking(self, response, query: str, pid: str):
627
+ """Apply adaptive re-ranking if enough learning signals exist.
628
+
629
+ Phase 1 (< 50 signals): returns response unchanged (backward compat).
630
+ Phase 2 (50+): heuristic boosts from recency, access count, trust.
631
+ Phase 3 (200+): LightGBM ML-based reranking.
632
+ """
633
+ from superlocalmemory.learning.feedback import FeedbackCollector
634
+ from pathlib import Path
635
+
636
+ learning_db = Path.home() / ".superlocalmemory" / "learning.db"
637
+ if not learning_db.exists():
638
+ return response
639
+
640
+ collector = FeedbackCollector(learning_db)
641
+ signal_count = collector.get_feedback_count(pid)
642
+
643
+ if signal_count < 50:
644
+ return response # Phase 1: no change
645
+
646
+ from superlocalmemory.learning.ranker import AdaptiveRanker
647
+ ranker = AdaptiveRanker(signal_count=signal_count)
648
+
649
+ result_dicts = []
650
+ for r in response.results:
651
+ result_dicts.append({
652
+ "score": r.score,
653
+ "cross_encoder_score": r.score,
654
+ "trust_score": r.trust_score,
655
+ "channel_scores": r.channel_scores or {},
656
+ "fact": {
657
+ "age_days": 0,
658
+ "access_count": r.fact.access_count,
659
+ },
660
+ "_original": r,
661
+ })
662
+
663
+ query_context = {"query_type": response.query_type}
664
+ reranked = ranker.rerank(result_dicts, query_context)
665
+
666
+ # Rebuild response with new ordering
667
+ new_results = [d["_original"] for d in reranked]
668
+
669
+ from superlocalmemory.storage.models import RecallResponse
670
+ return RecallResponse(
671
+ query=response.query,
672
+ mode=response.mode,
673
+ results=new_results,
674
+ query_type=response.query_type,
675
+ channel_weights=response.channel_weights,
676
+ total_candidates=response.total_candidates,
677
+ retrieval_time_ms=response.retrieval_time_ms,
678
+ )
679
+
617
680
  def _init_encoding(self) -> None:
618
681
  from superlocalmemory.encoding.fact_extractor import FactExtractor
619
682
  from superlocalmemory.encoding.entity_resolver import EntityResolver
@@ -94,14 +94,13 @@ class Summarizer:
94
94
  # ------------------------------------------------------------------
95
95
 
96
96
  def _has_llm(self) -> bool:
97
- """Check if LLM is available (AND warm for Ollama).
97
+ """Check if LLM is available.
98
98
 
99
- For Mode B (Ollama): only returns True if the model is already
100
- loaded in memory. NEVER triggers a cold model load — that would
101
- spike 5+ GB of RAM on every recall, unacceptable on ≤32 GB machines.
99
+ Mode B: Ollama assumed running (num_ctx: 4096 caps memory at 5.5 GB).
100
+ Mode C: Requires API key for cloud provider.
102
101
  """
103
102
  if self._mode == "b":
104
- return self._is_ollama_model_warm()
103
+ return True
105
104
  if self._mode == "c":
106
105
  return bool(
107
106
  os.environ.get("OPENROUTER_API_KEY")
@@ -109,27 +108,6 @@ class Summarizer:
109
108
  )
110
109
  return False
111
110
 
112
- def _is_ollama_model_warm(self) -> bool:
113
- """Check if the LLM model is already loaded in Ollama memory.
114
-
115
- Queries Ollama /api/ps. Returns True only if our model is loaded,
116
- preventing cold-load memory spikes during recall.
117
- """
118
- try:
119
- import httpx
120
- model = getattr(self._config.llm, 'model', None) or "llama3.1:8b"
121
- model_base = model.split(":")[0]
122
- with httpx.Client(timeout=httpx.Timeout(2.0)) as client:
123
- resp = client.get("http://localhost:11434/api/ps")
124
- if resp.status_code != 200:
125
- return False
126
- for m in resp.json().get("models", []):
127
- if model_base in m.get("name", ""):
128
- return True
129
- return False
130
- except Exception:
131
- return False
132
-
133
111
  def _call_llm(self, prompt: str, max_tokens: int = 200) -> str:
134
112
  """Route to Ollama (B) or OpenRouter (C)."""
135
113
  if self._mode == "b":
@@ -0,0 +1,175 @@
1
+ # Copyright (c) 2026 Varun Pratap Bhardwaj / Qualixar
2
+ # Licensed under the MIT License - see LICENSE file
3
+ # Part of SuperLocalMemory V3 | https://qualixar.com | https://varunpratap.com
4
+
5
+ """Claude Code Hook Integration — invisible memory injection.
6
+
7
+ Installs hooks into Claude Code's settings.json that auto-inject
8
+ SLM context on session start and auto-capture on tool use.
9
+
10
+ Usage:
11
+ slm hooks install Install hooks into Claude Code settings
12
+ slm hooks status Check if hooks are installed
13
+ slm hooks remove Remove SLM hooks from settings
14
+
15
+ Part of Qualixar | Author: Varun Pratap Bhardwaj
16
+ """
17
+
18
+ from __future__ import annotations
19
+
20
+ import json
21
+ import logging
22
+ import shutil
23
+ from pathlib import Path
24
+
25
+ logger = logging.getLogger(__name__)
26
+
27
+ CLAUDE_SETTINGS = Path.home() / ".claude" / "settings.json"
28
+ HOOKS_DIR = Path.home() / ".superlocalmemory" / "hooks"
29
+
30
+ # The hook scripts that Claude Code will execute
31
+ HOOK_SCRIPTS = {
32
+ "slm-session-start.sh": """\
33
+ #!/bin/bash
34
+ # SLM Active Memory — Session Start Hook
35
+ # Auto-recalls relevant context at session start
36
+ slm session-context 2>/dev/null || true
37
+ """,
38
+ "slm-auto-capture.sh": """\
39
+ #!/bin/bash
40
+ # SLM Active Memory — Auto-Capture Hook
41
+ # Evaluates tool output for decisions/bugs/preferences
42
+ # Input comes via stdin from Claude Code PostToolUse event
43
+ INPUT=$(cat)
44
+ if [ -n "$INPUT" ]; then
45
+ echo "$INPUT" | slm observe 2>/dev/null || true
46
+ fi
47
+ """,
48
+ }
49
+
50
+ # Hook definitions for Claude Code settings.json
51
+ HOOK_DEFINITIONS = {
52
+ "hooks": {
53
+ "SessionStart": [
54
+ {
55
+ "type": "command",
56
+ "command": str(HOOKS_DIR / "slm-session-start.sh"),
57
+ "timeout": 10000,
58
+ }
59
+ ],
60
+ }
61
+ }
62
+
63
+
64
+ def install_hooks() -> dict:
65
+ """Install SLM hooks into Claude Code settings."""
66
+ results = {"scripts": False, "settings": False, "errors": []}
67
+
68
+ # 1. Create hook scripts
69
+ try:
70
+ HOOKS_DIR.mkdir(parents=True, exist_ok=True)
71
+ for name, content in HOOK_SCRIPTS.items():
72
+ path = HOOKS_DIR / name
73
+ path.write_text(content)
74
+ path.chmod(0o755)
75
+ results["scripts"] = True
76
+ except Exception as exc:
77
+ results["errors"].append(f"Script creation failed: {exc}")
78
+
79
+ # 2. Update Claude Code settings.json
80
+ try:
81
+ if not CLAUDE_SETTINGS.parent.exists():
82
+ CLAUDE_SETTINGS.parent.mkdir(parents=True, exist_ok=True)
83
+
84
+ settings = {}
85
+ if CLAUDE_SETTINGS.exists():
86
+ settings = json.loads(CLAUDE_SETTINGS.read_text())
87
+
88
+ # Merge hooks without overwriting existing ones
89
+ if "hooks" not in settings:
90
+ settings["hooks"] = {}
91
+
92
+ # Add SessionStart hook if not present
93
+ session_hooks = settings["hooks"].get("SessionStart", [])
94
+ slm_hook_cmd = str(HOOKS_DIR / "slm-session-start.sh")
95
+ already_installed = any(
96
+ h.get("command", "") == slm_hook_cmd
97
+ for h in session_hooks if isinstance(h, dict)
98
+ )
99
+
100
+ if not already_installed:
101
+ session_hooks.append({
102
+ "type": "command",
103
+ "command": slm_hook_cmd,
104
+ "timeout": 10000,
105
+ })
106
+ settings["hooks"]["SessionStart"] = session_hooks
107
+
108
+ CLAUDE_SETTINGS.write_text(json.dumps(settings, indent=2))
109
+ results["settings"] = True
110
+ except Exception as exc:
111
+ results["errors"].append(f"Settings update failed: {exc}")
112
+
113
+ return results
114
+
115
+
116
+ def remove_hooks() -> dict:
117
+ """Remove SLM hooks from Claude Code settings."""
118
+ results = {"scripts": False, "settings": False, "errors": []}
119
+
120
+ # 1. Remove hook scripts
121
+ try:
122
+ if HOOKS_DIR.exists():
123
+ shutil.rmtree(HOOKS_DIR)
124
+ results["scripts"] = True
125
+ except Exception as exc:
126
+ results["errors"].append(f"Script removal failed: {exc}")
127
+
128
+ # 2. Remove from Claude Code settings
129
+ try:
130
+ if CLAUDE_SETTINGS.exists():
131
+ settings = json.loads(CLAUDE_SETTINGS.read_text())
132
+ if "hooks" in settings and "SessionStart" in settings["hooks"]:
133
+ slm_hook_cmd = str(HOOKS_DIR / "slm-session-start.sh")
134
+ settings["hooks"]["SessionStart"] = [
135
+ h for h in settings["hooks"]["SessionStart"]
136
+ if not (isinstance(h, dict) and h.get("command", "") == slm_hook_cmd)
137
+ ]
138
+ if not settings["hooks"]["SessionStart"]:
139
+ del settings["hooks"]["SessionStart"]
140
+ if not settings["hooks"]:
141
+ del settings["hooks"]
142
+ CLAUDE_SETTINGS.write_text(json.dumps(settings, indent=2))
143
+ results["settings"] = True
144
+ except Exception as exc:
145
+ results["errors"].append(f"Settings cleanup failed: {exc}")
146
+
147
+ return results
148
+
149
+
150
+ def check_status() -> dict:
151
+ """Check if SLM hooks are installed."""
152
+ scripts_ok = all(
153
+ (HOOKS_DIR / name).exists()
154
+ for name in HOOK_SCRIPTS
155
+ )
156
+
157
+ settings_ok = False
158
+ try:
159
+ if CLAUDE_SETTINGS.exists():
160
+ settings = json.loads(CLAUDE_SETTINGS.read_text())
161
+ session_hooks = settings.get("hooks", {}).get("SessionStart", [])
162
+ slm_hook_cmd = str(HOOKS_DIR / "slm-session-start.sh")
163
+ settings_ok = any(
164
+ h.get("command", "") == slm_hook_cmd
165
+ for h in session_hooks if isinstance(h, dict)
166
+ )
167
+ except Exception:
168
+ pass
169
+
170
+ return {
171
+ "installed": scripts_ok and settings_ok,
172
+ "scripts": scripts_ok,
173
+ "settings": settings_ok,
174
+ "hooks_dir": str(HOOKS_DIR),
175
+ }