superlocalmemory 3.0.37 → 3.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -20,7 +20,32 @@ logger = logging.getLogger(__name__)
20
20
 
21
21
 
22
22
  def register_resources(server, get_engine: Callable) -> None:
23
- """Register 6 MCP resources on *server*."""
23
+ """Register 7 MCP resources on *server*."""
24
+
25
+ # ------------------------------------------------------------------
26
+ # 0. slm://context — Active Memory auto-injection
27
+ # ------------------------------------------------------------------
28
+ @server.resource("slm://context")
29
+ async def session_context() -> str:
30
+ """Active session context — auto-injected on MCP connect.
31
+
32
+ Returns the most relevant memories for the current session:
33
+ recent decisions, active patterns, and project context.
34
+ AI tools read this automatically on connection to get instant context.
35
+ """
36
+ try:
37
+ from superlocalmemory.hooks.auto_recall import AutoRecall
38
+ engine = get_engine()
39
+ auto = AutoRecall(
40
+ engine=engine,
41
+ config={"enabled": True, "max_memories_injected": 10, "relevance_threshold": 0.3},
42
+ )
43
+ context = auto.get_session_context(query="recent decisions and important context")
44
+ if not context:
45
+ return "No session context available yet. Use 'remember' to store memories."
46
+ return context
47
+ except Exception as exc:
48
+ return f"Context unavailable: {exc}"
24
49
 
25
50
  # ------------------------------------------------------------------
26
51
  # 1. slm://recent
@@ -58,11 +58,13 @@ def reset_engine():
58
58
  from superlocalmemory.mcp.tools_core import register_core_tools
59
59
  from superlocalmemory.mcp.tools_v28 import register_v28_tools
60
60
  from superlocalmemory.mcp.tools_v3 import register_v3_tools
61
+ from superlocalmemory.mcp.tools_active import register_active_tools
61
62
  from superlocalmemory.mcp.resources import register_resources
62
63
 
63
64
  register_core_tools(server, get_engine)
64
65
  register_v28_tools(server, get_engine)
65
66
  register_v3_tools(server, get_engine)
67
+ register_active_tools(server, get_engine)
66
68
  register_resources(server, get_engine)
67
69
 
68
70
 
@@ -0,0 +1,255 @@
1
+ # Copyright (c) 2026 Varun Pratap Bhardwaj / Qualixar
2
+ # Licensed under the MIT License - see LICENSE file
3
+ # Part of SuperLocalMemory V3 | https://qualixar.com | https://varunpratap.com
4
+
5
+ """SuperLocalMemory V3.1 — Active Memory MCP Tools.
6
+
7
+ session_init — Auto-recall project context at session start.
8
+ observe — Monitor conversation for auto-capture (decisions/bugs/prefs).
9
+ report_feedback — Record explicit feedback on recall results for learning.
10
+
11
+ These tools transform SLM from a passive database into an active
12
+ intelligence layer that learns and improves over time.
13
+
14
+ Part of Qualixar | Author: Varun Pratap Bhardwaj
15
+ """
16
+
17
+ from __future__ import annotations
18
+
19
+ import logging
20
+ from pathlib import Path
21
+ from typing import Callable
22
+
23
+ logger = logging.getLogger(__name__)
24
+
25
+ MEMORY_DIR = Path.home() / ".superlocalmemory"
26
+ DB_PATH = MEMORY_DIR / "memory.db"
27
+
28
+
29
+ def _emit_event(event_type: str, payload: dict | None = None,
30
+ source_agent: str = "mcp_client") -> None:
31
+ """Emit an event to the EventBus (best-effort, never raises)."""
32
+ try:
33
+ from superlocalmemory.infra.event_bus import EventBus
34
+ bus = EventBus.get_instance(str(DB_PATH))
35
+ bus.emit(event_type, payload=payload, source_agent=source_agent,
36
+ source_protocol="mcp")
37
+ except Exception:
38
+ pass
39
+
40
+
41
+ def _register_agent(agent_id: str, profile_id: str) -> None:
42
+ """Register an agent in the AgentRegistry (best-effort)."""
43
+ try:
44
+ from superlocalmemory.core.registry import AgentRegistry
45
+ registry_path = MEMORY_DIR / "agents.json"
46
+ registry = AgentRegistry(persist_path=registry_path)
47
+ registry.register_agent(agent_id, profile_id)
48
+ except Exception:
49
+ pass
50
+
51
+
52
+ def register_active_tools(server, get_engine: Callable) -> None:
53
+ """Register 3 active memory tools on *server*."""
54
+
55
+ # ------------------------------------------------------------------
56
+ # 1. session_init — Auto-recall project context at session start
57
+ # ------------------------------------------------------------------
58
+ @server.tool()
59
+ async def session_init(
60
+ project_path: str = "",
61
+ query: str = "",
62
+ max_results: int = 10,
63
+ ) -> dict:
64
+ """Initialize session with relevant memory context.
65
+
66
+ Call this ONCE at the start of every session. Returns:
67
+ - Recent decisions and patterns for this project
68
+ - Top relevant memories based on project path or query
69
+ - Learning status (signal count, ranking phase)
70
+
71
+ The AI should call this automatically before any other work.
72
+ """
73
+ try:
74
+ from superlocalmemory.hooks.auto_recall import AutoRecall
75
+ from superlocalmemory.hooks.rules_engine import RulesEngine
76
+
77
+ engine = get_engine()
78
+ rules = RulesEngine()
79
+
80
+ if not rules.should_recall("session_start"):
81
+ return {"success": True, "context": "", "memories": [], "message": "Auto-recall disabled"}
82
+
83
+ recall_config = rules.get_recall_config()
84
+ auto = AutoRecall(
85
+ engine=engine,
86
+ config={
87
+ "enabled": True,
88
+ "max_memories_injected": max_results,
89
+ "relevance_threshold": recall_config.get("relevance_threshold", 0.3),
90
+ },
91
+ )
92
+
93
+ # Get formatted context for system prompt injection
94
+ context = auto.get_session_context(project_path=project_path, query=query)
95
+
96
+ # Get structured results for tool response
97
+ search_query = query or (f"project context {project_path}" if project_path else "recent important decisions")
98
+ memories = auto.get_query_context(search_query)
99
+
100
+ # Get learning status
101
+ pid = engine.profile_id
102
+ feedback_count = 0
103
+ try:
104
+ feedback_count = engine._adaptive_learner.get_feedback_count(pid)
105
+ except Exception:
106
+ pass
107
+
108
+ # Register agent + emit event
109
+ _register_agent("mcp_client", pid)
110
+ _emit_event("agent.connected", {
111
+ "agent_id": "mcp_client",
112
+ "project_path": project_path,
113
+ "memory_count": len(memories),
114
+ })
115
+
116
+ return {
117
+ "success": True,
118
+ "context": context,
119
+ "memories": memories[:max_results],
120
+ "memory_count": len(memories),
121
+ "learning": {
122
+ "feedback_signals": feedback_count,
123
+ "phase": 1 if feedback_count < 50 else (2 if feedback_count < 200 else 3),
124
+ "status": "collecting" if feedback_count < 50 else ("learning" if feedback_count < 200 else "trained"),
125
+ },
126
+ }
127
+ except Exception as exc:
128
+ logger.exception("session_init failed")
129
+ return {"success": False, "error": str(exc)}
130
+
131
+ # ------------------------------------------------------------------
132
+ # 2. observe — Auto-capture decisions/bugs/preferences
133
+ # ------------------------------------------------------------------
134
+ @server.tool()
135
+ async def observe(
136
+ content: str,
137
+ agent_id: str = "mcp_client",
138
+ ) -> dict:
139
+ """Observe conversation content for automatic memory capture.
140
+
141
+ Send conversation snippets here. The system evaluates whether
142
+ the content contains decisions, bug fixes, or preferences worth
143
+ storing. If so, it auto-captures them with classification metadata.
144
+
145
+ Call this after making decisions, fixing bugs, or expressing preferences.
146
+ The system will NOT store low-confidence or irrelevant content.
147
+ """
148
+ try:
149
+ from superlocalmemory.hooks.auto_capture import AutoCapture
150
+ from superlocalmemory.hooks.rules_engine import RulesEngine
151
+
152
+ engine = get_engine()
153
+ rules = RulesEngine()
154
+
155
+ auto = AutoCapture(
156
+ engine=engine,
157
+ config=rules.get_capture_config(),
158
+ )
159
+
160
+ decision = auto.evaluate(content)
161
+
162
+ if not decision.capture:
163
+ return {
164
+ "captured": False,
165
+ "reason": decision.reason,
166
+ "category": decision.category,
167
+ "confidence": round(decision.confidence, 3),
168
+ }
169
+
170
+ # Check rules engine for category-level permission
171
+ if not rules.should_capture(decision.category, decision.confidence):
172
+ return {
173
+ "captured": False,
174
+ "reason": f"Category '{decision.category}' disabled in rules",
175
+ "category": decision.category,
176
+ "confidence": round(decision.confidence, 3),
177
+ }
178
+
179
+ # Auto-store via engine
180
+ stored = auto.capture(
181
+ content,
182
+ category=decision.category,
183
+ metadata={"agent_id": agent_id, "source": "auto-observe"},
184
+ )
185
+
186
+ if stored:
187
+ _emit_event("memory.created", {
188
+ "agent_id": agent_id,
189
+ "category": decision.category,
190
+ "content_preview": content[:80],
191
+ "source": "auto-observe",
192
+ }, source_agent=agent_id)
193
+
194
+ return {
195
+ "captured": stored,
196
+ "category": decision.category,
197
+ "confidence": round(decision.confidence, 3),
198
+ "reason": decision.reason,
199
+ }
200
+ except Exception as exc:
201
+ logger.exception("observe failed")
202
+ return {"captured": False, "error": str(exc)}
203
+
204
+ # ------------------------------------------------------------------
205
+ # 3. report_feedback — Explicit feedback for learning
206
+ # ------------------------------------------------------------------
207
+ @server.tool()
208
+ async def report_feedback(
209
+ fact_id: str,
210
+ feedback: str = "relevant",
211
+ query: str = "",
212
+ ) -> dict:
213
+ """Report whether a recalled memory was useful.
214
+
215
+ feedback: "relevant" (memory was helpful), "irrelevant" (not useful),
216
+ "partial" (somewhat relevant).
217
+
218
+ This feedback trains the adaptive ranker to return better results
219
+ over time. The more feedback, the smarter the system gets.
220
+ """
221
+ try:
222
+ engine = get_engine()
223
+ pid = engine.profile_id
224
+
225
+ if feedback not in ("relevant", "irrelevant", "partial"):
226
+ return {"success": False, "error": f"Invalid feedback: {feedback}. Use relevant/irrelevant/partial"}
227
+
228
+ record = engine._adaptive_learner.record_feedback(
229
+ query=query,
230
+ fact_id=fact_id,
231
+ feedback_type=feedback,
232
+ profile_id=pid,
233
+ )
234
+
235
+ count = engine._adaptive_learner.get_feedback_count(pid)
236
+
237
+ _emit_event("pattern.learned", {
238
+ "fact_id": fact_id,
239
+ "feedback": feedback,
240
+ "total_signals": count,
241
+ "phase": 1 if count < 50 else (2 if count < 200 else 3),
242
+ })
243
+
244
+ return {
245
+ "success": True,
246
+ "feedback_id": record.feedback_id,
247
+ "total_signals": count,
248
+ "phase": 1 if count < 50 else (2 if count < 200 else 3),
249
+ "message": f"Feedback recorded. {count} total signals."
250
+ + (" Phase 2 unlocked!" if count == 50 else "")
251
+ + (" Phase 3 (ML) unlocked!" if count == 200 else ""),
252
+ }
253
+ except Exception as exc:
254
+ logger.exception("report_feedback failed")
255
+ return {"success": False, "error": str(exc)}
@@ -15,10 +15,71 @@ from __future__ import annotations
15
15
 
16
16
  import json
17
17
  import logging
18
+ from pathlib import Path
18
19
  from typing import Any, Callable
19
20
 
20
21
  logger = logging.getLogger(__name__)
21
22
 
23
+ _DB_PATH = str(Path.home() / ".superlocalmemory" / "memory.db")
24
+
25
+
26
+ def _emit_event(event_type: str, payload: dict | None = None,
27
+ source_agent: str = "mcp_client") -> None:
28
+ """Emit an event to the EventBus (best-effort, never raises)."""
29
+ try:
30
+ from superlocalmemory.infra.event_bus import EventBus
31
+ bus = EventBus.get_instance(_DB_PATH)
32
+ bus.emit(event_type, payload=payload, source_agent=source_agent,
33
+ source_protocol="mcp")
34
+ except Exception:
35
+ pass
36
+
37
+
38
+ def _record_recall_hits(get_engine: Callable, query: str, results: list[dict]) -> None:
39
+ """Record implicit feedback + learning signals for each recall.
40
+
41
+ Non-blocking, non-critical — failures silently ignored.
42
+ Feeds: FeedbackCollector + Co-Retrieval + Confidence Boost.
43
+ """
44
+ try:
45
+ from pathlib import Path
46
+ engine = get_engine()
47
+ pid = engine.profile_id
48
+ slm_dir = Path.home() / ".superlocalmemory"
49
+ fact_ids = [r.get("fact_id", "") for r in results[:10] if r.get("fact_id")]
50
+ if not fact_ids:
51
+ return
52
+
53
+ # 1. Implicit feedback (recall_hit signals for adaptive learner)
54
+ try:
55
+ from superlocalmemory.learning.feedback import FeedbackCollector
56
+ collector = FeedbackCollector(slm_dir / "learning.db")
57
+ collector.record_implicit(
58
+ profile_id=pid, query=query,
59
+ fact_ids_returned=fact_ids, fact_ids_available=fact_ids,
60
+ )
61
+ except Exception:
62
+ pass
63
+
64
+ # 2. Co-retrieval signals (strengthen implicit graph edges)
65
+ try:
66
+ from superlocalmemory.learning.signals import LearningSignals
67
+ signals = LearningSignals(slm_dir / "learning.db")
68
+ signals.record_co_retrieval(pid, fact_ids)
69
+ except Exception:
70
+ pass
71
+
72
+ # 3. Confidence boost (accessed facts get +0.02, cap 1.0)
73
+ try:
74
+ from superlocalmemory.learning.signals import LearningSignals
75
+ mem_db = str(slm_dir / "memory.db")
76
+ for fid in fact_ids[:5]:
77
+ LearningSignals.boost_confidence(mem_db, fid)
78
+ except Exception:
79
+ pass
80
+ except Exception:
81
+ pass
82
+
22
83
 
23
84
  def register_core_tools(server, get_engine: Callable) -> None:
24
85
  """Register the 13 core MCP tools on *server*."""
@@ -43,6 +104,11 @@ def register_core_tools(server, get_engine: Callable) -> None:
43
104
  "session_id": session_id,
44
105
  })
45
106
  if result.get("ok"):
107
+ _emit_event("memory.created", {
108
+ "content_preview": content[:80],
109
+ "agent_id": agent_id,
110
+ "fact_count": result.get("count", 0),
111
+ }, source_agent=agent_id)
46
112
  return {"success": True, "fact_ids": result.get("fact_ids", []), "count": result.get("count", 0)}
47
113
  return {"success": False, "error": result.get("error", "Store failed")}
48
114
  except Exception as exc:
@@ -57,6 +123,17 @@ def register_core_tools(server, get_engine: Callable) -> None:
57
123
  pool = WorkerPool.shared()
58
124
  result = pool.recall(query, limit=limit)
59
125
  if result.get("ok"):
126
+ # Record implicit feedback: every returned result is a recall_hit
127
+ try:
128
+ _record_recall_hits(get_engine, query, result.get("results", []))
129
+ except Exception:
130
+ pass # Feedback is non-critical, never block recall
131
+ _emit_event("memory.recalled", {
132
+ "query": query[:80],
133
+ "result_count": result.get("result_count", 0),
134
+ "query_type": result.get("query_type", "unknown"),
135
+ "agent_id": agent_id,
136
+ }, source_agent=agent_id)
60
137
  return {
61
138
  "success": True,
62
139
  "results": result.get("results", []),
@@ -311,6 +388,10 @@ def register_core_tools(server, get_engine: Callable) -> None:
311
388
  })
312
389
  if result.get("ok"):
313
390
  logger.info("Memory deleted: %s by agent: %s", fact_id[:16], agent_id)
391
+ _emit_event("memory.deleted", {
392
+ "fact_id": fact_id,
393
+ "agent_id": agent_id,
394
+ }, source_agent=agent_id)
314
395
  return {"success": True, "deleted": fact_id, "agent_id": agent_id}
315
396
  return {"success": False, "error": result.get("error", "Delete failed")}
316
397
  except Exception as exc:
@@ -43,13 +43,15 @@ async def get_agents(
43
43
  if not REGISTRY_AVAILABLE:
44
44
  return {"agents": [], "count": 0, "message": "Agent registry not available"}
45
45
  try:
46
- engine = getattr(request.app.state, "engine", None)
47
- if engine and hasattr(engine, '_db'):
48
- registry = AgentRegistry(engine._db)
49
- agents = registry.list_agents(protocol=protocol, limit=limit)
50
- stats = registry.get_stats()
51
- return {"agents": agents, "count": len(agents), "stats": stats}
52
- return {"agents": [], "count": 0, "message": "Engine not initialized"}
46
+ from pathlib import Path
47
+ registry_path = Path.home() / ".superlocalmemory" / "agents.json"
48
+ registry = AgentRegistry(persist_path=registry_path)
49
+ agents = registry.list_agents()
50
+ return {
51
+ "agents": agents,
52
+ "count": len(agents),
53
+ "stats": {"total_agents": len(agents)},
54
+ }
53
55
  except Exception as e:
54
56
  raise HTTPException(status_code=500, detail=f"Agent registry error: {str(e)}")
55
57
 
@@ -60,11 +62,11 @@ async def get_agent_stats(request: Request):
60
62
  if not REGISTRY_AVAILABLE:
61
63
  return {"total_agents": 0, "message": "Agent registry not available"}
62
64
  try:
63
- engine = getattr(request.app.state, "engine", None)
64
- if engine and hasattr(engine, '_db'):
65
- registry = AgentRegistry(engine._db)
66
- return registry.get_stats()
67
- return {"total_agents": 0, "message": "Engine not initialized"}
65
+ from pathlib import Path
66
+ registry_path = Path.home() / ".superlocalmemory" / "agents.json"
67
+ registry = AgentRegistry(persist_path=registry_path)
68
+ agents = registry.list_agents()
69
+ return {"total_agents": len(agents)}
68
70
  except Exception as e:
69
71
  raise HTTPException(status_code=500, detail=f"Agent stats error: {str(e)}")
70
72
 
@@ -45,9 +45,18 @@ async def behavioral_status():
45
45
  recent_outcomes = []
46
46
  try:
47
47
  tracker = OutcomeTracker(db_path)
48
- total_outcomes = tracker.get_outcome_count(profile=profile)
49
- outcome_breakdown = tracker.get_outcome_breakdown(profile=profile)
50
- recent_outcomes = tracker.get_recent_outcomes(profile=profile, limit=20)
48
+ all_outcomes = tracker.get_outcomes(profile_id=profile, limit=50)
49
+ total_outcomes = len(all_outcomes)
50
+ for o in all_outcomes:
51
+ key = o.outcome if hasattr(o, 'outcome') else str(o)
52
+ if key in outcome_breakdown:
53
+ outcome_breakdown[key] += 1
54
+ recent_outcomes = [
55
+ {"outcome": o.outcome, "action_type": o.action_type,
56
+ "timestamp": o.timestamp}
57
+ for o in all_outcomes[:20]
58
+ if hasattr(o, 'outcome')
59
+ ]
51
60
  except Exception as exc:
52
61
  logger.debug("outcome tracker: %s", exc)
53
62
 
@@ -56,8 +65,8 @@ async def behavioral_status():
56
65
  cross_project_transfers = 0
57
66
  try:
58
67
  store = BehavioralPatternStore(db_path)
59
- patterns = store.get_patterns(profile=profile)
60
- cross_project_transfers = store.get_cross_project_count(profile=profile)
68
+ patterns = store.get_patterns(profile_id=profile)
69
+ cross_project_transfers = 0
61
70
  except Exception as exc:
62
71
  logger.debug("pattern store: %s", exc)
63
72
 
@@ -69,6 +78,12 @@ async def behavioral_status():
69
78
  "patterns": patterns,
70
79
  "cross_project_transfers": cross_project_transfers,
71
80
  "recent_outcomes": recent_outcomes,
81
+ "stats": {
82
+ "success_count": outcome_breakdown.get("success", 0),
83
+ "failure_count": outcome_breakdown.get("failure", 0),
84
+ "partial_count": outcome_breakdown.get("partial", 0),
85
+ "patterns_count": len(patterns),
86
+ },
72
87
  }
73
88
  except Exception as e:
74
89
  logger.error("behavioral_status error: %s", e)
@@ -79,22 +79,45 @@ async def learning_status():
79
79
  active_profile = get_active_profile()
80
80
  result["active_profile"] = active_profile
81
81
 
82
- # Ranking phase
83
- result["ranking_phase"] = "baseline"
82
+ # Real signal count from V3.1 learning_feedback table
83
+ signal_count = 0
84
+ try:
85
+ from superlocalmemory.learning.feedback import FeedbackCollector
86
+ from pathlib import Path
87
+ learning_db = Path.home() / ".superlocalmemory" / "learning.db"
88
+ if learning_db.exists():
89
+ collector = FeedbackCollector(learning_db)
90
+ signal_count = collector.get_feedback_count(active_profile)
91
+ except Exception:
92
+ pass
93
+
94
+ # Ranking phase based on real signal count
95
+ if signal_count >= 200:
96
+ result["ranking_phase"] = "ml_model"
97
+ elif signal_count >= 20:
98
+ result["ranking_phase"] = "rule_based"
99
+ else:
100
+ result["ranking_phase"] = "baseline"
84
101
 
85
- # Feedback stats
102
+ # Feedback stats — merge old system + new V3.1 signals
103
+ stats_dict = {"feedback_count": signal_count, "active_profile": active_profile}
86
104
  feedback = _get_feedback()
87
105
  if feedback:
88
106
  try:
89
- summary = feedback.get_feedback_summary()
90
- result["stats"] = summary
91
- result["profile_feedback"] = {
92
- "profile": active_profile,
93
- "signals": summary.get("total_signals", 0),
94
- }
107
+ old_stats = feedback.get_feedback_summary(active_profile)
108
+ if isinstance(old_stats, dict):
109
+ old_stats["feedback_count"] = signal_count
110
+ old_stats["active_profile"] = active_profile
111
+ stats_dict = old_stats
95
112
  except Exception as exc:
96
113
  logger.debug("feedback summary: %s", exc)
97
114
 
115
+ result["stats"] = stats_dict
116
+ result["profile_feedback"] = {
117
+ "profile": active_profile,
118
+ "signals": signal_count,
119
+ }
120
+
98
121
  # Engagement
99
122
  engagement = _get_engagement()
100
123
  if engagement:
@@ -105,9 +128,43 @@ async def learning_status():
105
128
  else:
106
129
  result["engagement"] = None
107
130
 
108
- # Tech preferences (stub until learning DB populated)
109
- result["tech_preferences"] = []
110
- result["workflow_patterns"] = []
131
+ # Tech preferences + workflow patterns from V3.1 behavioral store
132
+ try:
133
+ from superlocalmemory.learning.behavioral import BehavioralPatternStore
134
+ from pathlib import Path
135
+ learning_db = Path.home() / ".superlocalmemory" / "learning.db"
136
+ if learning_db.exists():
137
+ store = BehavioralPatternStore(str(learning_db))
138
+ all_patterns = store.get_patterns(profile_id=active_profile)
139
+ tech = [
140
+ {"key": "tech", "value": p.get("metadata", {}).get("value", p.get("pattern_key", "")),
141
+ "confidence": p.get("confidence", 0), "evidence": p.get("evidence_count", 0)}
142
+ for p in all_patterns if p.get("pattern_type") == "tech_preference"
143
+ ]
144
+ workflows = [
145
+ {"type": p.get("pattern_type"), "key": p.get("pattern_key", ""),
146
+ "value": p.get("metadata", {}).get("value", ""),
147
+ "confidence": p.get("confidence", 0)}
148
+ for p in all_patterns if p.get("pattern_type") in ("temporal", "interest")
149
+ ]
150
+ result["tech_preferences"] = tech
151
+ result["workflow_patterns"] = workflows
152
+
153
+ # Privacy stats
154
+ import os
155
+ db_size = os.path.getsize(str(learning_db)) // 1024 if learning_db.exists() else 0
156
+ stats_dict["db_size_kb"] = db_size
157
+ stats_dict["transferable_patterns"] = len(all_patterns)
158
+ stats_dict["models_trained"] = 1 if signal_count >= 200 else 0
159
+ stats_dict["tracked_sources"] = len(set(
160
+ p.get("pattern_type") for p in all_patterns
161
+ ))
162
+ else:
163
+ result["tech_preferences"] = []
164
+ result["workflow_patterns"] = []
165
+ except Exception:
166
+ result["tech_preferences"] = []
167
+ result["workflow_patterns"] = []
111
168
  result["source_scores"] = {}
112
169
 
113
170
  except Exception as e:
@@ -217,8 +274,9 @@ async def feedback_stats():
217
274
  by_type = {}
218
275
 
219
276
  if feedback:
220
- summary = feedback.get_feedback_summary()
221
- total = summary.get("total_signals", 0)
277
+ profile = get_active_profile()
278
+ summary = feedback.get_feedback_summary(profile)
279
+ total = summary.get("total", summary.get("total_signals", 0))
222
280
  by_channel = summary.get("by_channel", {})
223
281
  by_type = summary.get("by_type", {})
224
282
 
@@ -38,32 +38,32 @@ async def lifecycle_status():
38
38
  conn = sqlite3.connect(str(DB_PATH))
39
39
  conn.row_factory = sqlite3.Row
40
40
 
41
- # Try V3 schema first (atomic_facts with lifecycle_state)
41
+ # Try V3 schema first (atomic_facts with lifecycle column)
42
42
  states = {}
43
43
  try:
44
44
  rows = conn.execute(
45
- "SELECT lifecycle_state, COUNT(*) as cnt "
46
- "FROM atomic_facts WHERE profile_id = ? GROUP BY lifecycle_state",
45
+ "SELECT lifecycle, COUNT(*) as cnt "
46
+ "FROM atomic_facts WHERE profile_id = ? GROUP BY lifecycle",
47
47
  (profile,),
48
48
  ).fetchall()
49
49
  states = {
50
- (row['lifecycle_state'] or 'active'): row['cnt']
50
+ (row['lifecycle'] or 'active'): row['cnt']
51
51
  for row in rows
52
52
  }
53
53
  except sqlite3.OperationalError:
54
54
  # V2 fallback: memories table
55
55
  try:
56
56
  rows = conn.execute(
57
- "SELECT lifecycle_state, COUNT(*) as cnt "
58
- "FROM memories WHERE profile = ? GROUP BY lifecycle_state",
57
+ "SELECT lifecycle, COUNT(*) as cnt "
58
+ "FROM memories WHERE profile = ? GROUP BY lifecycle",
59
59
  (profile,),
60
60
  ).fetchall()
61
61
  states = {
62
- (row['lifecycle_state'] or 'active'): row['cnt']
62
+ (row['lifecycle'] or 'active'): row['cnt']
63
63
  for row in rows
64
64
  }
65
65
  except sqlite3.OperationalError:
66
- # No lifecycle_state column at all
66
+ # No lifecycle column at all — count everything as active
67
67
  total = conn.execute(
68
68
  "SELECT COUNT(*) FROM atomic_facts WHERE profile_id = ?",
69
69
  (profile,),
@@ -80,7 +80,7 @@ async def lifecycle_status():
80
80
  "SELECT AVG(julianday('now') - julianday(created_at)) as avg_age, "
81
81
  "MIN(julianday('now') - julianday(created_at)) as min_age, "
82
82
  "MAX(julianday('now') - julianday(created_at)) as max_age "
83
- "FROM atomic_facts WHERE profile_id = ? AND lifecycle_state = ?",
83
+ "FROM atomic_facts WHERE profile_id = ? AND lifecycle = ?",
84
84
  (profile, state),
85
85
  ).fetchone()
86
86
  if row and row['avg_age'] is not None: