superlocalmemory 3.3.29 → 3.4.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69) hide show
  1. package/ATTRIBUTION.md +1 -1
  2. package/CHANGELOG.md +3 -0
  3. package/LICENSE +633 -70
  4. package/README.md +14 -11
  5. package/docs/screenshots/01-dashboard-main.png +0 -0
  6. package/docs/screenshots/02-knowledge-graph.png +0 -0
  7. package/docs/screenshots/03-patterns-learning.png +0 -0
  8. package/docs/screenshots/04-learning-dashboard.png +0 -0
  9. package/docs/screenshots/05-behavioral-analysis.png +0 -0
  10. package/docs/screenshots/06-graph-communities.png +0 -0
  11. package/docs/v2-archive/ACCESSIBILITY.md +1 -1
  12. package/docs/v2-archive/FRAMEWORK-INTEGRATIONS.md +1 -1
  13. package/docs/v2-archive/MCP-MANUAL-SETUP.md +1 -1
  14. package/docs/v2-archive/SEARCH-ENGINE-V2.2.0.md +2 -2
  15. package/docs/v2-archive/SEARCH-INTEGRATION-GUIDE.md +1 -1
  16. package/docs/v2-archive/UNIVERSAL-INTEGRATION.md +1 -1
  17. package/docs/v2-archive/V2.2.0-OPTIONAL-SEARCH.md +1 -1
  18. package/docs/v2-archive/example_graph_usage.py +1 -1
  19. package/ide/configs/codex-mcp.toml +1 -1
  20. package/ide/integrations/langchain/README.md +1 -1
  21. package/ide/integrations/langchain/langchain_superlocalmemory/__init__.py +1 -1
  22. package/ide/integrations/langchain/langchain_superlocalmemory/chat_message_history.py +1 -1
  23. package/ide/integrations/langchain/pyproject.toml +2 -2
  24. package/ide/integrations/langchain/tests/__init__.py +1 -1
  25. package/ide/integrations/langchain/tests/test_chat_message_history.py +1 -1
  26. package/ide/integrations/langchain/tests/test_security.py +1 -1
  27. package/ide/integrations/llamaindex/llama_index/storage/chat_store/superlocalmemory/__init__.py +1 -1
  28. package/ide/integrations/llamaindex/llama_index/storage/chat_store/superlocalmemory/base.py +1 -1
  29. package/ide/integrations/llamaindex/pyproject.toml +2 -2
  30. package/ide/integrations/llamaindex/tests/__init__.py +1 -1
  31. package/ide/integrations/llamaindex/tests/test_chat_store.py +1 -1
  32. package/ide/integrations/llamaindex/tests/test_security.py +1 -1
  33. package/ide/skills/slm-build-graph/SKILL.md +3 -3
  34. package/ide/skills/slm-list-recent/SKILL.md +3 -3
  35. package/ide/skills/slm-recall/SKILL.md +3 -3
  36. package/ide/skills/slm-remember/SKILL.md +3 -3
  37. package/ide/skills/slm-show-patterns/SKILL.md +3 -3
  38. package/ide/skills/slm-status/SKILL.md +3 -3
  39. package/ide/skills/slm-switch-profile/SKILL.md +3 -3
  40. package/package.json +3 -3
  41. package/pyproject.toml +3 -3
  42. package/src/superlocalmemory/core/engine_wiring.py +5 -1
  43. package/src/superlocalmemory/core/graph_analyzer.py +254 -12
  44. package/src/superlocalmemory/learning/consolidation_worker.py +240 -52
  45. package/src/superlocalmemory/retrieval/entity_channel.py +135 -4
  46. package/src/superlocalmemory/retrieval/spreading_activation.py +45 -0
  47. package/src/superlocalmemory/server/api.py +9 -1
  48. package/src/superlocalmemory/server/routes/behavioral.py +8 -4
  49. package/src/superlocalmemory/server/routes/chat.py +320 -0
  50. package/src/superlocalmemory/server/routes/insights.py +368 -0
  51. package/src/superlocalmemory/server/routes/learning.py +106 -6
  52. package/src/superlocalmemory/server/routes/memories.py +20 -9
  53. package/src/superlocalmemory/server/routes/stats.py +25 -3
  54. package/src/superlocalmemory/server/routes/timeline.py +252 -0
  55. package/src/superlocalmemory/server/routes/v3_api.py +161 -0
  56. package/src/superlocalmemory/server/ui.py +8 -0
  57. package/src/superlocalmemory/ui/index.html +168 -58
  58. package/src/superlocalmemory/ui/js/graph-event-bus.js +83 -0
  59. package/src/superlocalmemory/ui/js/graph-filters.js +1 -1
  60. package/src/superlocalmemory/ui/js/knowledge-graph.js +942 -0
  61. package/src/superlocalmemory/ui/js/memory-chat.js +344 -0
  62. package/src/superlocalmemory/ui/js/memory-timeline.js +265 -0
  63. package/src/superlocalmemory/ui/js/quick-actions.js +334 -0
  64. package/src/superlocalmemory.egg-info/PKG-INFO +597 -0
  65. package/src/superlocalmemory.egg-info/SOURCES.txt +287 -0
  66. package/src/superlocalmemory.egg-info/dependency_links.txt +1 -0
  67. package/src/superlocalmemory.egg-info/entry_points.txt +2 -0
  68. package/src/superlocalmemory.egg-info/requires.txt +47 -0
  69. package/src/superlocalmemory.egg-info/top_level.txt +1 -0
@@ -0,0 +1,368 @@
1
+ # Copyright (c) 2026 Varun Pratap Bhardwaj / Qualixar
2
+ # Licensed under AGPL-3.0-or-later — see LICENSE file
3
+ # Part of SuperLocalMemory v3.4.1 | https://qualixar.com
4
+
5
+ """Quick Insight Actions — 5 one-click intelligence endpoints.
6
+
7
+ Actions: changed_this_week, opinions, contradictions, health, cross_project.
8
+ All queries use direct sqlite3 (Rule 06), parameterized SQL (Rule 11),
9
+ profile-scoped (Rule 01).
10
+ """
11
+
12
+ from __future__ import annotations
13
+
14
+ import logging
15
+ import sqlite3
16
+ from typing import Any, Callable
17
+
18
+ from fastapi import APIRouter, Query
19
+ from fastapi.responses import JSONResponse
20
+ from superlocalmemory.server.routes.helpers import DB_PATH, get_active_profile
21
+
22
+ logger = logging.getLogger(__name__)
23
+
24
+ router = APIRouter(prefix="/api/v3/insights", tags=["insights"])
25
+
26
+
27
# ── Helper ────────────────────────────────────────────────────────

def _get_conn(profile: str = "") -> tuple[sqlite3.Connection | None, str]:
    """Resolve the profile id and open a row-factory sqlite3 connection.

    Returns ``(connection, profile_id)``; the connection is ``None``
    when the database file does not exist yet.
    """
    profile_id = profile if profile else get_active_profile()
    if not DB_PATH.exists():
        return None, profile_id
    connection = sqlite3.connect(str(DB_PATH))
    connection.row_factory = sqlite3.Row
    return connection, profile_id
37
+
38
+
39
+ # ── Action Handlers ───────────────────────────────────────────────
40
+
41
+ def _action_changed_this_week(
42
+ conn: sqlite3.Connection, pid: str, limit: int = 50, days: int = 7,
43
+ ) -> dict[str, Any]:
44
+ """Facts created or modified in last N days."""
45
+ items = []
46
+ modified = []
47
+
48
+ # New facts
49
+ try:
50
+ rows = conn.execute(
51
+ "SELECT fact_id, content, fact_type, created_at, session_id, confidence "
52
+ "FROM atomic_facts "
53
+ "WHERE profile_id = ? AND created_at >= datetime('now', ?) "
54
+ "ORDER BY created_at DESC LIMIT ?",
55
+ (pid, f"-{days} days", limit),
56
+ ).fetchall()
57
+ items = [
58
+ {
59
+ "fact_id": dict(r)["fact_id"],
60
+ "content": (dict(r).get("content") or "")[:200],
61
+ "fact_type": dict(r).get("fact_type", ""),
62
+ "created_at": dict(r).get("created_at", ""),
63
+ "session_id": dict(r).get("session_id", ""),
64
+ "confidence": round(float(dict(r).get("confidence", 0)), 3),
65
+ }
66
+ for r in rows
67
+ ]
68
+ except Exception as exc:
69
+ logger.debug("changed_this_week new facts failed: %s", exc)
70
+
71
+ # Modified facts (consolidation updates)
72
+ try:
73
+ rows = conn.execute(
74
+ "SELECT cl.action_type, cl.new_fact_id, cl.existing_fact_id, "
75
+ "cl.reason, cl.timestamp, af.content AS new_content "
76
+ "FROM consolidation_log cl "
77
+ "LEFT JOIN atomic_facts af ON af.fact_id = cl.new_fact_id "
78
+ "WHERE cl.profile_id = ? AND cl.timestamp >= datetime('now', ?) "
79
+ "AND cl.action_type IN ('update', 'supersede') "
80
+ "ORDER BY cl.timestamp DESC LIMIT ?",
81
+ (pid, f"-{days} days", limit),
82
+ ).fetchall()
83
+ modified = [
84
+ {
85
+ "action_type": dict(r).get("action_type", ""),
86
+ "new_fact_id": dict(r).get("new_fact_id", ""),
87
+ "existing_fact_id": dict(r).get("existing_fact_id", ""),
88
+ "reason": (dict(r).get("reason") or "")[:200],
89
+ "timestamp": dict(r).get("timestamp", ""),
90
+ "new_content": (dict(r).get("new_content") or "")[:200],
91
+ }
92
+ for r in rows
93
+ ]
94
+ except Exception as exc:
95
+ logger.debug("changed_this_week modifications failed: %s", exc)
96
+
97
+ return {
98
+ "action": "changed_this_week",
99
+ "count": len(items),
100
+ "items": items,
101
+ "metadata": {
102
+ "days": days,
103
+ "modified_count": len(modified),
104
+ "modifications": modified,
105
+ },
106
+ }
107
+
108
+
109
+ def _action_opinions(
110
+ conn: sqlite3.Connection, pid: str, limit: int = 50, **_: Any,
111
+ ) -> dict[str, Any]:
112
+ """Opinion-type facts (user preferences, decisions, rationale)."""
113
+ items = []
114
+ try:
115
+ rows = conn.execute(
116
+ "SELECT fact_id, content, created_at, session_id, confidence "
117
+ "FROM atomic_facts "
118
+ "WHERE profile_id = ? AND fact_type = 'opinion' AND lifecycle = 'active' "
119
+ "ORDER BY created_at DESC LIMIT ?",
120
+ (pid, limit),
121
+ ).fetchall()
122
+ items = [
123
+ {
124
+ "fact_id": dict(r)["fact_id"],
125
+ "content": (dict(r).get("content") or "")[:200],
126
+ "created_at": dict(r).get("created_at", ""),
127
+ "session_id": dict(r).get("session_id", ""),
128
+ "confidence": round(float(dict(r).get("confidence", 0)), 3),
129
+ }
130
+ for r in rows
131
+ ]
132
+ except Exception as exc:
133
+ logger.debug("opinions query failed: %s", exc)
134
+
135
+ return {"action": "opinions", "count": len(items), "items": items}
136
+
137
+
138
+ def _action_contradictions(
139
+ conn: sqlite3.Connection, pid: str, limit: int = 50, **_: Any,
140
+ ) -> dict[str, Any]:
141
+ """Contradiction edges from sheaf cohomology."""
142
+ items = []
143
+ try:
144
+ rows = conn.execute(
145
+ "SELECT ge.edge_id, ge.source_id, ge.target_id, ge.weight, ge.created_at, "
146
+ "sf.content AS source_content, tf.content AS target_content "
147
+ "FROM graph_edges ge "
148
+ "LEFT JOIN atomic_facts sf ON sf.fact_id = ge.source_id "
149
+ "LEFT JOIN atomic_facts tf ON tf.fact_id = ge.target_id "
150
+ "WHERE ge.profile_id = ? AND ge.edge_type = 'contradiction' "
151
+ "ORDER BY ge.weight DESC, ge.created_at DESC LIMIT ?",
152
+ (pid, limit),
153
+ ).fetchall()
154
+ items = [
155
+ {
156
+ "edge_id": dict(r).get("edge_id", ""),
157
+ "source_id": dict(r).get("source_id", ""),
158
+ "target_id": dict(r).get("target_id", ""),
159
+ "severity": round(float(dict(r).get("weight", 0)), 3),
160
+ "source_content": (dict(r).get("source_content") or "")[:200],
161
+ "target_content": (dict(r).get("target_content") or "")[:200],
162
+ "created_at": dict(r).get("created_at", ""),
163
+ }
164
+ for r in rows
165
+ ]
166
+ except Exception as exc:
167
+ logger.debug("contradictions query failed: %s", exc)
168
+
169
+ return {"action": "contradictions", "count": len(items), "items": items}
170
+
171
+
172
+ def _action_health(
173
+ conn: sqlite3.Connection, pid: str, **_: Any,
174
+ ) -> dict[str, Any]:
175
+ """Aggregate memory health: trust, retention, coverage, counts."""
176
+ trust = {"high": 0, "medium": 0, "low": 0, "total": 0, "avg": 0.5}
177
+ retention_zones: dict[str, Any] | None = None
178
+ coverage: dict[str, int] = {}
179
+ totals = {"facts": 0, "entities": 0, "edges": 0}
180
+ community_count = 0
181
+
182
+ # Trust distribution
183
+ try:
184
+ row = conn.execute(
185
+ "SELECT "
186
+ "COUNT(CASE WHEN trust_score >= 0.7 THEN 1 END) AS high_trust, "
187
+ "COUNT(CASE WHEN trust_score >= 0.4 AND trust_score < 0.7 THEN 1 END) AS med_trust, "
188
+ "COUNT(CASE WHEN trust_score < 0.4 THEN 1 END) AS low_trust, "
189
+ "COUNT(*) AS total, ROUND(AVG(trust_score), 3) AS avg_trust "
190
+ "FROM trust_scores WHERE profile_id = ?",
191
+ (pid,),
192
+ ).fetchone()
193
+ if row:
194
+ d = dict(row)
195
+ trust = {
196
+ "high": d.get("high_trust", 0),
197
+ "medium": d.get("med_trust", 0),
198
+ "low": d.get("low_trust", 0),
199
+ "total": d.get("total", 0),
200
+ "avg": float(d.get("avg_trust", 0) or 0.5),
201
+ }
202
+ except Exception:
203
+ pass
204
+
205
+ # Retention zones (v3.2+)
206
+ try:
207
+ rows = conn.execute(
208
+ "SELECT lifecycle_zone, COUNT(*) AS cnt, "
209
+ "ROUND(AVG(retention_score), 3) AS avg_retention "
210
+ "FROM fact_retention WHERE profile_id = ? "
211
+ "GROUP BY lifecycle_zone",
212
+ (pid,),
213
+ ).fetchall()
214
+ retention_zones = {
215
+ dict(r)["lifecycle_zone"]: {
216
+ "count": dict(r)["cnt"],
217
+ "avg_retention": float(dict(r).get("avg_retention", 0) or 0),
218
+ }
219
+ for r in rows
220
+ }
221
+ except Exception:
222
+ pass # Table may not exist in older DBs
223
+
224
+ # Coverage by fact_type
225
+ try:
226
+ rows = conn.execute(
227
+ "SELECT fact_type, COUNT(*) AS cnt "
228
+ "FROM atomic_facts WHERE profile_id = ? AND lifecycle = 'active' "
229
+ "GROUP BY fact_type",
230
+ (pid,),
231
+ ).fetchall()
232
+ coverage = {dict(r)["fact_type"]: dict(r)["cnt"] for r in rows}
233
+ except Exception:
234
+ pass
235
+
236
+ # Totals
237
+ try:
238
+ row = conn.execute(
239
+ "SELECT "
240
+ "(SELECT COUNT(*) FROM atomic_facts WHERE profile_id = ?) AS total_facts, "
241
+ "(SELECT COUNT(*) FROM canonical_entities WHERE profile_id = ?) AS total_entities, "
242
+ "(SELECT COUNT(*) FROM graph_edges WHERE profile_id = ?) AS total_edges",
243
+ (pid, pid, pid),
244
+ ).fetchone()
245
+ if row:
246
+ d = dict(row)
247
+ totals = {
248
+ "facts": d.get("total_facts", 0),
249
+ "entities": d.get("total_entities", 0),
250
+ "edges": d.get("total_edges", 0),
251
+ }
252
+ except Exception:
253
+ pass
254
+
255
+ # Community count
256
+ try:
257
+ row = conn.execute(
258
+ "SELECT COUNT(DISTINCT community_id) AS cnt "
259
+ "FROM fact_importance WHERE profile_id = ? AND community_id IS NOT NULL",
260
+ (pid,),
261
+ ).fetchone()
262
+ if row:
263
+ community_count = dict(row).get("cnt", 0)
264
+ except Exception:
265
+ pass
266
+
267
+ return {
268
+ "action": "health",
269
+ "count": 1,
270
+ "items": [{
271
+ "trust": trust,
272
+ "retention_zones": retention_zones,
273
+ "coverage": coverage,
274
+ "totals": totals,
275
+ "community_count": community_count,
276
+ }],
277
+ }
278
+
279
+
280
+ def _action_cross_project(
281
+ conn: sqlite3.Connection, pid: str, limit: int = 50, **_: Any,
282
+ ) -> dict[str, Any]:
283
+ """Entities spanning multiple sessions."""
284
+ items = []
285
+ try:
286
+ rows = conn.execute(
287
+ "SELECT ce.entity_id, ce.canonical_name, ce.entity_type, ce.fact_count, "
288
+ "COUNT(DISTINCT af.session_id) AS session_count, "
289
+ "GROUP_CONCAT(DISTINCT af.session_id) AS session_ids "
290
+ "FROM canonical_entities ce "
291
+ "JOIN atomic_facts af ON af.profile_id = ? AND EXISTS ("
292
+ " SELECT 1 FROM json_each(af.canonical_entities_json) je "
293
+ " WHERE je.value = ce.entity_id"
294
+ ") "
295
+ "WHERE ce.profile_id = ? "
296
+ "GROUP BY ce.entity_id "
297
+ "HAVING COUNT(DISTINCT af.session_id) > 1 "
298
+ "ORDER BY session_count DESC, ce.fact_count DESC LIMIT ?",
299
+ (pid, pid, limit),
300
+ ).fetchall()
301
+ items = [
302
+ {
303
+ "entity_id": dict(r).get("entity_id", ""),
304
+ "canonical_name": dict(r).get("canonical_name", ""),
305
+ "entity_type": dict(r).get("entity_type", ""),
306
+ "fact_count": dict(r).get("fact_count", 0),
307
+ "session_count": dict(r).get("session_count", 0),
308
+ "session_ids": (dict(r).get("session_ids") or "").split(",")[:10],
309
+ }
310
+ for r in rows
311
+ ]
312
+ except Exception as exc:
313
+ logger.debug("cross_project query failed: %s", exc)
314
+
315
+ return {"action": "cross_project", "count": len(items), "items": items}
316
+
317
+
318
# ── Dispatch Map ──────────────────────────────────────────────────

# Whitelist of insight handlers; the endpoint rejects any other name,
# so this doubles as input validation for the path parameter.
ALLOWED_ACTIONS: dict[str, Callable] = dict(
    changed_this_week=_action_changed_this_week,
    opinions=_action_opinions,
    contradictions=_action_contradictions,
    health=_action_health,
    cross_project=_action_cross_project,
)
327
+
328
+
329
# ── Endpoint ──────────────────────────────────────────────────────

@router.get("/{action_name}")
async def insight_action(
    action_name: str,
    profile: str = "",
    limit: int = Query(default=50, ge=1, le=200),
    days: int = Query(default=7, ge=1, le=90),
):
    """Run a quick insight action against the memory database."""
    # Dispatch strictly through the whitelist; anything else is a 400.
    try:
        handler = ALLOWED_ACTIONS[action_name]
    except KeyError:
        valid = ", ".join(sorted(ALLOWED_ACTIONS.keys()))
        return JSONResponse(
            {"error": f"Unknown action: '{action_name}'. Valid: {valid}"},
            status_code=400,
        )

    conn, pid = _get_conn(profile)
    if conn is None:
        # No DB yet: report an empty, well-formed payload rather than 500.
        return {
            "action": action_name,
            "profile": pid,
            "count": 0,
            "items": [],
            "metadata": {"error": "Database not found"},
        }

    try:
        payload = handler(conn, pid, limit=limit, days=days)
    except Exception as exc:
        logger.warning("Insight action %s failed: %s", action_name, exc)
        return JSONResponse(
            {"error": f"Query failed: {exc}"},
            status_code=500,
        )
    else:
        payload["profile"] = pid
        return payload
    finally:
        # Runs for both the success and error paths above.
        conn.close()
@@ -25,17 +25,25 @@ LEARNING_DB = MEMORY_DIR / "learning.db"
25
25
 
26
26
  # Feature detection
27
27
  LEARNING_AVAILABLE = False
28
+ BEHAVIORAL_AVAILABLE = False
28
29
  try:
29
30
  from superlocalmemory.learning.feedback import FeedbackCollector
30
31
  from superlocalmemory.learning.engagement import EngagementTracker
31
32
  from superlocalmemory.learning.ranker import AdaptiveRanker
32
33
  LEARNING_AVAILABLE = True
33
- except ImportError:
34
+ except ImportError as e:
35
+ logger.warning("V3 learning primary import failed: %s", e)
34
36
  try:
35
37
  from superlocalmemory.learning.adaptive import AdaptiveLearner
36
38
  LEARNING_AVAILABLE = True
37
- except ImportError:
38
- logger.info("V3 learning system not available")
39
+ except ImportError as e2:
40
+ logger.warning("V3 learning fallback import failed: %s", e2)
41
+
42
+ try:
43
+ from superlocalmemory.learning.behavioral import BehavioralPatternStore
44
+ BEHAVIORAL_AVAILABLE = True
45
+ except ImportError as e:
46
+ logger.warning("V3 behavioral import failed: %s", e)
39
47
 
40
48
  # Lazy singletons
41
49
  _feedback: FeedbackCollector | None = None
@@ -86,7 +94,7 @@ async def learning_status():
86
94
  from superlocalmemory.learning.feedback import FeedbackCollector
87
95
  from pathlib import Path
88
96
  import sqlite3 as _sqlite3
89
- learning_db = Path.home() / ".superlocalmemory" / "learning.db"
97
+ learning_db = LEARNING_DB
90
98
  if learning_db.exists():
91
99
  collector = FeedbackCollector(learning_db)
92
100
  signal_count = collector.get_feedback_count(active_profile)
@@ -149,7 +157,7 @@ async def learning_status():
149
157
  try:
150
158
  from superlocalmemory.learning.behavioral import BehavioralPatternStore
151
159
  from pathlib import Path
152
- learning_db = Path.home() / ".superlocalmemory" / "learning.db"
160
+ learning_db = LEARNING_DB
153
161
  if learning_db.exists():
154
162
  store = BehavioralPatternStore(str(learning_db))
155
163
  all_patterns = store.get_patterns(profile_id=active_profile)
@@ -179,9 +187,11 @@ async def learning_status():
179
187
  else:
180
188
  result["tech_preferences"] = []
181
189
  result["workflow_patterns"] = []
182
- except Exception:
190
+ except Exception as exc:
191
+ logger.error("Error fetching behavioral patterns: %s", exc, exc_info=True)
183
192
  result["tech_preferences"] = []
184
193
  result["workflow_patterns"] = []
194
+ result["pattern_error"] = str(exc)
185
195
  result["source_scores"] = {}
186
196
 
187
197
  except Exception as e:
@@ -310,6 +320,96 @@ async def feedback_stats():
310
320
  return {"total_signals": 0, "ranking_phase": "baseline", "progress": 0, "error": str(e)}
311
321
 
312
322
 
323
# ============================================================================
# PATTERNS ENDPOINT (v3.4.1 — CRITICAL FIX: frontend calls /api/patterns)
# ============================================================================

@router.get("/api/patterns")
async def get_patterns():
    """Get learned behavioral patterns for the Patterns dashboard tab.

    v3.4.1: This endpoint was MISSING — patterns.js calls /api/patterns
    but no backend route existed. The frontend always showed 'No patterns'.
    Now queries BehavioralPatternStore + learning signals.
    """
    active_profile = get_active_profile()

    patterns: dict = {
        "preference": [],
        "style": [],
        "terminology": [],
        "workflow": [],
    }
    result: dict = {
        "available": BEHAVIORAL_AVAILABLE or LEARNING_AVAILABLE,
        "patterns": patterns,
        "signal_stats": {},
    }

    # Behavioral patterns from BehavioralPatternStore
    if BEHAVIORAL_AVAILABLE:
        try:
            store = BehavioralPatternStore(str(LEARNING_DB))
            all_patterns = store.get_patterns(profile_id=active_profile)
            # Map store pattern_type values onto the four frontend buckets
            # (mirrors the type_map used by the stats route); unknown types
            # default to "preference" as before.
            bucket_map = {
                "tech_preference": "preference",
                "style": "style",
                "terminology": "terminology",
                "temporal": "workflow",
                "interest": "workflow",
                "workflow": "workflow",
            }
            for p in all_patterns:
                entry = {
                    "key": p.get("pattern_key", ""),
                    "value": p.get("metadata", {}).get("value", ""),
                    # `or 0` guards against a stored None confidence
                    "confidence": round(float(p.get("confidence") or 0), 3),
                    "evidence_count": p.get("evidence_count", 0),
                    "created_at": p.get("created_at", ""),
                    "updated_at": p.get("updated_at", ""),
                }
                bucket = bucket_map.get(p.get("pattern_type", ""), "preference")
                patterns[bucket].append(entry)
        except Exception as exc:
            logger.error("Error loading patterns from behavioral store: %s", exc)
            result["pattern_error"] = str(exc)

    # Learning signal stats (feedback count, co-retrieval, channel credits)
    try:
        from superlocalmemory.learning.signals import LearningSignals
        signals = LearningSignals(str(LEARNING_DB))
        result["signal_stats"] = signals.get_signal_stats(active_profile)
    except Exception as exc:
        logger.debug("Signal stats unavailable: %s", exc)

    # Graph intelligence contribution to learning (v3.4.1)
    try:
        import sqlite3 as _sqlite3
        from superlocalmemory.server.routes.helpers import DB_PATH
        if DB_PATH.exists():
            conn = _sqlite3.connect(str(DB_PATH))
            try:
                # try/finally ensures the connection is closed even when the
                # query raises (the previous version leaked it in that case).
                conn.row_factory = _sqlite3.Row
                row = conn.execute(
                    "SELECT COUNT(*) AS cnt, COUNT(DISTINCT community_id) AS communities, "
                    "ROUND(AVG(pagerank_score), 4) AS avg_pagerank "
                    "FROM fact_importance WHERE profile_id = ?",
                    (active_profile,),
                ).fetchone()
                if row:
                    d = dict(row)
                    result["graph_intelligence"] = {
                        "facts_analyzed": d.get("cnt", 0),
                        "communities_detected": d.get("communities", 0),
                        "avg_pagerank": float(d.get("avg_pagerank", 0) or 0),
                    }
            finally:
                conn.close()
    except Exception:
        pass

    return result
411
+
412
+
313
413
  @router.post("/api/learning/backup")
314
414
  async def learning_backup():
315
415
  """Backup learning.db to a timestamped file."""
@@ -46,16 +46,20 @@ def _fetch_graph_data(
46
46
  ) -> tuple[list, list, list]:
47
47
  """Fetch graph nodes, links, clusters from V3 or V2 schema."""
48
48
  if use_v3:
49
- # Recency-first: get the most recent nodes, then find their edges
49
+ # Recency-first: get the most recent nodes, then find their edges.
50
+ # LEFT JOIN fact_importance for graph metrics (v3.4.1 — additive only).
50
51
  cursor.execute("""
51
- SELECT fact_id as id, content, fact_type as category,
52
- confidence as importance, session_id as project_name,
53
- created_at
54
- FROM atomic_facts
55
- WHERE profile_id = ? AND confidence >= ?
56
- ORDER BY created_at DESC
52
+ SELECT af.fact_id as id, af.content, af.fact_type as category,
53
+ af.confidence as importance, af.session_id as project_name,
54
+ af.created_at,
55
+ fi.pagerank_score, fi.community_id, fi.degree_centrality
56
+ FROM atomic_facts af
57
+ LEFT JOIN fact_importance fi
58
+ ON af.fact_id = fi.fact_id AND fi.profile_id = ?
59
+ WHERE af.profile_id = ? AND af.confidence >= ?
60
+ ORDER BY af.created_at DESC
57
61
  LIMIT ?
58
- """, (profile, min_importance / 10.0, max_nodes))
62
+ """, (profile, profile, min_importance / 10.0, max_nodes))
59
63
  nodes = cursor.fetchall()
60
64
 
61
65
  node_ids = {n['id'] for n in nodes}
@@ -80,6 +84,13 @@ def _fetch_graph_data(
80
84
  for n in nodes:
81
85
  n['entities'] = []
82
86
  n['content_preview'] = _preview(n.get('content'))
87
+ # v3.4.1: Default graph metrics when fact_importance has no data
88
+ if n.get('pagerank_score') is None:
89
+ n['pagerank_score'] = 0.0
90
+ if n.get('community_id') is None:
91
+ n['community_id'] = 0
92
+ if n.get('degree_centrality') is None:
93
+ n['degree_centrality'] = 0.0
83
94
 
84
95
  # Filter edges to only those between displayed nodes
85
96
  node_ids = {n['id'] for n in nodes}
@@ -290,7 +301,7 @@ async def get_memories(
290
301
  @router.get("/api/graph")
291
302
  async def get_graph(
292
303
  request: Request,
293
- max_nodes: int = Query(100, ge=10, le=500),
304
+ max_nodes: int = Query(100, ge=10, le=10000),
294
305
  min_importance: int = Query(1, ge=1, le=10),
295
306
  ):
296
307
  """Get knowledge graph data for D3.js force-directed visualization."""
@@ -64,14 +64,26 @@ async def get_stats():
64
64
  pass
65
65
 
66
66
  total_clusters = 0
67
+ # v3.4.1: Use community_id from fact_importance (graph intelligence)
67
68
  try:
68
69
  cursor.execute(
69
- "SELECT COUNT(DISTINCT scene_id) as total FROM scenes WHERE profile_id = ?",
70
+ "SELECT COUNT(DISTINCT community_id) as total FROM fact_importance "
71
+ "WHERE profile_id = ? AND community_id IS NOT NULL",
70
72
  (active_profile,),
71
73
  )
72
74
  total_clusters = cursor.fetchone()['total']
73
75
  except Exception:
74
76
  pass
77
+ # Fallback: V2 scenes table
78
+ if total_clusters == 0:
79
+ try:
80
+ cursor.execute(
81
+ "SELECT COUNT(DISTINCT scene_id) as total FROM scenes WHERE profile_id = ?",
82
+ (active_profile,),
83
+ )
84
+ total_clusters = cursor.fetchone()['total']
85
+ except Exception:
86
+ pass
75
87
  # Fallback: V2-migrated clusters stored as cluster_id on memories
76
88
  if total_clusters == 0:
77
89
  try:
@@ -311,11 +323,21 @@ async def get_patterns():
311
323
  from superlocalmemory.learning.behavioral import BehavioralPatternStore
312
324
  store = BehavioralPatternStore(str(MEMORY_DIR / "learning.db"))
313
325
  raw = store.get_patterns(profile_id=active_profile)
326
+ # v3.4.1: Map pattern_type to frontend-expected keys
327
+ type_map = {
328
+ "tech_preference": "preference",
329
+ "style": "style",
330
+ "terminology": "terminology",
331
+ "temporal": "workflow",
332
+ "interest": "workflow",
333
+ "workflow": "workflow",
334
+ }
314
335
  grouped = defaultdict(list)
315
336
  for p in raw:
316
337
  meta = p.get("metadata", {})
317
- grouped[p["pattern_type"]].append({
318
- "pattern_type": p["pattern_type"],
338
+ frontend_key = type_map.get(p.get("pattern_type", ""), "preference")
339
+ grouped[frontend_key].append({
340
+ "pattern_type": p.get("pattern_type", ""),
319
341
  "key": meta.get("key", p.get("pattern_key", "")),
320
342
  "value": meta.get("value", p.get("pattern_key", "")),
321
343
  "confidence": p.get("confidence", 0),