superlocalmemory 3.4.0 → 3.4.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. package/README.md +7 -8
  2. package/docs/screenshots/01-dashboard-main.png +0 -0
  3. package/docs/screenshots/02-knowledge-graph.png +0 -0
  4. package/docs/screenshots/03-patterns-learning.png +0 -0
  5. package/docs/screenshots/04-learning-dashboard.png +0 -0
  6. package/docs/screenshots/05-behavioral-analysis.png +0 -0
  7. package/docs/screenshots/06-graph-communities.png +0 -0
  8. package/package.json +2 -2
  9. package/pyproject.toml +11 -2
  10. package/scripts/postinstall.js +26 -7
  11. package/src/superlocalmemory/cli/commands.py +42 -60
  12. package/src/superlocalmemory/cli/daemon.py +107 -47
  13. package/src/superlocalmemory/cli/main.py +10 -0
  14. package/src/superlocalmemory/cli/setup_wizard.py +137 -9
  15. package/src/superlocalmemory/core/config.py +28 -0
  16. package/src/superlocalmemory/core/consolidation_engine.py +38 -1
  17. package/src/superlocalmemory/core/engine.py +9 -0
  18. package/src/superlocalmemory/core/engine_wiring.py +5 -1
  19. package/src/superlocalmemory/core/graph_analyzer.py +254 -12
  20. package/src/superlocalmemory/core/health_monitor.py +313 -0
  21. package/src/superlocalmemory/core/reranker_worker.py +19 -5
  22. package/src/superlocalmemory/ingestion/__init__.py +13 -0
  23. package/src/superlocalmemory/ingestion/adapter_manager.py +234 -0
  24. package/src/superlocalmemory/ingestion/base_adapter.py +177 -0
  25. package/src/superlocalmemory/ingestion/calendar_adapter.py +340 -0
  26. package/src/superlocalmemory/ingestion/credentials.py +118 -0
  27. package/src/superlocalmemory/ingestion/gmail_adapter.py +369 -0
  28. package/src/superlocalmemory/ingestion/parsers.py +100 -0
  29. package/src/superlocalmemory/ingestion/transcript_adapter.py +156 -0
  30. package/src/superlocalmemory/learning/consolidation_worker.py +287 -53
  31. package/src/superlocalmemory/learning/entity_compiler.py +377 -0
  32. package/src/superlocalmemory/mesh/__init__.py +12 -0
  33. package/src/superlocalmemory/mesh/broker.py +344 -0
  34. package/src/superlocalmemory/retrieval/entity_channel.py +141 -4
  35. package/src/superlocalmemory/retrieval/spreading_activation.py +45 -0
  36. package/src/superlocalmemory/server/api.py +15 -8
  37. package/src/superlocalmemory/server/routes/behavioral.py +8 -4
  38. package/src/superlocalmemory/server/routes/chat.py +320 -0
  39. package/src/superlocalmemory/server/routes/entity.py +95 -0
  40. package/src/superlocalmemory/server/routes/ingest.py +110 -0
  41. package/src/superlocalmemory/server/routes/insights.py +368 -0
  42. package/src/superlocalmemory/server/routes/learning.py +106 -6
  43. package/src/superlocalmemory/server/routes/memories.py +20 -9
  44. package/src/superlocalmemory/server/routes/mesh.py +186 -0
  45. package/src/superlocalmemory/server/routes/stats.py +25 -3
  46. package/src/superlocalmemory/server/routes/timeline.py +252 -0
  47. package/src/superlocalmemory/server/routes/v3_api.py +161 -0
  48. package/src/superlocalmemory/server/ui.py +8 -0
  49. package/src/superlocalmemory/server/unified_daemon.py +691 -0
  50. package/src/superlocalmemory/storage/schema_v343.py +229 -0
  51. package/src/superlocalmemory/ui/index.html +168 -58
  52. package/src/superlocalmemory/ui/js/graph-event-bus.js +83 -0
  53. package/src/superlocalmemory/ui/js/graph-filters.js +1 -1
  54. package/src/superlocalmemory/ui/js/knowledge-graph.js +942 -0
  55. package/src/superlocalmemory/ui/js/memory-chat.js +344 -0
  56. package/src/superlocalmemory/ui/js/memory-timeline.js +265 -0
  57. package/src/superlocalmemory/ui/js/quick-actions.js +334 -0
  58. package/src/superlocalmemory.egg-info/PKG-INFO +0 -594
  59. package/src/superlocalmemory.egg-info/SOURCES.txt +0 -279
  60. package/src/superlocalmemory.egg-info/dependency_links.txt +0 -1
  61. package/src/superlocalmemory.egg-info/entry_points.txt +0 -2
  62. package/src/superlocalmemory.egg-info/requires.txt +0 -47
  63. package/src/superlocalmemory.egg-info/top_level.txt +0 -1
@@ -0,0 +1,377 @@
1
+ # Copyright (c) 2026 Varun Pratap Bhardwaj / Qualixar
2
+ # Licensed under the Elastic License 2.0 - see LICENSE file
3
+ # Part of SuperLocalMemory V3 | https://qualixar.com | https://varunpratap.com
4
+
5
+ """Entity Compilation Engine — auto-generates compiled truth per entity.
6
+
7
+ Builds knowledge summaries using PageRank centrality + Louvain community detection
8
+ (Mode A extractive) or local LLM (Mode B). Per-project, per-profile scoping.
9
+ 2000 character hard limit. Read-only layer — never replaces atomic facts.
10
+
11
+ Runs after consolidation (every 6 hours or on-demand).
12
+
13
+ Part of Qualixar | Author: Varun Pratap Bhardwaj
14
+ License: Elastic-2.0
15
+ """
16
+
17
+ from __future__ import annotations
18
+
19
+ import json
20
+ import logging
21
+ import sqlite3
22
+ import time
23
+ import uuid
24
+ from datetime import datetime, timezone
25
+ from pathlib import Path
26
+
27
+ logger = logging.getLogger("superlocalmemory.entity_compiler")
28
+
29
+ _MAX_COMPILED_TRUTH_CHARS = 2000
30
+ _MAX_TIMELINE_ENTRIES = 100
31
+
32
+
33
class EntityCompiler:
    """Compiles knowledge summaries for entities from atomic facts.

    Mode A: Extractive (no LLM) — PageRank + Louvain + top sentences
    Mode B: Local LLM via Ollama — prompt with top facts

    The compiled truth is a read-only layer capped at 2000 characters;
    atomic facts are never modified or replaced.
    """

    # Shared query: facts that mention an entity, best-ranked first.
    # NOTE(review): the LIKE on canonical_entities_json is a substring match
    # against the serialized entity-id list — assumes entity ids are opaque
    # tokens that cannot be prefixes of one another; confirm upstream.
    # NULLS LAST requires SQLite >= 3.30.
    _FACTS_QUERY = """
        SELECT af.fact_id, af.content, af.confidence, af.created_at,
               fi.pagerank_score, fi.community_id
        FROM atomic_facts af
        LEFT JOIN fact_importance fi ON af.fact_id = fi.fact_id
        WHERE af.canonical_entities_json LIKE ? AND af.profile_id = ?
        ORDER BY fi.pagerank_score DESC NULLS LAST, af.confidence DESC
        LIMIT 50
    """

    def __init__(self, memory_db: str | Path, config=None):
        """Create a compiler bound to one SQLite memory database.

        Args:
            memory_db: Path to the SQLite database file.
            config: Optional config object. ``config.mode`` selects the
                compilation mode ("a" extractive; "b"/"c" LLM-backed);
                ``config.entity_compilation_enabled`` can disable runs;
                ``config.llm`` supplies Ollama ``api_base`` and ``model``.
        """
        self._db_path = str(memory_db)
        self._config = config
        self._mode = "a"
        if config:
            mode = getattr(config, 'mode', None)
            if mode:
                # Accept both plain strings and Enum-like objects with .value.
                self._mode = getattr(mode, 'value', str(mode)).lower()

    def compile_all(self, profile_id: str) -> dict:
        """Compile all entities that have new facts across all projects.

        Returns stats: {compiled: N, skipped: N, errors: N}
        (plus ``reason`` when compilation is disabled by config).
        """
        if self._config and not getattr(self._config, 'entity_compilation_enabled', True):
            return {"compiled": 0, "skipped": 0, "errors": 0, "reason": "disabled"}

        stats = {"compiled": 0, "skipped": 0, "errors": 0}
        conn = self._connect()
        try:
            # Get all distinct projects for this profile; fall back to the
            # default "" project when no entity_profiles rows exist yet.
            projects = conn.execute(
                "SELECT DISTINCT project_name FROM entity_profiles WHERE profile_id = ?",
                (profile_id,),
            ).fetchall()
            project_names = [r[0] for r in projects] if projects else [""]

            for project_name in project_names:
                result = self._compile_project(conn, profile_id, project_name)
                stats["compiled"] += result["compiled"]
                stats["skipped"] += result["skipped"]
                stats["errors"] += result["errors"]
        finally:
            conn.close()

        if stats["compiled"] > 0:
            logger.info("Entity compilation: %d compiled, %d skipped, %d errors",
                        stats["compiled"], stats["skipped"], stats["errors"])
        return stats

    def compile_entity(self, profile_id: str, project_name: str,
                       entity_id: str, entity_name: str) -> dict | None:
        """Compile a single entity on-demand. Returns compiled truth or None."""
        conn = self._connect()
        try:
            return self._compile_single(conn, profile_id, project_name,
                                        entity_id, entity_name)
        finally:
            conn.close()

    def _connect(self) -> sqlite3.Connection:
        """Open a WAL-mode connection with a 5s busy timeout and Row rows."""
        conn = sqlite3.connect(self._db_path)
        conn.execute("PRAGMA journal_mode=WAL")
        conn.execute("PRAGMA busy_timeout=5000")
        conn.row_factory = sqlite3.Row
        return conn

    def _compile_project(self, conn: sqlite3.Connection, profile_id: str,
                         project_name: str) -> dict:
        """Compile all entities needing update in a project.

        An entity needs compilation when it has facts newer than its last
        compilation, or has never been compiled for this project.
        """
        stats = {"compiled": 0, "skipped": 0, "errors": 0}

        entities = conn.execute("""
            SELECT DISTINCT ce.entity_id, ce.canonical_name, ce.entity_type
            FROM canonical_entities ce
            WHERE ce.profile_id = ?
              AND (
                EXISTS (
                    SELECT 1 FROM atomic_facts af
                    WHERE af.canonical_entities_json LIKE '%' || ce.entity_id || '%'
                      AND af.profile_id = ?
                      AND af.created_at > COALESCE(
                          (SELECT last_compiled_at FROM entity_profiles
                           WHERE entity_id = ce.entity_id
                             AND profile_id = ?
                             AND project_name = ?),
                          '1970-01-01')
                )
                OR NOT EXISTS (
                    SELECT 1 FROM entity_profiles
                    WHERE entity_id = ce.entity_id
                      AND profile_id = ?
                      AND project_name = ?
                      AND last_compiled_at IS NOT NULL
                )
              )
        """, (profile_id, profile_id, profile_id, project_name,
              profile_id, project_name)).fetchall()

        for entity in entities:
            try:
                result = self._compile_single(
                    conn, profile_id, project_name,
                    entity["entity_id"], entity["canonical_name"],
                    entity_type=entity["entity_type"],
                )
                if result:
                    stats["compiled"] += 1
                else:
                    stats["skipped"] += 1
            except Exception as exc:
                # One bad entity must not abort the whole batch; log at debug
                # because failures here are expected to be transient.
                logger.debug("Entity compilation error for %s: %s",
                             entity["canonical_name"], exc)
                stats["errors"] += 1

        return stats

    def _fetch_facts(self, conn: sqlite3.Connection, entity_id: str,
                     profile_id: str) -> list:
        """Return up to 50 facts mentioning *entity_id*, best-ranked first."""
        return conn.execute(self._FACTS_QUERY,
                            (f"%{entity_id}%", profile_id)).fetchall()

    def _compile_single(self, conn: sqlite3.Connection, profile_id: str,
                        project_name: str, entity_id: str, entity_name: str,
                        entity_type: str = "unknown") -> dict | None:
        """Compile one entity. Returns the compiled truth dict or None.

        Steps: gather facts → backfill PageRank if missing → generate a
        summary (Mode B with Mode A fallback) → truncate → append a timeline
        entry → upsert the entity_profiles row.
        """
        facts = self._fetch_facts(conn, entity_id, profile_id)
        if not facts:
            return None

        # Backfill PageRank when no fact has a score yet, then re-rank.
        has_pagerank = any(f["pagerank_score"] is not None for f in facts)
        if not has_pagerank and len(facts) > 2:
            self._compute_pagerank(conn, [f["fact_id"] for f in facts], profile_id)
            facts = self._fetch_facts(conn, entity_id, profile_id)

        # Generate compiled truth; Mode B falls back to Mode A on failure.
        if self._mode in ("b", "c") and len(facts) > 3:
            compiled = self._compile_mode_b(entity_name, facts)
            if not compiled:
                compiled = self._compile_mode_a(entity_name, entity_type, facts)
        else:
            compiled = self._compile_mode_a(entity_name, entity_type, facts)

        # Enforce the 2000-character hard limit.
        compiled = self._truncate(compiled, _MAX_COMPILED_TRUTH_CHARS)

        now = datetime.now(timezone.utc).isoformat()
        timeline_entry = {
            "date": now,
            "action": "compiled",
            "facts_used": len(facts),
            "mode": self._mode,
        }

        # Load any existing timeline; tolerate corrupt JSON.
        existing = conn.execute(
            "SELECT timeline, profile_entry_id FROM entity_profiles "
            "WHERE entity_id = ? AND profile_id = ? AND project_name = ?",
            (entity_id, profile_id, project_name),
        ).fetchone()

        timeline = []
        if existing and existing["timeline"]:
            try:
                timeline = json.loads(existing["timeline"])
            except (json.JSONDecodeError, TypeError):
                timeline = []
        timeline.append(timeline_entry)
        # Keep only the most recent 100 entries.
        if len(timeline) > _MAX_TIMELINE_ENTRIES:
            timeline = timeline[-_MAX_TIMELINE_ENTRIES:]

        fact_ids = [f["fact_id"] for f in facts]
        # Missing confidence is treated as a neutral 0.5.
        avg_conf = sum(f["confidence"] or 0.5 for f in facts) / max(len(facts), 1)

        # Upsert the profile row.
        if existing:
            conn.execute("""
                UPDATE entity_profiles SET
                    compiled_truth = ?, timeline = ?, fact_ids_json = ?,
                    last_compiled_at = ?, compilation_confidence = ?, last_updated = ?
                WHERE entity_id = ? AND profile_id = ? AND project_name = ?
            """, (compiled, json.dumps(timeline), json.dumps(fact_ids),
                  now, round(avg_conf, 3), now,
                  entity_id, profile_id, project_name))
        else:
            entry_id = str(uuid.uuid4())[:16]
            conn.execute("""
                INSERT INTO entity_profiles
                    (profile_entry_id, entity_id, profile_id, project_name,
                     knowledge_summary, compiled_truth, timeline, fact_ids_json,
                     last_compiled_at, compilation_confidence, last_updated)
                VALUES (?, ?, ?, ?, '', ?, ?, ?, ?, ?, ?)
            """, (entry_id, entity_id, profile_id, project_name,
                  compiled, json.dumps(timeline), json.dumps(fact_ids),
                  now, round(avg_conf, 3), now))

        conn.commit()

        return {
            "entity_name": entity_name,
            "compiled_truth": compiled,
            "facts_used": len(facts),
            "confidence": round(avg_conf, 3),
        }

    # -- Mode A: Extractive (no LLM) --

    def _compile_mode_a(self, entity_name: str, entity_type: str,
                        facts: list) -> str:
        """Extract top sentences by PageRank, grouped by community.

        Takes the first sentence of the top 3 facts per community and
        deduplicates by normalized (lowercased) text.
        """
        header = f"{entity_name}"
        if entity_type and entity_type != "unknown":
            header += f" ({entity_type})"
        header += "\n"

        # Group facts by community (missing/zero community_id → bucket 0).
        communities: dict[int, list] = {}
        for f in facts:
            cid = f["community_id"] or 0
            communities.setdefault(cid, []).append(f)

        sentences = []
        seen_content = set()
        for cid in sorted(communities.keys()):
            community_facts = communities[cid]
            # Top 3 facts per community (facts arrive pre-ranked).
            for fact in community_facts[:3]:
                content = fact["content"]
                # Extract the first sentence; ensure a trailing period.
                first_sent = content.split(". ")[0].strip()
                if not first_sent.endswith("."):
                    first_sent += "."
                # Dedup by exact (case-insensitive) match.
                normalized = first_sent.lower().strip()
                if normalized not in seen_content:
                    seen_content.add(normalized)
                    sentences.append(first_sent)

        body = " ".join(sentences)
        return header + body

    # -- Mode B: LLM via Ollama --

    def _compile_mode_b(self, entity_name: str, facts: list) -> str | None:
        """Summarize via local LLM (Ollama). Returns None on failure.

        Any exception (Ollama down, timeout, bad JSON) is swallowed so the
        caller can fall back to Mode A.
        """
        try:
            import urllib.request
            # Resolve Ollama endpoint and model, preferring config.llm.
            api_base = "http://localhost:11434"
            model = "llama3.2"
            llm_cfg = getattr(self._config, 'llm', None) if self._config else None
            if llm_cfg is not None:
                api_base = getattr(llm_cfg, 'api_base', None) or api_base
                model = getattr(llm_cfg, 'model', None) or model

            top_facts = "\n".join(
                f"- {f['content']}" for f in facts[:20]
            )
            prompt = (
                f"Summarize these facts about {entity_name} into a concise profile. "
                f"Maximum 2000 characters. Include key relationships, decisions, status. "
                f"Organize by topic, not chronology. Flag contradictions.\n\n"
                f"Facts (by importance):\n{top_facts}"
            )

            payload = json.dumps({
                "model": model,
                "prompt": prompt,
                "stream": False,
                "options": {"num_predict": 500},
            }).encode()

            req = urllib.request.Request(
                f"{api_base}/api/generate",
                data=payload,
                headers={"Content-Type": "application/json"},
            )
            # Context manager guarantees the HTTP response is closed
            # (the original leaked the connection on every call).
            with urllib.request.urlopen(req, timeout=30) as resp:
                result = json.loads(resp.read().decode())
            text = result.get("response", "").strip()
            return text if text else None
        except Exception as exc:
            logger.debug("Mode B compilation failed, falling back to Mode A: %s", exc)
            return None

    # -- Helpers --

    def _compute_pagerank(self, conn: sqlite3.Connection,
                          fact_ids: list[str], profile_id: str) -> None:
        """Compute PageRank for a set of facts. Stores in fact_importance.

        Best-effort: silently no-ops when NetworkX is unavailable or the
        computation fails. NOTE(review): all facts here share one entity, so
        the graph is complete with uniform weights and PageRank degenerates
        to near-uniform scores — consider real co-mention edges.
        """
        try:
            import networkx as nx
            G = nx.Graph()
            for fid in fact_ids:
                G.add_node(fid)
            # Simple heuristic: facts about the same entity are connected.
            for i, fid1 in enumerate(fact_ids):
                for fid2 in fact_ids[i + 1:]:
                    G.add_edge(fid1, fid2, weight=0.5)

            if len(G.nodes) < 2:
                return

            scores = nx.pagerank(G, alpha=0.85)
            now = datetime.now(timezone.utc).isoformat()

            # Upsert per fact; ON CONFLICT assumes fact_id is the PK/unique
            # key of fact_importance.
            for fid, score in scores.items():
                conn.execute("""
                    INSERT INTO fact_importance (fact_id, profile_id, pagerank_score, computed_at)
                    VALUES (?, ?, ?, ?)
                    ON CONFLICT(fact_id) DO UPDATE SET pagerank_score=excluded.pagerank_score,
                        computed_at=excluded.computed_at
                """, (fid, profile_id, round(score, 6), now))
            conn.commit()
        except ImportError:
            logger.debug("NetworkX not available — skipping PageRank")
        except Exception as exc:
            logger.debug("PageRank computation failed: %s", exc)

    @staticmethod
    def _truncate(text: str, max_chars: int) -> str:
        """Truncate at a sentence boundary within the char limit.

        Prefers the last ". " boundary in the truncated prefix when it lies
        past the halfway point; otherwise hard-truncates and appends "...".
        """
        if len(text) <= max_chars:
            return text
        truncated = text[:max_chars]
        last_period = truncated.rfind(". ")
        if last_period > max_chars // 2:
            return truncated[:last_period + 1]
        return truncated.rstrip() + "..."
@@ -0,0 +1,12 @@
1
+ # Copyright (c) 2026 Varun Pratap Bhardwaj / Qualixar
2
+ # Licensed under the Elastic License 2.0 - see LICENSE file
3
+ # Part of SuperLocalMemory V3 | https://qualixar.com | https://varunpratap.com
4
+
5
+ """SLM Mesh — Python port of the P2P agent communication broker.
6
+
7
+ Provides peer registry, message relay, shared state, file locks, and event logging
8
+ for multi-agent coordination. Runs as FastAPI sub-routes inside the unified daemon.
9
+
10
+ Independent broker — same wire protocol as standalone slm-mesh npm package,
11
+ but separate SQLite tables with mesh_ prefix.
12
+ """