superlocalmemory 3.4.0 → 3.4.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +7 -8
- package/docs/screenshots/01-dashboard-main.png +0 -0
- package/docs/screenshots/02-knowledge-graph.png +0 -0
- package/docs/screenshots/03-patterns-learning.png +0 -0
- package/docs/screenshots/04-learning-dashboard.png +0 -0
- package/docs/screenshots/05-behavioral-analysis.png +0 -0
- package/docs/screenshots/06-graph-communities.png +0 -0
- package/package.json +2 -2
- package/pyproject.toml +11 -2
- package/scripts/postinstall.js +26 -7
- package/src/superlocalmemory/cli/commands.py +42 -60
- package/src/superlocalmemory/cli/daemon.py +107 -47
- package/src/superlocalmemory/cli/main.py +10 -0
- package/src/superlocalmemory/cli/setup_wizard.py +137 -9
- package/src/superlocalmemory/core/config.py +28 -0
- package/src/superlocalmemory/core/consolidation_engine.py +38 -1
- package/src/superlocalmemory/core/engine.py +9 -0
- package/src/superlocalmemory/core/engine_wiring.py +5 -1
- package/src/superlocalmemory/core/graph_analyzer.py +254 -12
- package/src/superlocalmemory/core/health_monitor.py +313 -0
- package/src/superlocalmemory/core/reranker_worker.py +19 -5
- package/src/superlocalmemory/ingestion/__init__.py +13 -0
- package/src/superlocalmemory/ingestion/adapter_manager.py +234 -0
- package/src/superlocalmemory/ingestion/base_adapter.py +177 -0
- package/src/superlocalmemory/ingestion/calendar_adapter.py +340 -0
- package/src/superlocalmemory/ingestion/credentials.py +118 -0
- package/src/superlocalmemory/ingestion/gmail_adapter.py +369 -0
- package/src/superlocalmemory/ingestion/parsers.py +100 -0
- package/src/superlocalmemory/ingestion/transcript_adapter.py +156 -0
- package/src/superlocalmemory/learning/consolidation_worker.py +287 -53
- package/src/superlocalmemory/learning/entity_compiler.py +377 -0
- package/src/superlocalmemory/mesh/__init__.py +12 -0
- package/src/superlocalmemory/mesh/broker.py +344 -0
- package/src/superlocalmemory/retrieval/entity_channel.py +141 -4
- package/src/superlocalmemory/retrieval/spreading_activation.py +45 -0
- package/src/superlocalmemory/server/api.py +15 -8
- package/src/superlocalmemory/server/routes/behavioral.py +8 -4
- package/src/superlocalmemory/server/routes/chat.py +320 -0
- package/src/superlocalmemory/server/routes/entity.py +95 -0
- package/src/superlocalmemory/server/routes/ingest.py +110 -0
- package/src/superlocalmemory/server/routes/insights.py +368 -0
- package/src/superlocalmemory/server/routes/learning.py +106 -6
- package/src/superlocalmemory/server/routes/memories.py +20 -9
- package/src/superlocalmemory/server/routes/mesh.py +186 -0
- package/src/superlocalmemory/server/routes/stats.py +25 -3
- package/src/superlocalmemory/server/routes/timeline.py +252 -0
- package/src/superlocalmemory/server/routes/v3_api.py +161 -0
- package/src/superlocalmemory/server/ui.py +8 -0
- package/src/superlocalmemory/server/unified_daemon.py +691 -0
- package/src/superlocalmemory/storage/schema_v343.py +229 -0
- package/src/superlocalmemory/ui/index.html +168 -58
- package/src/superlocalmemory/ui/js/graph-event-bus.js +83 -0
- package/src/superlocalmemory/ui/js/graph-filters.js +1 -1
- package/src/superlocalmemory/ui/js/knowledge-graph.js +942 -0
- package/src/superlocalmemory/ui/js/memory-chat.js +344 -0
- package/src/superlocalmemory/ui/js/memory-timeline.js +265 -0
- package/src/superlocalmemory/ui/js/quick-actions.js +334 -0
- package/src/superlocalmemory.egg-info/PKG-INFO +0 -594
- package/src/superlocalmemory.egg-info/SOURCES.txt +0 -279
- package/src/superlocalmemory.egg-info/dependency_links.txt +0 -1
- package/src/superlocalmemory.egg-info/entry_points.txt +0 -2
- package/src/superlocalmemory.egg-info/requires.txt +0 -47
- package/src/superlocalmemory.egg-info/top_level.txt +0 -1
|
@@ -0,0 +1,344 @@
|
|
|
1
|
+
# Copyright (c) 2026 Varun Pratap Bhardwaj / Qualixar
|
|
2
|
+
# Licensed under the Elastic License 2.0 - see LICENSE file
|
|
3
|
+
# Part of SuperLocalMemory V3 | https://qualixar.com | https://varunpratap.com
|
|
4
|
+
|
|
5
|
+
"""SLM Mesh Broker — core orchestration for P2P agent communication.
|
|
6
|
+
|
|
7
|
+
Manages peer lifecycle, scheduled cleanup, and event logging.
|
|
8
|
+
All operations use the shared memory.db via SQLite tables with mesh_ prefix.
|
|
9
|
+
|
|
10
|
+
Part of Qualixar | Author: Varun Pratap Bhardwaj
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
from __future__ import annotations
|
|
14
|
+
|
|
15
|
+
import logging
|
|
16
|
+
import sqlite3
|
|
17
|
+
import threading
|
|
18
|
+
import time
|
|
19
|
+
import uuid
|
|
20
|
+
from datetime import datetime, timezone
|
|
21
|
+
from pathlib import Path
|
|
22
|
+
|
|
23
|
+
logger = logging.getLogger("superlocalmemory.mesh")
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class MeshBroker:
    """Lightweight mesh broker for SLM's unified daemon.

    Provides peer management, messaging, shared key/value state, advisory
    file locks, and an append-only event log for P2P agent coordination.
    All data lives in the shared memory.db via SQLite tables with a
    ``mesh_`` prefix. Methods are synchronous (called from FastAPI via
    run_in_executor or directly for quick operations); each opens a
    short-lived connection, so nothing is shared across threads.
    """

    def __init__(self, db_path: str | Path):
        """Bind the broker to the shared SQLite database.

        Args:
            db_path: Path to memory.db. The ``mesh_*`` tables are assumed
                to already exist (created by the schema layer).
        """
        self._db_path = str(db_path)
        self._started_at = time.monotonic()  # monotonic: immune to clock jumps
        self._cleanup_thread: threading.Thread | None = None
        self._stop_event = threading.Event()

    def start_cleanup(self) -> None:
        """Start the background cleanup thread for stale peers/messages.

        Idempotent: a second call while the thread is still alive is a
        no-op. (Previously every call spawned an additional daemon thread,
        leaking duplicate cleanup loops.)
        """
        if self._cleanup_thread is not None and self._cleanup_thread.is_alive():
            return
        self._stop_event.clear()
        self._cleanup_thread = threading.Thread(
            target=self._cleanup_loop, daemon=True, name="mesh-cleanup",
        )
        self._cleanup_thread.start()

    def stop(self) -> None:
        """Signal the cleanup loop to exit and wait briefly for it to stop."""
        self._stop_event.set()
        thread = self._cleanup_thread
        if thread is not None and thread.is_alive():
            # The loop blocks on the same event, so it wakes immediately;
            # the timeout only bounds a worst-case mid-cleanup pass.
            thread.join(timeout=2.0)

    # -- Connection helper --

    def _conn(self) -> sqlite3.Connection:
        """Open a short-lived connection: WAL, 5s busy timeout, dict-like rows."""
        conn = sqlite3.connect(self._db_path)
        conn.execute("PRAGMA journal_mode=WAL")
        conn.execute("PRAGMA busy_timeout=5000")
        conn.row_factory = sqlite3.Row
        return conn

    # -- Peers --

    def register_peer(self, session_id: str, summary: str = "",
                      host: str = "127.0.0.1", port: int = 0) -> dict:
        """Register (or re-register) a peer for *session_id*.

        Idempotent: if the session is already known its row is refreshed
        (summary/host/port/heartbeat, status back to 'active') and the
        existing peer_id is returned; otherwise a new 12-char peer_id is
        minted from a UUID4.

        Returns:
            ``{"peer_id": <id>, "ok": True}``
        """
        conn = self._conn()
        try:
            now = datetime.now(timezone.utc).isoformat()
            existing = conn.execute(
                "SELECT peer_id FROM mesh_peers WHERE session_id = ?",
                (session_id,),
            ).fetchone()
            if existing:
                peer_id = existing["peer_id"]
                conn.execute(
                    "UPDATE mesh_peers SET summary=?, host=?, port=?, last_heartbeat=?, status='active' "
                    "WHERE peer_id=?",
                    (summary, host, port, now, peer_id),
                )
            else:
                peer_id = str(uuid.uuid4())[:12]
                conn.execute(
                    "INSERT INTO mesh_peers (peer_id, session_id, summary, status, host, port, registered_at, last_heartbeat) "
                    "VALUES (?, ?, ?, 'active', ?, ?, ?, ?)",
                    (peer_id, session_id, summary, host, port, now, now),
                )
            self._log_event(conn, "peer_registered", peer_id, {"session_id": session_id})
            conn.commit()
            return {"peer_id": peer_id, "ok": True}
        finally:
            conn.close()

    def deregister_peer(self, peer_id: str) -> dict:
        """Delete a peer row entirely and log the departure."""
        conn = self._conn()
        try:
            row = conn.execute("SELECT 1 FROM mesh_peers WHERE peer_id=?", (peer_id,)).fetchone()
            if not row:
                return {"ok": False, "error": "peer not found"}
            conn.execute("DELETE FROM mesh_peers WHERE peer_id=?", (peer_id,))
            self._log_event(conn, "peer_deregistered", peer_id)
            conn.commit()
            return {"ok": True}
        finally:
            conn.close()

    def heartbeat(self, peer_id: str) -> dict:
        """Refresh last_heartbeat and flip the peer back to 'active'."""
        conn = self._conn()
        try:
            now = datetime.now(timezone.utc).isoformat()
            cursor = conn.execute(
                "UPDATE mesh_peers SET last_heartbeat=?, status='active' WHERE peer_id=?",
                (now, peer_id),
            )
            if cursor.rowcount == 0:
                return {"ok": False, "error": "peer not found"}
            conn.commit()
            return {"ok": True}
        finally:
            conn.close()

    def update_summary(self, peer_id: str, summary: str) -> dict:
        """Replace a peer's free-text summary."""
        conn = self._conn()
        try:
            cursor = conn.execute(
                "UPDATE mesh_peers SET summary=? WHERE peer_id=?",
                (summary, peer_id),
            )
            if cursor.rowcount == 0:
                return {"ok": False, "error": "peer not found"}
            conn.commit()
            return {"ok": True}
        finally:
            conn.close()

    def list_peers(self) -> list[dict]:
        """Return all peers (any status), most recently active first."""
        conn = self._conn()
        try:
            rows = conn.execute(
                "SELECT peer_id, session_id, summary, status, host, port, registered_at, last_heartbeat "
                "FROM mesh_peers ORDER BY last_heartbeat DESC",
            ).fetchall()
            return [dict(r) for r in rows]
        finally:
            conn.close()

    # -- Messages --

    def send_message(self, from_peer: str, to_peer: str, content: str,
                     msg_type: str = "text") -> dict:
        """Queue a message for *to_peer*; the recipient must be registered.

        Note: the sender is not validated — only the recipient row is
        checked before the INSERT.
        """
        conn = self._conn()
        try:
            # Verify recipient exists
            if not conn.execute("SELECT 1 FROM mesh_peers WHERE peer_id=?", (to_peer,)).fetchone():
                return {"ok": False, "error": "recipient peer not found"}
            now = datetime.now(timezone.utc).isoformat()
            cursor = conn.execute(
                "INSERT INTO mesh_messages (from_peer, to_peer, msg_type, content, read, created_at) "
                "VALUES (?, ?, ?, ?, 0, ?)",
                (from_peer, to_peer, msg_type, content, now),
            )
            self._log_event(conn, "message_sent", from_peer, {"to": to_peer})
            conn.commit()
            return {"ok": True, "id": cursor.lastrowid}
        finally:
            conn.close()

    def get_inbox(self, peer_id: str) -> list[dict]:
        """Return the newest 100 messages addressed to *peer_id* (read and unread)."""
        conn = self._conn()
        try:
            rows = conn.execute(
                "SELECT id, from_peer, to_peer, msg_type, content, read, created_at "
                "FROM mesh_messages WHERE to_peer=? ORDER BY created_at DESC LIMIT 100",
                (peer_id,),
            ).fetchall()
            return [dict(r) for r in rows]
        finally:
            conn.close()

    def mark_read(self, peer_id: str, message_ids: list[int]) -> dict:
        """Mark the given inbox messages as read for *peer_id*.

        Fix: an empty ``message_ids`` previously produced ``IN ()``, which
        is a SQLite syntax error (OperationalError). It is now a
        successful no-op.
        """
        if not message_ids:
            return {"ok": True}
        conn = self._conn()
        try:
            # Placeholders are generated, never user text — the ids bind as
            # parameters, so this f-string is injection-safe.
            placeholders = ",".join("?" * len(message_ids))
            conn.execute(
                f"UPDATE mesh_messages SET read=1 WHERE to_peer=? AND id IN ({placeholders})",
                [peer_id, *message_ids],
            )
            conn.commit()
            return {"ok": True}
        finally:
            conn.close()

    # -- State --

    def get_state(self) -> dict:
        """Return the whole shared state map: key -> {value, set_by, updated_at}."""
        conn = self._conn()
        try:
            rows = conn.execute("SELECT key, value, set_by, updated_at FROM mesh_state").fetchall()
            return {r["key"]: {"value": r["value"], "set_by": r["set_by"], "updated_at": r["updated_at"]} for r in rows}
        finally:
            conn.close()

    def set_state(self, key: str, value: str, set_by: str) -> dict:
        """Upsert a shared state entry (last writer wins)."""
        conn = self._conn()
        try:
            now = datetime.now(timezone.utc).isoformat()
            conn.execute(
                "INSERT INTO mesh_state (key, value, set_by, updated_at) VALUES (?, ?, ?, ?) "
                "ON CONFLICT(key) DO UPDATE SET value=excluded.value, set_by=excluded.set_by, updated_at=excluded.updated_at",
                (key, value, set_by, now),
            )
            conn.commit()
            return {"ok": True}
        finally:
            conn.close()

    def get_state_key(self, key: str) -> dict | None:
        """Return one state entry as a dict, or None if the key is absent."""
        conn = self._conn()
        try:
            row = conn.execute(
                "SELECT key, value, set_by, updated_at FROM mesh_state WHERE key=?", (key,),
            ).fetchone()
            return dict(row) if row else None
        finally:
            conn.close()

    # -- Locks --

    def lock_action(self, file_path: str, locked_by: str, action: str) -> dict:
        """Advisory file-lock operations: 'acquire', 'release', or 'query'.

        acquire: succeeds (and re-stamps locked_at) if the file is free or
            already held by *locked_by*; otherwise reports the holder.
        release: deletes the lock only if held by *locked_by*; releasing a
            lock you do not hold is a silent success.
        query: reports the current holder, if any.
        """
        conn = self._conn()
        try:
            now = datetime.now(timezone.utc).isoformat()

            if action == "acquire":
                existing = conn.execute(
                    "SELECT locked_by, locked_at FROM mesh_locks WHERE file_path=?",
                    (file_path,),
                ).fetchone()
                if existing and existing["locked_by"] != locked_by:
                    return {"locked": True, "by": existing["locked_by"], "since": existing["locked_at"]}
                conn.execute(
                    "INSERT INTO mesh_locks (file_path, locked_by, locked_at) VALUES (?, ?, ?) "
                    "ON CONFLICT(file_path) DO UPDATE SET locked_by=excluded.locked_by, locked_at=excluded.locked_at",
                    (file_path, locked_by, now),
                )
                conn.commit()
                return {"ok": True, "action": "acquired"}

            elif action == "release":
                conn.execute("DELETE FROM mesh_locks WHERE file_path=? AND locked_by=?",
                             (file_path, locked_by))
                conn.commit()
                return {"ok": True, "action": "released"}

            elif action == "query":
                row = conn.execute(
                    "SELECT locked_by, locked_at FROM mesh_locks WHERE file_path=?",
                    (file_path,),
                ).fetchone()
                if row:
                    return {"locked": True, "by": row["locked_by"], "since": row["locked_at"]}
                return {"locked": False}

            return {"ok": False, "error": f"unknown action: {action}"}
        finally:
            conn.close()

    # -- Events --

    def get_events(self, limit: int = 100) -> list[dict]:
        """Return the newest *limit* mesh events, newest first."""
        conn = self._conn()
        try:
            rows = conn.execute(
                "SELECT id, event_type, payload, emitted_by, created_at "
                "FROM mesh_events ORDER BY id DESC LIMIT ?",
                (limit,),
            ).fetchall()
            return [dict(r) for r in rows]
        finally:
            conn.close()

    def _log_event(self, conn: sqlite3.Connection, event_type: str,
                   emitted_by: str, payload: dict | None = None) -> None:
        """Append one event row on the caller's connection (no commit here)."""
        # Local import to avoid touching the module import block; json is stdlib.
        import json as _json
        now = datetime.now(timezone.utc).isoformat()
        conn.execute(
            "INSERT INTO mesh_events (event_type, payload, emitted_by, created_at) VALUES (?, ?, ?, ?)",
            (event_type, _json.dumps(payload or {}), emitted_by, now),
        )

    # -- Status --

    def get_status(self) -> dict:
        """Return broker liveness: active-peer count and uptime in seconds."""
        conn = self._conn()
        try:
            peer_count = conn.execute("SELECT COUNT(*) FROM mesh_peers WHERE status='active'").fetchone()[0]
            return {
                "broker_up": True,
                "peer_count": peer_count,
                "uptime_s": round(time.monotonic() - self._started_at),
            }
        finally:
            conn.close()

    # -- Cleanup --

    def _cleanup_loop(self) -> None:
        """Background cleanup: mark stale peers, delete old messages."""
        while not self._stop_event.is_set():
            self._stop_event.wait(300)  # Every 5 min; wakes early on stop()
            if self._stop_event.is_set():
                break
            try:
                self._run_cleanup()
            except Exception as exc:
                # Best-effort housekeeping: never let a cleanup pass kill the thread.
                logger.debug("Mesh cleanup error: %s", exc)

    def _run_cleanup(self) -> None:
        """One cleanup pass: peer lifecycle, old messages, expired locks."""
        conn = self._conn()
        try:
            now = datetime.now(timezone.utc)
            # Mark stale peers (no heartbeat for 5 min)
            conn.execute(
                "UPDATE mesh_peers SET status='stale' "
                "WHERE status='active' AND datetime(last_heartbeat) < datetime(?, '-5 minutes')",
                (now.isoformat(),),
            )
            # Mark dead (stale > 30 min), then remove; the two-step keeps the
            # 'dead' transition visible to any trigger/audit on the table.
            conn.execute(
                "UPDATE mesh_peers SET status='dead' "
                "WHERE status='stale' AND datetime(last_heartbeat) < datetime(?, '-30 minutes')",
                (now.isoformat(),),
            )
            conn.execute("DELETE FROM mesh_peers WHERE status='dead'")
            # Delete read messages > 24hr old
            conn.execute(
                "DELETE FROM mesh_messages WHERE read=1 AND datetime(created_at) < datetime(?, '-24 hours')",
                (now.isoformat(),),
            )
            # Delete expired locks.
            # NOTE(review): lock_action() never writes expires_at, and
            # datetime(NULL) compares as NULL, so this DELETE is a no-op for
            # locks created here — confirm whether the schema supplies a
            # default expires_at, otherwise locks are only freed by 'release'.
            conn.execute(
                "DELETE FROM mesh_locks WHERE datetime(expires_at) < datetime(?)",
                (now.isoformat(),),
            )
            conn.commit()
        finally:
            conn.close()
|
@@ -91,6 +91,7 @@ class EntityGraphChannel:
|
|
|
91
91
|
entity_resolver: EntityResolver | None = None,
|
|
92
92
|
decay: float = 0.7, activation_threshold: float = 0.05,
|
|
93
93
|
max_hops: int = 4,
|
|
94
|
+
graph_metrics: dict[str, dict] | None = None,
|
|
94
95
|
) -> None:
|
|
95
96
|
self._db = db
|
|
96
97
|
self._resolver = entity_resolver
|
|
@@ -101,6 +102,9 @@ class EntityGraphChannel:
|
|
|
101
102
|
self._adj: dict[str, list[tuple[str, float]]] = {}
|
|
102
103
|
self._adj_profile: str = "" # Track which profile is loaded
|
|
103
104
|
self._adj_edge_count: int = 0 # Track edge count for staleness detection
|
|
105
|
+
# v3.4.1: Graph intelligence metrics (loaded from fact_importance)
|
|
106
|
+
self._graph_metrics: dict[str, dict] = graph_metrics or {}
|
|
107
|
+
self._graph_metrics_profile: str = ""
|
|
104
108
|
|
|
105
109
|
def _ensure_adjacency(self, profile_id: str) -> None:
|
|
106
110
|
"""Load graph adjacency into memory for fast spreading activation.
|
|
@@ -133,6 +137,8 @@ class EntityGraphChannel:
|
|
|
133
137
|
self._adj_edge_count = current_count
|
|
134
138
|
# Also load entity maps (same staleness lifecycle)
|
|
135
139
|
self._load_entity_maps(profile_id)
|
|
140
|
+
# v3.4.1: Load graph intelligence metrics (P0)
|
|
141
|
+
self._load_graph_metrics(profile_id)
|
|
136
142
|
|
|
137
143
|
logger.info(
|
|
138
144
|
"Loaded adjacency cache: %d nodes, %d edges, %d entity mappings for profile %s",
|
|
@@ -192,6 +198,37 @@ class EntityGraphChannel:
|
|
|
192
198
|
len(self._entity_to_facts), len(self._fact_to_entities),
|
|
193
199
|
)
|
|
194
200
|
|
|
201
|
+
def _load_graph_metrics(self, profile_id: str) -> None:
    """Load PageRank, community_id, degree_centrality from fact_importance.

    v3.4.1: Enables graph-enhanced retrieval (P0).
    Called alongside adjacency loading. Same staleness lifecycle.
    """
    # Cache hit: metrics already loaded for this profile.
    # NOTE(review): if the table returns zero rows, _graph_metrics stays
    # empty and this guard fails, so the query re-runs on every call —
    # confirm that is acceptable for profiles without computed metrics.
    if self._graph_metrics_profile == profile_id and self._graph_metrics:
        return
    self._graph_metrics = {}
    self._graph_metrics_profile = profile_id
    try:
        rows = self._db.execute(
            "SELECT fact_id, pagerank_score, community_id, degree_centrality "
            "FROM fact_importance WHERE profile_id = ?",
            (profile_id,),
        )
        for r in rows:
            d = dict(r)
            self._graph_metrics[d["fact_id"]] = {
                # `or 0` guards NULL column values before float().
                "pagerank_score": float(d.get("pagerank_score", 0) or 0),
                "community_id": d.get("community_id"),
                "degree_centrality": float(d.get("degree_centrality", 0) or 0),
            }
        logger.info(
            "Loaded graph metrics: %d facts for profile %s",
            len(self._graph_metrics), profile_id,
        )
    except Exception as exc:
        # Deliberate graceful degradation: presumably fact_importance may be
        # missing on older schemas; retrieval then runs without metrics.
        logger.debug("Graph metrics load failed (graceful degradation): %s", exc)
        self._graph_metrics = {}
|
|
231
|
+
|
|
195
232
|
def invalidate_cache(self) -> None:
|
|
196
233
|
"""Clear all caches. Call after adding/removing edges or facts."""
|
|
197
234
|
self._adj.clear()
|
|
@@ -199,6 +236,8 @@ class EntityGraphChannel:
|
|
|
199
236
|
self._adj_edge_count = 0
|
|
200
237
|
self._entity_to_facts = defaultdict(list)
|
|
201
238
|
self._fact_to_entities = defaultdict(list)
|
|
239
|
+
self._graph_metrics.clear()
|
|
240
|
+
self._graph_metrics_profile = ""
|
|
202
241
|
|
|
203
242
|
def search(self, query: str, profile_id: str, top_k: int = 50) -> list[tuple[str, float]]:
|
|
204
243
|
"""Search via entity graph with spreading activation.
|
|
@@ -242,12 +281,26 @@ class EntityGraphChannel:
|
|
|
242
281
|
for fid in frontier:
|
|
243
282
|
if use_cache:
|
|
244
283
|
neighbors = self._adj.get(fid, ())
|
|
245
|
-
for neighbor,
|
|
246
|
-
|
|
247
|
-
|
|
248
|
-
|
|
284
|
+
for neighbor, edge_weight in neighbors:
|
|
285
|
+
# v3.4.2: Only apply edge_weight and PageRank bias when
|
|
286
|
+
# graph metrics are available. Without metrics, edge_weight
|
|
287
|
+
# dampens propagation by ~14% with no compensating boost,
|
|
288
|
+
# causing retrieval regression (68.4% vs 70.4% on LoCoMo).
|
|
289
|
+
if self._graph_metrics:
|
|
290
|
+
weighted = activation[fid] * self._decay * edge_weight
|
|
291
|
+
if neighbor in self._graph_metrics:
|
|
292
|
+
target_pr = self._graph_metrics[neighbor].get("pagerank_score", 0.0)
|
|
293
|
+
pr_boost = min(1.0 + target_pr * 2.0, 2.0)
|
|
294
|
+
weighted *= pr_boost
|
|
295
|
+
else:
|
|
296
|
+
weighted = activation[fid] * self._decay
|
|
297
|
+
if weighted >= self._threshold and weighted > activation.get(neighbor, 0.0):
|
|
298
|
+
activation[neighbor] = weighted
|
|
249
299
|
next_frontier.add(neighbor)
|
|
250
300
|
else:
|
|
301
|
+
# NOTE: SQL fallback path does NOT use graph intelligence (P1/P2/P3).
|
|
302
|
+
# Graph intelligence is only available on the in-memory cache path.
|
|
303
|
+
# This fallback exists for mock/test DBs. See Phase 7 LLD H-01.
|
|
251
304
|
for edge in self._db.get_edges_for_node(fid, profile_id):
|
|
252
305
|
neighbor = edge.target_id if edge.source_id == fid else edge.source_id
|
|
253
306
|
propagated = activation[fid] * self._decay
|
|
@@ -282,10 +335,94 @@ class EntityGraphChannel:
|
|
|
282
335
|
if not frontier:
|
|
283
336
|
break
|
|
284
337
|
|
|
338
|
+
# v3.4.1 P2: Community-aware boosting
|
|
339
|
+
if self._graph_metrics and use_cache:
|
|
340
|
+
from collections import Counter as _Counter
|
|
341
|
+
seed_communities: _Counter = _Counter()
|
|
342
|
+
for eid in canonical_ids:
|
|
343
|
+
for fid in self._entity_to_facts.get(eid, ()):
|
|
344
|
+
m = self._graph_metrics.get(fid, {})
|
|
345
|
+
comm = m.get("community_id")
|
|
346
|
+
if comm is not None:
|
|
347
|
+
seed_communities[comm] += 1
|
|
348
|
+
if seed_communities:
|
|
349
|
+
total_seeds = sum(seed_communities.values())
|
|
350
|
+
for fid in list(activation.keys()):
|
|
351
|
+
m = self._graph_metrics.get(fid, {})
|
|
352
|
+
fact_comm = m.get("community_id")
|
|
353
|
+
if fact_comm is not None and fact_comm in seed_communities:
|
|
354
|
+
boost = min(1.0 + 0.15 * (seed_communities[fact_comm] / total_seeds), 1.3)
|
|
355
|
+
activation[fid] *= boost
|
|
356
|
+
elif fact_comm is not None and fact_comm not in seed_communities:
|
|
357
|
+
activation[fid] *= 0.9 # Mild penalty for unrelated communities
|
|
358
|
+
|
|
359
|
+
# v3.4.1 P3: Contradiction suppression via graph_edges
|
|
360
|
+
if use_cache and activation:
|
|
361
|
+
self._suppress_contradictions(activation, profile_id)
|
|
362
|
+
|
|
363
|
+
# v3.4.1: Score normalization to [0, 1]
|
|
285
364
|
results = [(fid, sc) for fid, sc in activation.items() if sc >= self._threshold]
|
|
365
|
+
if not results:
|
|
366
|
+
return []
|
|
367
|
+
max_score = max(sc for _, sc in results)
|
|
368
|
+
if max_score > 0:
|
|
369
|
+
results = [(fid, sc / max_score) for fid, sc in results]
|
|
286
370
|
results.sort(key=lambda x: x[1], reverse=True)
|
|
287
371
|
return results[:top_k]
|
|
288
372
|
|
|
373
|
+
def _suppress_contradictions(
    self, activation: dict[str, float], profile_id: str,
) -> None:
    """P3: Penalize older fact in contradiction pairs, heavy-penalize superseded.

    Uses graph_edges (edge_type CHECK includes 'contradiction', 'supersedes').
    Mutates *activation* in place; any failure is logged at debug level and
    suppressed so retrieval degrades gracefully.
    """
    candidate_ids = list(activation.keys())
    if not candidate_ids:
        return
    try:
        # One query for all contradiction/supersedes edges touching any
        # candidate; candidate_ids is bound twice (source IN / target IN).
        placeholders = ",".join("?" * len(candidate_ids))
        sql = (
            "SELECT source_id, target_id, edge_type FROM graph_edges "
            "WHERE profile_id = ? AND edge_type IN ('contradiction', 'supersedes') "
            "AND (source_id IN (" + placeholders + ") "
            "OR target_id IN (" + placeholders + "))"
        )
        rows = self._db.execute(sql, (profile_id, *candidate_ids, *candidate_ids))
        edges = [dict(r) for r in rows]
        if not edges:
            return

        # Batch load created_at for involved facts
        involved = set()
        for e in edges:
            involved.add(e["source_id"])
            involved.add(e["target_id"])
        # Restrict to the candidate set: endpoints outside it cannot be
        # penalized anyway, so no need to fetch their timestamps.
        involved = involved & set(candidate_ids)
        if not involved:
            return
        ph2 = ",".join("?" * len(involved))
        ts_rows = self._db.execute(
            "SELECT fact_id, created_at FROM atomic_facts "
            "WHERE fact_id IN (" + ph2 + ") AND profile_id = ?",
            (*involved, profile_id),
        )
        ts_map = {dict(r)["fact_id"]: dict(r).get("created_at", "") for r in ts_rows}

        for e in edges:
            src, tgt, etype = e["source_id"], e["target_id"], e["edge_type"]
            # NOTE(review): this penalizes the *source* of a 'supersedes'
            # edge; if the convention is "source supersedes (replaces)
            # target", the penalty looks inverted — confirm edge direction.
            if etype == "supersedes" and src in activation:
                activation[src] *= 0.3  # Heavy penalty: this fact was replaced
            elif etype == "contradiction":
                # ISO-8601 timestamps compare correctly as plain strings.
                src_ts = ts_map.get(src, "")
                tgt_ts = ts_map.get(tgt, "")
                if src_ts and tgt_ts:
                    older = src if src_ts < tgt_ts else tgt
                    if older in activation:
                        activation[older] *= 0.5
    except Exception as exc:
        logger.debug("Contradiction suppression failed: %s", exc)
|
|
425
|
+
|
|
289
426
|
def _resolve_entities(self, raw: list[str], profile_id: str) -> list[str]:
|
|
290
427
|
"""Resolve raw names to canonical entity IDs."""
|
|
291
428
|
ids: list[str] = []
|
|
@@ -53,6 +53,9 @@ class SpreadingActivationConfig:
|
|
|
53
53
|
max_iterations: int = 3 # T: propagation depth
|
|
54
54
|
tau_gate: float = 0.05 # FOK confidence gate (was 0.12)
|
|
55
55
|
enabled: bool = True # Ships enabled by default
|
|
56
|
+
# v3.4.1: Graph intelligence integration
|
|
57
|
+
use_pagerank_bias: bool = False # Multiply propagation by target PageRank
|
|
58
|
+
community_boost: float = 0.0 # Boost same-community nodes (0.0 = disabled)
|
|
56
59
|
|
|
57
60
|
|
|
58
61
|
# ---------------------------------------------------------------------------
|
|
@@ -82,6 +85,11 @@ class SpreadingActivation:
|
|
|
82
85
|
self._db = db
|
|
83
86
|
self._vector_store = vector_store
|
|
84
87
|
self._config = config or SpreadingActivationConfig()
|
|
88
|
+
# v3.4.1: Graph intelligence caches (loaded lazily per profile)
|
|
89
|
+
self._pr_cache: dict[str, float] = {}
|
|
90
|
+
self._pr_profile: str = ""
|
|
91
|
+
self._comm_cache: dict[str, int | None] = {}
|
|
92
|
+
self._comm_profile: str = ""
|
|
85
93
|
|
|
86
94
|
def search(
|
|
87
95
|
self,
|
|
@@ -311,3 +319,40 @@ class SpreadingActivation:
|
|
|
311
319
|
return len(result) if result else 0
|
|
312
320
|
except Exception:
|
|
313
321
|
return 0
|
|
322
|
+
|
|
323
|
+
# ── v3.4.1: Graph Intelligence Helpers ────────────────────────
|
|
324
|
+
|
|
325
|
+
def _load_graph_metrics_cache(self, profile_id: str) -> None:
    """Load PageRank + community data in a single SQL query.

    Called lazily on first _get_pagerank() or _get_community() call.
    Populates both _pr_cache and _comm_cache.
    """
    # NOTE(review): if the query yields no rows, _pr_cache stays empty and
    # this guard fails, so every subsequent lookup re-runs the query for
    # that profile — confirm acceptable for profiles without metrics.
    if self._pr_profile == profile_id and self._pr_cache:
        return  # Already loaded for this profile
    # Reset both caches together so they stay in lockstep per profile.
    self._pr_cache = {}
    self._pr_profile = profile_id
    self._comm_cache = {}
    self._comm_profile = profile_id
    try:
        rows = self._db.execute(
            "SELECT fact_id, pagerank_score, community_id "
            "FROM fact_importance WHERE profile_id = ?",
            (profile_id,),
        )
        for r in rows:
            d = dict(r)
            # `or 0` guards NULL pagerank values before float().
            self._pr_cache[d["fact_id"]] = float(d.get("pagerank_score", 0) or 0)
            self._comm_cache[d["fact_id"]] = d.get("community_id")
    except Exception:
        # Deliberate best-effort: presumably fact_importance may be absent
        # on older schemas; callers then see the 0.0 / None defaults.
        pass
|
|
349
|
+
|
|
350
|
+
def _get_pagerank(self, fact_id: str, profile_id: str) -> float:
    """Look up PageRank score from fact_importance. Cached per profile.

    Returns 0.0 for unknown facts or when metrics are unavailable.
    """
    # Ensure the per-profile cache is populated (no-op on cache hit).
    self._load_graph_metrics_cache(profile_id)
    return self._pr_cache.get(fact_id, 0.0)
|
|
354
|
+
|
|
355
|
+
def _get_community(self, fact_id: str, profile_id: str) -> int | None:
    """Look up community_id from fact_importance. Shares unified cache.

    Returns None for unknown facts or when metrics are unavailable.
    """
    # Same lazy loader as _get_pagerank — one query fills both caches.
    self._load_graph_metrics_cache(profile_id)
    return self._comm_cache.get(fact_id)
|
|
@@ -176,8 +176,16 @@ def create_app() -> FastAPI:
|
|
|
176
176
|
application.include_router(ws_router)
|
|
177
177
|
application.include_router(v3_router)
|
|
178
178
|
|
|
179
|
+
# v3.4.1: Chat SSE endpoint
|
|
180
|
+
for _module_name_v341 in ("chat",):
|
|
181
|
+
try:
|
|
182
|
+
_mod_v341 = __import__(f"superlocalmemory.server.routes.{_module_name_v341}", fromlist=["router"])
|
|
183
|
+
application.include_router(_mod_v341.router)
|
|
184
|
+
except (ImportError, Exception):
|
|
185
|
+
pass
|
|
186
|
+
|
|
179
187
|
# Graceful optional routers
|
|
180
|
-
for _module_name in ("learning", "lifecycle", "behavioral", "compliance"):
|
|
188
|
+
for _module_name in ("learning", "lifecycle", "behavioral", "compliance", "insights", "timeline"):
|
|
181
189
|
try:
|
|
182
190
|
_mod = __import__(f"superlocalmemory.server.routes.{_module_name}", fromlist=["router"])
|
|
183
191
|
application.include_router(_mod.router)
|
|
@@ -228,16 +236,15 @@ def create_app() -> FastAPI:
|
|
|
228
236
|
return application
|
|
229
237
|
|
|
230
238
|
|
|
231
|
-
app
|
|
232
|
-
|
|
233
|
-
|
|
234
|
-
#
|
|
235
|
-
# Server Startup
|
|
236
|
-
# ============================================================================
|
|
239
|
+
# v3.4.3: Module-level app removed to prevent duplicate MemoryEngine.
|
|
240
|
+
# The unified daemon (server/unified_daemon.py) imports routes from this
|
|
241
|
+
# module via _register_dashboard_routes(). Do NOT create app here.
|
|
242
|
+
# For standalone use: python -m superlocalmemory.server.api
|
|
237
243
|
|
|
238
244
|
if __name__ == "__main__":
|
|
245
|
+
app = create_app()
|
|
239
246
|
print("=" * 60)
|
|
240
|
-
print("SuperLocalMemory V3 - API Server")
|
|
247
|
+
print("SuperLocalMemory V3 - API Server (standalone mode)")
|
|
241
248
|
print("=" * 60)
|
|
242
249
|
print(f"Database: {DB_PATH}")
|
|
243
250
|
print(f"UI Directory: {UI_DIR}")
|
|
@@ -25,8 +25,8 @@ try:
|
|
|
25
25
|
from superlocalmemory.learning.behavioral import BehavioralPatternStore
|
|
26
26
|
from superlocalmemory.learning.outcomes import OutcomeTracker
|
|
27
27
|
BEHAVIORAL_AVAILABLE = True
|
|
28
|
-
except ImportError:
|
|
29
|
-
logger.
|
|
28
|
+
except ImportError as e:
|
|
29
|
+
logger.warning("V3 behavioral engine import failed: %s", e)
|
|
30
30
|
|
|
31
31
|
|
|
32
32
|
@router.get("/api/behavioral/status")
|
|
@@ -66,9 +66,13 @@ async def behavioral_status():
|
|
|
66
66
|
try:
|
|
67
67
|
store = BehavioralPatternStore(db_path)
|
|
68
68
|
patterns = store.get_patterns(profile_id=profile)
|
|
69
|
-
|
|
69
|
+
# Count patterns spanning multiple projects
|
|
70
|
+
cross_project_transfers = len([
|
|
71
|
+
p for p in patterns
|
|
72
|
+
if isinstance(p, dict) and p.get("project_count", 1) > 1
|
|
73
|
+
])
|
|
70
74
|
except Exception as exc:
|
|
71
|
-
logger.
|
|
75
|
+
logger.warning("pattern store error: %s", exc)
|
|
72
76
|
|
|
73
77
|
return {
|
|
74
78
|
"available": True,
|