superlocalmemory 3.4.1 → 3.4.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47) hide show
  1. package/README.md +9 -12
  2. package/package.json +1 -1
  3. package/pyproject.toml +11 -2
  4. package/scripts/postinstall.js +26 -7
  5. package/src/superlocalmemory/cli/commands.py +71 -60
  6. package/src/superlocalmemory/cli/daemon.py +184 -64
  7. package/src/superlocalmemory/cli/main.py +25 -2
  8. package/src/superlocalmemory/cli/service_installer.py +367 -0
  9. package/src/superlocalmemory/cli/setup_wizard.py +150 -9
  10. package/src/superlocalmemory/core/config.py +28 -0
  11. package/src/superlocalmemory/core/consolidation_engine.py +38 -1
  12. package/src/superlocalmemory/core/engine.py +9 -0
  13. package/src/superlocalmemory/core/health_monitor.py +313 -0
  14. package/src/superlocalmemory/core/reranker_worker.py +19 -5
  15. package/src/superlocalmemory/ingestion/__init__.py +13 -0
  16. package/src/superlocalmemory/ingestion/adapter_manager.py +234 -0
  17. package/src/superlocalmemory/ingestion/base_adapter.py +177 -0
  18. package/src/superlocalmemory/ingestion/calendar_adapter.py +340 -0
  19. package/src/superlocalmemory/ingestion/credentials.py +118 -0
  20. package/src/superlocalmemory/ingestion/gmail_adapter.py +369 -0
  21. package/src/superlocalmemory/ingestion/parsers.py +100 -0
  22. package/src/superlocalmemory/ingestion/transcript_adapter.py +156 -0
  23. package/src/superlocalmemory/learning/consolidation_worker.py +47 -1
  24. package/src/superlocalmemory/learning/entity_compiler.py +377 -0
  25. package/src/superlocalmemory/mcp/server.py +32 -3
  26. package/src/superlocalmemory/mcp/tools_mesh.py +249 -0
  27. package/src/superlocalmemory/mesh/__init__.py +12 -0
  28. package/src/superlocalmemory/mesh/broker.py +344 -0
  29. package/src/superlocalmemory/retrieval/entity_channel.py +12 -6
  30. package/src/superlocalmemory/server/api.py +6 -7
  31. package/src/superlocalmemory/server/routes/adapters.py +63 -0
  32. package/src/superlocalmemory/server/routes/entity.py +151 -0
  33. package/src/superlocalmemory/server/routes/ingest.py +110 -0
  34. package/src/superlocalmemory/server/routes/mesh.py +186 -0
  35. package/src/superlocalmemory/server/unified_daemon.py +693 -0
  36. package/src/superlocalmemory/storage/schema_v343.py +229 -0
  37. package/src/superlocalmemory/ui/css/neural-glass.css +1588 -0
  38. package/src/superlocalmemory/ui/index.html +134 -4
  39. package/src/superlocalmemory/ui/js/memory-chat.js +28 -1
  40. package/src/superlocalmemory/ui/js/ng-entities.js +272 -0
  41. package/src/superlocalmemory/ui/js/ng-health.js +208 -0
  42. package/src/superlocalmemory/ui/js/ng-ingestion.js +203 -0
  43. package/src/superlocalmemory/ui/js/ng-mesh.js +311 -0
  44. package/src/superlocalmemory/ui/js/ng-shell.js +471 -0
  45. package/src/superlocalmemory.egg-info/PKG-INFO +18 -14
  46. package/src/superlocalmemory.egg-info/SOURCES.txt +26 -0
  47. package/src/superlocalmemory.egg-info/requires.txt +9 -1
@@ -0,0 +1,249 @@
1
+ # Copyright (c) 2026 Varun Pratap Bhardwaj / Qualixar
2
+ # Licensed under the Elastic License 2.0 - see LICENSE file
3
+ # Part of SuperLocalMemory V3 | https://qualixar.com | https://varunpratap.com
4
+
5
+ """SLM Mesh MCP Tools — P2P agent communication via the unified daemon.
6
+
7
+ v3.4.4: These tools ship WITH SuperLocalMemory, no separate slm-mesh install needed.
8
+ End users get full mesh functionality from `pip install superlocalmemory`.
9
+
10
+ All tools communicate with the daemon's Python mesh broker on port 8765.
11
+ Auto-heartbeat keeps the session alive as long as the MCP server is running.
12
+
13
+ 8 tools: mesh_summary, mesh_peers, mesh_send, mesh_inbox,
14
+ mesh_state, mesh_lock, mesh_events, mesh_status
15
+ """
16
+
17
+ from __future__ import annotations
18
+
19
+ import json
20
+ import logging
21
+ import os
22
+ import threading
23
+ import time
24
+ import uuid
25
+ from typing import Callable
26
+
27
+ logger = logging.getLogger(__name__)
28
+
29
+ # Unique peer ID for this MCP server session
30
+ _PEER_ID = str(uuid.uuid4())[:12]
31
+ _SESSION_SUMMARY = ""
32
+ _HEARTBEAT_INTERVAL = 25 # seconds (broker marks stale at 30s, dead at 60s)
33
+ _HEARTBEAT_THREAD: threading.Thread | None = None
34
+ _REGISTERED = False
35
+
36
+
37
+ def _daemon_url() -> str:
38
+ """Get the daemon base URL."""
39
+ port = 8765
40
+ try:
41
+ port_file = os.path.join(os.path.expanduser("~"), ".superlocalmemory", "daemon.port")
42
+ if os.path.exists(port_file):
43
+ port = int(open(port_file).read().strip())
44
+ except Exception:
45
+ pass
46
+ return f"http://127.0.0.1:{port}"
47
+
48
+
49
def _mesh_request(method: str, path: str, body: dict | None = None) -> dict | None:
    """Send an HTTP request to the daemon mesh broker.

    Args:
        method: HTTP verb ("GET", "POST", ...).
        path: Path under the daemon's /mesh prefix (e.g. "/peers").
        body: Optional payload, JSON-encoded when present.

    Returns:
        Decoded JSON response dict, or None on any failure (daemon down,
        timeout, malformed response). Failures are logged at DEBUG only.
    """
    import urllib.request
    url = f"{_daemon_url()}/mesh{path}"
    try:
        data = json.dumps(body).encode() if body else None
        headers = {"Content-Type": "application/json"} if data else {}
        req = urllib.request.Request(url, data=data, headers=headers, method=method)
        # Context-manage the response so the socket is always closed
        # (the original leaked the HTTPResponse object).
        with urllib.request.urlopen(req, timeout=10) as resp:
            return json.loads(resp.read().decode())
    except Exception as exc:
        logger.debug("Mesh request failed: %s %s — %s", method, path, exc)
        return None
62
+
63
+
64
def _ensure_registered() -> None:
    """Register this session with the mesh broker if not already done.

    On the first successful registration the heartbeat thread is started;
    subsequent calls are no-ops.
    """
    global _REGISTERED
    if _REGISTERED:
        return

    payload = {
        "peer_id": _PEER_ID,
        "session_id": os.environ.get("CLAUDE_SESSION_ID", _PEER_ID),
        "summary": _SESSION_SUMMARY or "SLM MCP session",
    }
    response = _mesh_request("POST", "/register", payload)
    if response:
        _REGISTERED = True
        _start_heartbeat()
78
+
79
+
80
def _start_heartbeat() -> None:
    """Spawn the daemon-keepalive thread once; later calls are no-ops."""
    global _HEARTBEAT_THREAD
    if _HEARTBEAT_THREAD is not None:
        return

    def _loop() -> None:
        # Runs forever; the thread is daemonic so it dies with the process.
        while True:
            time.sleep(_HEARTBEAT_INTERVAL)
            try:
                _mesh_request("POST", "/heartbeat", {"peer_id": _PEER_ID})
            except Exception:
                # _mesh_request already swallows errors; belt-and-braces only.
                pass

    _HEARTBEAT_THREAD = threading.Thread(target=_loop, daemon=True, name="mesh-heartbeat")
    _HEARTBEAT_THREAD.start()
    logger.info("Mesh heartbeat started (peer_id=%s, interval=%ds)", _PEER_ID, _HEARTBEAT_INTERVAL)
97
+
98
+
99
def register_mesh_tools(server, get_engine: Callable) -> None:
    """Register all 8 mesh MCP tools."""
    # NOTE(review): get_engine is accepted for signature parity with other
    # register_* helpers but is not used by any mesh tool — confirm intended.
    # Tool docstrings below are user-facing MCP descriptions surfaced verbatim
    # to clients; edit them with care.

    @server.tool()
    async def mesh_summary(summary: str = "") -> dict:
        """Register this session and describe what you're working on.

        Call this at the start of every session. Other agents can see your summary
        and send you messages. The session stays alive via automatic heartbeat.

        Args:
            summary: What this session is working on (e.g. "Fixing auth bug in api.py")
        """
        global _SESSION_SUMMARY
        _SESSION_SUMMARY = summary or "Active session"

        _ensure_registered()

        # Update summary
        result = _mesh_request("POST", "/summary", {
            "peer_id": _PEER_ID,
            "summary": _SESSION_SUMMARY,
        })

        return {
            "peer_id": _PEER_ID,
            "summary": _SESSION_SUMMARY,
            "registered": True,
            "heartbeat_active": _HEARTBEAT_THREAD is not None,
            # Raw broker reply (None when the daemon is unreachable).
            "broker_response": result,
        }

    @server.tool()
    async def mesh_peers() -> dict:
        """List all active peer sessions on this machine.

        Shows other Claude Code, Cursor, or AI agent sessions that are
        connected to the same SLM mesh network.
        """
        _ensure_registered()
        result = _mesh_request("GET", "/peers")
        # Degrade to an empty list when the broker is unreachable.
        peers = (result or {}).get("peers", [])
        return {
            "peers": peers,
            "count": len(peers),
            "my_peer_id": _PEER_ID,
        }

    @server.tool()
    async def mesh_send(to: str, message: str) -> dict:
        """Send a message to another peer session.

        Args:
            to: The peer_id of the recipient (from mesh_peers)
            message: The message content to send
        """
        _ensure_registered()
        result = _mesh_request("POST", "/send", {
            "from_peer": _PEER_ID,
            "to_peer": to,
            "content": message,
        })
        return result or {"error": "Failed to send message"}

    @server.tool()
    async def mesh_inbox() -> dict:
        """Read messages sent to this session.

        Returns unread messages from other peer sessions.
        Messages are marked as read after retrieval.
        """
        _ensure_registered()
        messages = _mesh_request("GET", f"/inbox/{_PEER_ID}")
        if messages:
            # Mark as read
            # NOTE(review): any truthy response triggers this, even one whose
            # message list is empty — harmless, but confirm that is intended.
            _mesh_request("POST", f"/inbox/{_PEER_ID}/read")
        return messages or {"messages": [], "count": 0}

    @server.tool()
    async def mesh_state(key: str = "", value: str = "", action: str = "get") -> dict:
        """Get or set shared state across all sessions.

        Shared state is visible to all peers. Use for coordinating work:
        server IPs, API keys, feature flags, task assignments.

        Args:
            key: State key name
            value: Value to set (only for action="set")
            action: "get" (read all or one key), "set" (write a key)
        """
        _ensure_registered()

        # Write path: requires both action="set" and a non-empty key.
        if action == "set" and key:
            result = _mesh_request("POST", "/state", {
                "key": key,
                "value": value,
                "set_by": _PEER_ID,
            })
            return result or {"error": "Failed to set state"}

        # Read path: single key when given, otherwise the full state map.
        if key:
            result = _mesh_request("GET", f"/state/{key}")
            return result or {"key": key, "value": None}

        result = _mesh_request("GET", "/state")
        return result or {"state": {}}

    @server.tool()
    async def mesh_lock(
        file_path: str,
        action: str = "query",
    ) -> dict:
        """Manage file locks across sessions.

        Before editing a shared file, check if another session has it locked.

        Args:
            file_path: Path to the file
            action: "query" (check lock), "acquire" (lock file), "release" (unlock)
        """
        _ensure_registered()
        # The broker dispatches on "action"; this tool is a thin passthrough.
        result = _mesh_request("POST", "/lock", {
            "file_path": file_path,
            "action": action,
            "locked_by": _PEER_ID,
        })
        return result or {"error": "Lock operation failed"}

    @server.tool()
    async def mesh_events() -> dict:
        """Get recent mesh events (peer joins, leaves, messages, state changes).

        Shows the activity log of the mesh network.
        """
        result = _mesh_request("GET", "/events")
        return result or {"events": []}

    @server.tool()
    async def mesh_status() -> dict:
        """Get mesh broker health and statistics.

        Shows broker uptime, peer count, and connection status.
        """
        result = _mesh_request("GET", "/status")
        if result:
            # Annotate the broker's reply with this session's identity.
            result["my_peer_id"] = _PEER_ID
            result["heartbeat_active"] = _HEARTBEAT_THREAD is not None
        return result or {
            "broker_up": False,
            "error": "Cannot reach mesh broker. Is the daemon running? (slm serve start)",
        }
@@ -0,0 +1,12 @@
1
+ # Copyright (c) 2026 Varun Pratap Bhardwaj / Qualixar
2
+ # Licensed under the Elastic License 2.0 - see LICENSE file
3
+ # Part of SuperLocalMemory V3 | https://qualixar.com | https://varunpratap.com
4
+
5
+ """SLM Mesh — Python port of the P2P agent communication broker.
6
+
7
+ Provides peer registry, message relay, shared state, file locks, and event logging
8
+ for multi-agent coordination. Runs as FastAPI sub-routes inside the unified daemon.
9
+
10
+ Independent broker — same wire protocol as standalone slm-mesh npm package,
11
+ but separate SQLite tables with mesh_ prefix.
12
+ """
@@ -0,0 +1,344 @@
1
+ # Copyright (c) 2026 Varun Pratap Bhardwaj / Qualixar
2
+ # Licensed under the Elastic License 2.0 - see LICENSE file
3
+ # Part of SuperLocalMemory V3 | https://qualixar.com | https://varunpratap.com
4
+
5
+ """SLM Mesh Broker — core orchestration for P2P agent communication.
6
+
7
+ Manages peer lifecycle, scheduled cleanup, and event logging.
8
+ All operations use the shared memory.db via SQLite tables with mesh_ prefix.
9
+
10
+ Part of Qualixar | Author: Varun Pratap Bhardwaj
11
+ """
12
+
13
+ from __future__ import annotations
14
+
15
+ import logging
16
+ import sqlite3
17
+ import threading
18
+ import time
19
+ import uuid
20
+ from datetime import datetime, timezone
21
+ from pathlib import Path
22
+
23
+ logger = logging.getLogger("superlocalmemory.mesh")
24
+
25
+
26
+ class MeshBroker:
27
+ """Lightweight mesh broker for SLM's unified daemon.
28
+
29
+ Provides peer management, messaging, state, locks, and events.
30
+ All methods are synchronous (called from FastAPI via run_in_executor
31
+ or directly for quick operations).
32
+ """
33
+
34
    def __init__(self, db_path: str | Path):
        """Bind the broker to the shared SQLite database at *db_path*."""
        # Stored as str because sqlite3.connect() is called with it directly.
        self._db_path = str(db_path)
        # Monotonic start time; get_status() reports uptime relative to this.
        self._started_at = time.monotonic()
        # Background cleanup thread, created lazily by start_cleanup().
        self._cleanup_thread: threading.Thread | None = None
        # Set by stop() to make the cleanup loop exit.
        self._stop_event = threading.Event()
39
+
40
+ def start_cleanup(self) -> None:
41
+ """Start background cleanup thread for stale peers/messages."""
42
+ self._cleanup_thread = threading.Thread(
43
+ target=self._cleanup_loop, daemon=True, name="mesh-cleanup",
44
+ )
45
+ self._cleanup_thread.start()
46
+
47
+ def stop(self) -> None:
48
+ self._stop_event.set()
49
+
50
+ # -- Connection helper --
51
+
52
+ def _conn(self) -> sqlite3.Connection:
53
+ conn = sqlite3.connect(self._db_path)
54
+ conn.execute("PRAGMA journal_mode=WAL")
55
+ conn.execute("PRAGMA busy_timeout=5000")
56
+ conn.row_factory = sqlite3.Row
57
+ return conn
58
+
59
+ # -- Peers --
60
+
61
+ def register_peer(self, session_id: str, summary: str = "",
62
+ host: str = "127.0.0.1", port: int = 0) -> dict:
63
+ conn = self._conn()
64
+ try:
65
+ now = datetime.now(timezone.utc).isoformat()
66
+ # Idempotent: update if same session_id exists
67
+ existing = conn.execute(
68
+ "SELECT peer_id FROM mesh_peers WHERE session_id = ?",
69
+ (session_id,),
70
+ ).fetchone()
71
+ if existing:
72
+ peer_id = existing["peer_id"]
73
+ conn.execute(
74
+ "UPDATE mesh_peers SET summary=?, host=?, port=?, last_heartbeat=?, status='active' "
75
+ "WHERE peer_id=?",
76
+ (summary, host, port, now, peer_id),
77
+ )
78
+ else:
79
+ peer_id = str(uuid.uuid4())[:12]
80
+ conn.execute(
81
+ "INSERT INTO mesh_peers (peer_id, session_id, summary, status, host, port, registered_at, last_heartbeat) "
82
+ "VALUES (?, ?, ?, 'active', ?, ?, ?, ?)",
83
+ (peer_id, session_id, summary, host, port, now, now),
84
+ )
85
+ self._log_event(conn, "peer_registered", peer_id, {"session_id": session_id})
86
+ conn.commit()
87
+ return {"peer_id": peer_id, "ok": True}
88
+ finally:
89
+ conn.close()
90
+
91
+ def deregister_peer(self, peer_id: str) -> dict:
92
+ conn = self._conn()
93
+ try:
94
+ row = conn.execute("SELECT 1 FROM mesh_peers WHERE peer_id=?", (peer_id,)).fetchone()
95
+ if not row:
96
+ return {"ok": False, "error": "peer not found"}
97
+ conn.execute("DELETE FROM mesh_peers WHERE peer_id=?", (peer_id,))
98
+ self._log_event(conn, "peer_deregistered", peer_id)
99
+ conn.commit()
100
+ return {"ok": True}
101
+ finally:
102
+ conn.close()
103
+
104
+ def heartbeat(self, peer_id: str) -> dict:
105
+ conn = self._conn()
106
+ try:
107
+ now = datetime.now(timezone.utc).isoformat()
108
+ cursor = conn.execute(
109
+ "UPDATE mesh_peers SET last_heartbeat=?, status='active' WHERE peer_id=?",
110
+ (now, peer_id),
111
+ )
112
+ if cursor.rowcount == 0:
113
+ return {"ok": False, "error": "peer not found"}
114
+ conn.commit()
115
+ return {"ok": True}
116
+ finally:
117
+ conn.close()
118
+
119
+ def update_summary(self, peer_id: str, summary: str) -> dict:
120
+ conn = self._conn()
121
+ try:
122
+ cursor = conn.execute(
123
+ "UPDATE mesh_peers SET summary=? WHERE peer_id=?",
124
+ (summary, peer_id),
125
+ )
126
+ if cursor.rowcount == 0:
127
+ return {"ok": False, "error": "peer not found"}
128
+ conn.commit()
129
+ return {"ok": True}
130
+ finally:
131
+ conn.close()
132
+
133
+ def list_peers(self) -> list[dict]:
134
+ conn = self._conn()
135
+ try:
136
+ rows = conn.execute(
137
+ "SELECT peer_id, session_id, summary, status, host, port, registered_at, last_heartbeat "
138
+ "FROM mesh_peers ORDER BY last_heartbeat DESC",
139
+ ).fetchall()
140
+ return [dict(r) for r in rows]
141
+ finally:
142
+ conn.close()
143
+
144
+ # -- Messages --
145
+
146
+ def send_message(self, from_peer: str, to_peer: str, content: str,
147
+ msg_type: str = "text") -> dict:
148
+ conn = self._conn()
149
+ try:
150
+ # Verify recipient exists
151
+ if not conn.execute("SELECT 1 FROM mesh_peers WHERE peer_id=?", (to_peer,)).fetchone():
152
+ return {"ok": False, "error": "recipient peer not found"}
153
+ now = datetime.now(timezone.utc).isoformat()
154
+ cursor = conn.execute(
155
+ "INSERT INTO mesh_messages (from_peer, to_peer, msg_type, content, read, created_at) "
156
+ "VALUES (?, ?, ?, ?, 0, ?)",
157
+ (from_peer, to_peer, msg_type, content, now),
158
+ )
159
+ self._log_event(conn, "message_sent", from_peer, {"to": to_peer})
160
+ conn.commit()
161
+ return {"ok": True, "id": cursor.lastrowid}
162
+ finally:
163
+ conn.close()
164
+
165
+ def get_inbox(self, peer_id: str) -> list[dict]:
166
+ conn = self._conn()
167
+ try:
168
+ rows = conn.execute(
169
+ "SELECT id, from_peer, to_peer, msg_type, content, read, created_at "
170
+ "FROM mesh_messages WHERE to_peer=? ORDER BY created_at DESC LIMIT 100",
171
+ (peer_id,),
172
+ ).fetchall()
173
+ return [dict(r) for r in rows]
174
+ finally:
175
+ conn.close()
176
+
177
+ def mark_read(self, peer_id: str, message_ids: list[int]) -> dict:
178
+ conn = self._conn()
179
+ try:
180
+ placeholders = ",".join("?" * len(message_ids))
181
+ conn.execute(
182
+ f"UPDATE mesh_messages SET read=1 WHERE to_peer=? AND id IN ({placeholders})",
183
+ [peer_id, *message_ids],
184
+ )
185
+ conn.commit()
186
+ return {"ok": True}
187
+ finally:
188
+ conn.close()
189
+
190
+ # -- State --
191
+
192
+ def get_state(self) -> dict:
193
+ conn = self._conn()
194
+ try:
195
+ rows = conn.execute("SELECT key, value, set_by, updated_at FROM mesh_state").fetchall()
196
+ return {r["key"]: {"value": r["value"], "set_by": r["set_by"], "updated_at": r["updated_at"]} for r in rows}
197
+ finally:
198
+ conn.close()
199
+
200
+ def set_state(self, key: str, value: str, set_by: str) -> dict:
201
+ conn = self._conn()
202
+ try:
203
+ now = datetime.now(timezone.utc).isoformat()
204
+ conn.execute(
205
+ "INSERT INTO mesh_state (key, value, set_by, updated_at) VALUES (?, ?, ?, ?) "
206
+ "ON CONFLICT(key) DO UPDATE SET value=excluded.value, set_by=excluded.set_by, updated_at=excluded.updated_at",
207
+ (key, value, set_by, now),
208
+ )
209
+ conn.commit()
210
+ return {"ok": True}
211
+ finally:
212
+ conn.close()
213
+
214
+ def get_state_key(self, key: str) -> dict | None:
215
+ conn = self._conn()
216
+ try:
217
+ row = conn.execute(
218
+ "SELECT key, value, set_by, updated_at FROM mesh_state WHERE key=?", (key,),
219
+ ).fetchone()
220
+ return dict(row) if row else None
221
+ finally:
222
+ conn.close()
223
+
224
+ # -- Locks --
225
+
226
+ def lock_action(self, file_path: str, locked_by: str, action: str) -> dict:
227
+ conn = self._conn()
228
+ try:
229
+ now = datetime.now(timezone.utc).isoformat()
230
+
231
+ if action == "acquire":
232
+ existing = conn.execute(
233
+ "SELECT locked_by, locked_at FROM mesh_locks WHERE file_path=?",
234
+ (file_path,),
235
+ ).fetchone()
236
+ if existing and existing["locked_by"] != locked_by:
237
+ return {"locked": True, "by": existing["locked_by"], "since": existing["locked_at"]}
238
+ conn.execute(
239
+ "INSERT INTO mesh_locks (file_path, locked_by, locked_at) VALUES (?, ?, ?) "
240
+ "ON CONFLICT(file_path) DO UPDATE SET locked_by=excluded.locked_by, locked_at=excluded.locked_at",
241
+ (file_path, locked_by, now),
242
+ )
243
+ conn.commit()
244
+ return {"ok": True, "action": "acquired"}
245
+
246
+ elif action == "release":
247
+ conn.execute("DELETE FROM mesh_locks WHERE file_path=? AND locked_by=?",
248
+ (file_path, locked_by))
249
+ conn.commit()
250
+ return {"ok": True, "action": "released"}
251
+
252
+ elif action == "query":
253
+ row = conn.execute(
254
+ "SELECT locked_by, locked_at FROM mesh_locks WHERE file_path=?",
255
+ (file_path,),
256
+ ).fetchone()
257
+ if row:
258
+ return {"locked": True, "by": row["locked_by"], "since": row["locked_at"]}
259
+ return {"locked": False}
260
+
261
+ return {"ok": False, "error": f"unknown action: {action}"}
262
+ finally:
263
+ conn.close()
264
+
265
+ # -- Events --
266
+
267
+ def get_events(self, limit: int = 100) -> list[dict]:
268
+ conn = self._conn()
269
+ try:
270
+ rows = conn.execute(
271
+ "SELECT id, event_type, payload, emitted_by, created_at "
272
+ "FROM mesh_events ORDER BY id DESC LIMIT ?",
273
+ (limit,),
274
+ ).fetchall()
275
+ return [dict(r) for r in rows]
276
+ finally:
277
+ conn.close()
278
+
279
+ def _log_event(self, conn: sqlite3.Connection, event_type: str,
280
+ emitted_by: str, payload: dict | None = None) -> None:
281
+ import json as _json
282
+ now = datetime.now(timezone.utc).isoformat()
283
+ conn.execute(
284
+ "INSERT INTO mesh_events (event_type, payload, emitted_by, created_at) VALUES (?, ?, ?, ?)",
285
+ (event_type, _json.dumps(payload or {}), emitted_by, now),
286
+ )
287
+
288
+ # -- Status --
289
+
290
+ def get_status(self) -> dict:
291
+ conn = self._conn()
292
+ try:
293
+ peer_count = conn.execute("SELECT COUNT(*) FROM mesh_peers WHERE status='active'").fetchone()[0]
294
+ return {
295
+ "broker_up": True,
296
+ "peer_count": peer_count,
297
+ "uptime_s": round(time.monotonic() - self._started_at),
298
+ }
299
+ finally:
300
+ conn.close()
301
+
302
+ # -- Cleanup --
303
+
304
+ def _cleanup_loop(self) -> None:
305
+ """Background cleanup: mark stale peers, delete old messages."""
306
+ while not self._stop_event.is_set():
307
+ self._stop_event.wait(300) # Every 5 min
308
+ if self._stop_event.is_set():
309
+ break
310
+ try:
311
+ self._run_cleanup()
312
+ except Exception as exc:
313
+ logger.debug("Mesh cleanup error: %s", exc)
314
+
315
    def _run_cleanup(self) -> None:
        """One cleanup pass: demote unresponsive peers and prune old rows."""
        conn = self._conn()
        try:
            now = datetime.now(timezone.utc)
            # Mark stale peers (no heartbeat for 5 min)
            conn.execute(
                "UPDATE mesh_peers SET status='stale' "
                "WHERE status='active' AND datetime(last_heartbeat) < datetime(?, '-5 minutes')",
                (now.isoformat(),),
            )
            # Delete dead peers (stale > 30 min)
            conn.execute(
                "UPDATE mesh_peers SET status='dead' "
                "WHERE status='stale' AND datetime(last_heartbeat) < datetime(?, '-30 minutes')",
                (now.isoformat(),),
            )
            # 'dead' rows are removed immediately after being marked above, so
            # the status only ever exists transiently within this pass.
            conn.execute("DELETE FROM mesh_peers WHERE status='dead'")
            # Delete read messages > 24hr old
            conn.execute(
                "DELETE FROM mesh_messages WHERE read=1 AND datetime(created_at) < datetime(?, '-24 hours')",
                (now.isoformat(),),
            )
            # Delete expired locks
            # NOTE(review): lock_action() never populates expires_at, so this
            # comparison runs against NULL and matches nothing unless the
            # schema supplies a default — confirm against schema_v343.
            conn.execute(
                "DELETE FROM mesh_locks WHERE datetime(expires_at) < datetime(?)",
                (now.isoformat(),),
            )
            conn.commit()
        finally:
            conn.close()
@@ -282,12 +282,18 @@ class EntityGraphChannel:
282
282
  if use_cache:
283
283
  neighbors = self._adj.get(fid, ())
284
284
  for neighbor, edge_weight in neighbors:
285
- # v3.4.1 P1: Weighted propagation + PageRank bias
286
- weighted = activation[fid] * self._decay * edge_weight
287
- if self._graph_metrics and neighbor in self._graph_metrics:
288
- target_pr = self._graph_metrics[neighbor].get("pagerank_score", 0.0)
289
- pr_boost = min(1.0 + target_pr * 2.0, 2.0)
290
- weighted *= pr_boost
285
+ # v3.4.2: Only apply edge_weight and PageRank bias when
286
+ # graph metrics are available. Without metrics, edge_weight
287
+ # dampens propagation by ~14% with no compensating boost,
288
+ # causing retrieval regression (68.4% vs 70.4% on LoCoMo).
289
+ if self._graph_metrics:
290
+ weighted = activation[fid] * self._decay * edge_weight
291
+ if neighbor in self._graph_metrics:
292
+ target_pr = self._graph_metrics[neighbor].get("pagerank_score", 0.0)
293
+ pr_boost = min(1.0 + target_pr * 2.0, 2.0)
294
+ weighted *= pr_boost
295
+ else:
296
+ weighted = activation[fid] * self._decay
291
297
  if weighted >= self._threshold and weighted > activation.get(neighbor, 0.0):
292
298
  activation[neighbor] = weighted
293
299
  next_frontier.add(neighbor)
@@ -236,16 +236,15 @@ def create_app() -> FastAPI:
236
236
  return application
237
237
 
238
238
 
239
- app = create_app()
240
-
241
-
242
- # ============================================================================
243
- # Server Startup
244
- # ============================================================================
239
+ # v3.4.3: Module-level app removed to prevent duplicate MemoryEngine.
240
+ # The unified daemon (server/unified_daemon.py) imports routes from this
241
+ # module via _register_dashboard_routes(). Do NOT create app here.
242
+ # For standalone use: python -m superlocalmemory.server.api
245
243
 
246
244
  if __name__ == "__main__":
245
+ app = create_app()
247
246
  print("=" * 60)
248
- print("SuperLocalMemory V3 - API Server")
247
+ print("SuperLocalMemory V3 - API Server (standalone mode)")
249
248
  print("=" * 60)
250
249
  print(f"Database: {DB_PATH}")
251
250
  print(f"UI Directory: {UI_DIR}")