superlocalmemory 3.4.1 → 3.4.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/pyproject.toml +11 -2
- package/scripts/postinstall.js +26 -7
- package/src/superlocalmemory/cli/commands.py +42 -60
- package/src/superlocalmemory/cli/daemon.py +107 -47
- package/src/superlocalmemory/cli/main.py +10 -0
- package/src/superlocalmemory/cli/setup_wizard.py +137 -9
- package/src/superlocalmemory/core/config.py +28 -0
- package/src/superlocalmemory/core/consolidation_engine.py +38 -1
- package/src/superlocalmemory/core/engine.py +9 -0
- package/src/superlocalmemory/core/health_monitor.py +313 -0
- package/src/superlocalmemory/core/reranker_worker.py +19 -5
- package/src/superlocalmemory/ingestion/__init__.py +13 -0
- package/src/superlocalmemory/ingestion/adapter_manager.py +234 -0
- package/src/superlocalmemory/ingestion/base_adapter.py +177 -0
- package/src/superlocalmemory/ingestion/calendar_adapter.py +340 -0
- package/src/superlocalmemory/ingestion/credentials.py +118 -0
- package/src/superlocalmemory/ingestion/gmail_adapter.py +369 -0
- package/src/superlocalmemory/ingestion/parsers.py +100 -0
- package/src/superlocalmemory/ingestion/transcript_adapter.py +156 -0
- package/src/superlocalmemory/learning/consolidation_worker.py +47 -1
- package/src/superlocalmemory/learning/entity_compiler.py +377 -0
- package/src/superlocalmemory/mesh/__init__.py +12 -0
- package/src/superlocalmemory/mesh/broker.py +344 -0
- package/src/superlocalmemory/retrieval/entity_channel.py +12 -6
- package/src/superlocalmemory/server/api.py +6 -7
- package/src/superlocalmemory/server/routes/entity.py +95 -0
- package/src/superlocalmemory/server/routes/ingest.py +110 -0
- package/src/superlocalmemory/server/routes/mesh.py +186 -0
- package/src/superlocalmemory/server/unified_daemon.py +691 -0
- package/src/superlocalmemory/storage/schema_v343.py +229 -0
- package/src/superlocalmemory.egg-info/PKG-INFO +0 -597
- package/src/superlocalmemory.egg-info/SOURCES.txt +0 -287
- package/src/superlocalmemory.egg-info/dependency_links.txt +0 -1
- package/src/superlocalmemory.egg-info/entry_points.txt +0 -2
- package/src/superlocalmemory.egg-info/requires.txt +0 -47
- package/src/superlocalmemory.egg-info/top_level.txt +0 -1
|
@@ -0,0 +1,344 @@
|
|
|
1
|
+
# Copyright (c) 2026 Varun Pratap Bhardwaj / Qualixar
|
|
2
|
+
# Licensed under the Elastic License 2.0 - see LICENSE file
|
|
3
|
+
# Part of SuperLocalMemory V3 | https://qualixar.com | https://varunpratap.com
|
|
4
|
+
|
|
5
|
+
"""SLM Mesh Broker — core orchestration for P2P agent communication.
|
|
6
|
+
|
|
7
|
+
Manages peer lifecycle, scheduled cleanup, and event logging.
|
|
8
|
+
All operations use the shared memory.db via SQLite tables with mesh_ prefix.
|
|
9
|
+
|
|
10
|
+
Part of Qualixar | Author: Varun Pratap Bhardwaj
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
from __future__ import annotations
|
|
14
|
+
|
|
15
|
+
import logging
|
|
16
|
+
import sqlite3
|
|
17
|
+
import threading
|
|
18
|
+
import time
|
|
19
|
+
import uuid
|
|
20
|
+
from datetime import datetime, timezone
|
|
21
|
+
from pathlib import Path
|
|
22
|
+
|
|
23
|
+
logger = logging.getLogger("superlocalmemory.mesh")
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class MeshBroker:
    """Lightweight mesh broker for SLM's unified daemon.

    Provides peer management, messaging, shared state, file locks, and an
    event log.  All methods are synchronous (called from FastAPI via
    run_in_executor or directly for quick operations).

    Every operation opens a short-lived SQLite connection against the
    shared memory.db (tables use the ``mesh_`` prefix), so the broker
    holds no connection state of its own and individual calls are safe
    from multiple threads (WAL mode + busy_timeout handle contention).
    """

    def __init__(self, db_path: str | Path):
        """Bind the broker to the shared memory.db at *db_path*."""
        self._db_path = str(db_path)
        # monotonic() so uptime is immune to wall-clock adjustments.
        self._started_at = time.monotonic()
        self._cleanup_thread: threading.Thread | None = None
        self._stop_event = threading.Event()

    def start_cleanup(self) -> None:
        """Start the background cleanup thread for stale peers/messages.

        Idempotent: a second call while the thread is alive is a no-op
        (previously each call spawned an additional thread).
        """
        if self._cleanup_thread is not None and self._cleanup_thread.is_alive():
            return
        self._cleanup_thread = threading.Thread(
            target=self._cleanup_loop, daemon=True, name="mesh-cleanup",
        )
        self._cleanup_thread.start()

    def stop(self) -> None:
        """Signal the cleanup loop to exit and wait briefly for it.

        The loop blocks in ``Event.wait`` and wakes as soon as the event
        is set, so the bounded join returns almost immediately.
        """
        self._stop_event.set()
        if self._cleanup_thread is not None:
            self._cleanup_thread.join(timeout=2.0)

    # -- Connection helper --

    def _conn(self) -> sqlite3.Connection:
        """Open a fresh WAL-mode connection with a 5 s busy timeout.

        Row factory is ``sqlite3.Row`` so callers can index by column name.
        """
        conn = sqlite3.connect(self._db_path)
        conn.execute("PRAGMA journal_mode=WAL")
        conn.execute("PRAGMA busy_timeout=5000")
        conn.row_factory = sqlite3.Row
        return conn

    # -- Peers --

    def register_peer(self, session_id: str, summary: str = "",
                      host: str = "127.0.0.1", port: int = 0) -> dict:
        """Register (or re-register) the peer for *session_id*.

        Idempotent: an existing row for the same session_id is updated in
        place and keeps its peer_id; otherwise a new 12-char id is minted.
        Emits a ``peer_registered`` event and returns
        ``{"peer_id": ..., "ok": True}``.
        """
        conn = self._conn()
        try:
            now = datetime.now(timezone.utc).isoformat()
            existing = conn.execute(
                "SELECT peer_id FROM mesh_peers WHERE session_id = ?",
                (session_id,),
            ).fetchone()
            if existing:
                peer_id = existing["peer_id"]
                conn.execute(
                    "UPDATE mesh_peers SET summary=?, host=?, port=?, last_heartbeat=?, status='active' "
                    "WHERE peer_id=?",
                    (summary, host, port, now, peer_id),
                )
            else:
                # Truncated UUID keeps ids human-pasteable; collision risk
                # is negligible for the expected handful of local peers.
                peer_id = str(uuid.uuid4())[:12]
                conn.execute(
                    "INSERT INTO mesh_peers (peer_id, session_id, summary, status, host, port, registered_at, last_heartbeat) "
                    "VALUES (?, ?, ?, 'active', ?, ?, ?, ?)",
                    (peer_id, session_id, summary, host, port, now, now),
                )
            self._log_event(conn, "peer_registered", peer_id, {"session_id": session_id})
            conn.commit()
            return {"peer_id": peer_id, "ok": True}
        finally:
            conn.close()

    def deregister_peer(self, peer_id: str) -> dict:
        """Remove a peer and emit ``peer_deregistered``; error dict if unknown."""
        conn = self._conn()
        try:
            row = conn.execute("SELECT 1 FROM mesh_peers WHERE peer_id=?", (peer_id,)).fetchone()
            if not row:
                return {"ok": False, "error": "peer not found"}
            conn.execute("DELETE FROM mesh_peers WHERE peer_id=?", (peer_id,))
            self._log_event(conn, "peer_deregistered", peer_id)
            conn.commit()
            return {"ok": True}
        finally:
            conn.close()

    def heartbeat(self, peer_id: str) -> dict:
        """Refresh last_heartbeat and flip the peer back to 'active'."""
        conn = self._conn()
        try:
            now = datetime.now(timezone.utc).isoformat()
            cursor = conn.execute(
                "UPDATE mesh_peers SET last_heartbeat=?, status='active' WHERE peer_id=?",
                (now, peer_id),
            )
            if cursor.rowcount == 0:
                return {"ok": False, "error": "peer not found"}
            conn.commit()
            return {"ok": True}
        finally:
            conn.close()

    def update_summary(self, peer_id: str, summary: str) -> dict:
        """Replace the peer's free-text summary."""
        conn = self._conn()
        try:
            cursor = conn.execute(
                "UPDATE mesh_peers SET summary=? WHERE peer_id=?",
                (summary, peer_id),
            )
            if cursor.rowcount == 0:
                return {"ok": False, "error": "peer not found"}
            conn.commit()
            return {"ok": True}
        finally:
            conn.close()

    def list_peers(self) -> list[dict]:
        """Return all peers (any status), most recently heartbeaten first."""
        conn = self._conn()
        try:
            rows = conn.execute(
                "SELECT peer_id, session_id, summary, status, host, port, registered_at, last_heartbeat "
                "FROM mesh_peers ORDER BY last_heartbeat DESC",
            ).fetchall()
            return [dict(r) for r in rows]
        finally:
            conn.close()

    # -- Messages --

    def send_message(self, from_peer: str, to_peer: str, content: str,
                     msg_type: str = "text") -> dict:
        """Queue a message for *to_peer*; fails if the recipient is unknown.

        Only the recipient is validated — the sender id is recorded as
        given.  Emits a ``message_sent`` event and returns the row id.
        """
        conn = self._conn()
        try:
            if not conn.execute("SELECT 1 FROM mesh_peers WHERE peer_id=?", (to_peer,)).fetchone():
                return {"ok": False, "error": "recipient peer not found"}
            now = datetime.now(timezone.utc).isoformat()
            cursor = conn.execute(
                "INSERT INTO mesh_messages (from_peer, to_peer, msg_type, content, read, created_at) "
                "VALUES (?, ?, ?, ?, 0, ?)",
                (from_peer, to_peer, msg_type, content, now),
            )
            self._log_event(conn, "message_sent", from_peer, {"to": to_peer})
            conn.commit()
            return {"ok": True, "id": cursor.lastrowid}
        finally:
            conn.close()

    def get_inbox(self, peer_id: str) -> list[dict]:
        """Return up to 100 newest messages addressed to *peer_id*.

        Read and unread messages are both returned (``read`` flag is in
        the payload); nothing is marked read here.
        """
        conn = self._conn()
        try:
            rows = conn.execute(
                "SELECT id, from_peer, to_peer, msg_type, content, read, created_at "
                "FROM mesh_messages WHERE to_peer=? ORDER BY created_at DESC LIMIT 100",
                (peer_id,),
            ).fetchall()
            return [dict(r) for r in rows]
        finally:
            conn.close()

    def mark_read(self, peer_id: str, message_ids: list[int]) -> dict:
        """Mark the given inbox messages as read for *peer_id*.

        An empty id list is a no-op.  (Previously an empty list produced
        ``... id IN ()``, which is a SQLite syntax error and raised
        OperationalError.)  The ``to_peer=?`` guard prevents one peer
        from flipping flags on another peer's messages.
        """
        if not message_ids:
            return {"ok": True}
        conn = self._conn()
        try:
            placeholders = ",".join("?" * len(message_ids))
            conn.execute(
                f"UPDATE mesh_messages SET read=1 WHERE to_peer=? AND id IN ({placeholders})",
                [peer_id, *message_ids],
            )
            conn.commit()
            return {"ok": True}
        finally:
            conn.close()

    # -- State --

    def get_state(self) -> dict:
        """Return the whole shared key/value state as a nested dict."""
        conn = self._conn()
        try:
            rows = conn.execute("SELECT key, value, set_by, updated_at FROM mesh_state").fetchall()
            return {r["key"]: {"value": r["value"], "set_by": r["set_by"], "updated_at": r["updated_at"]} for r in rows}
        finally:
            conn.close()

    def set_state(self, key: str, value: str, set_by: str) -> dict:
        """Upsert one shared-state key (last writer wins)."""
        conn = self._conn()
        try:
            now = datetime.now(timezone.utc).isoformat()
            conn.execute(
                "INSERT INTO mesh_state (key, value, set_by, updated_at) VALUES (?, ?, ?, ?) "
                "ON CONFLICT(key) DO UPDATE SET value=excluded.value, set_by=excluded.set_by, updated_at=excluded.updated_at",
                (key, value, set_by, now),
            )
            conn.commit()
            return {"ok": True}
        finally:
            conn.close()

    def get_state_key(self, key: str) -> dict | None:
        """Return one state row as a dict, or None when the key is unset."""
        conn = self._conn()
        try:
            row = conn.execute(
                "SELECT key, value, set_by, updated_at FROM mesh_state WHERE key=?", (key,),
            ).fetchone()
            return dict(row) if row else None
        finally:
            conn.close()

    # -- Locks --

    def lock_action(self, file_path: str, locked_by: str, action: str) -> dict:
        """Advisory per-file locking: action is 'acquire', 'release', or 'query'.

        - acquire: succeeds (and is re-entrant) for the current holder;
          if another peer holds the lock, returns who and since when.
        - release: deletes the lock only if *locked_by* is the holder.
        - query: reports the current holder, if any.
        Unknown actions return ``{"ok": False, "error": ...}``.
        """
        conn = self._conn()
        try:
            now = datetime.now(timezone.utc).isoformat()

            if action == "acquire":
                existing = conn.execute(
                    "SELECT locked_by, locked_at FROM mesh_locks WHERE file_path=?",
                    (file_path,),
                ).fetchone()
                if existing and existing["locked_by"] != locked_by:
                    return {"locked": True, "by": existing["locked_by"], "since": existing["locked_at"]}
                conn.execute(
                    "INSERT INTO mesh_locks (file_path, locked_by, locked_at) VALUES (?, ?, ?) "
                    "ON CONFLICT(file_path) DO UPDATE SET locked_by=excluded.locked_by, locked_at=excluded.locked_at",
                    (file_path, locked_by, now),
                )
                conn.commit()
                return {"ok": True, "action": "acquired"}

            elif action == "release":
                conn.execute("DELETE FROM mesh_locks WHERE file_path=? AND locked_by=?",
                             (file_path, locked_by))
                conn.commit()
                return {"ok": True, "action": "released"}

            elif action == "query":
                row = conn.execute(
                    "SELECT locked_by, locked_at FROM mesh_locks WHERE file_path=?",
                    (file_path,),
                ).fetchone()
                if row:
                    return {"locked": True, "by": row["locked_by"], "since": row["locked_at"]}
                return {"locked": False}

            return {"ok": False, "error": f"unknown action: {action}"}
        finally:
            conn.close()

    # -- Events --

    def get_events(self, limit: int = 100) -> list[dict]:
        """Return the newest *limit* mesh events (payload is a JSON string)."""
        conn = self._conn()
        try:
            rows = conn.execute(
                "SELECT id, event_type, payload, emitted_by, created_at "
                "FROM mesh_events ORDER BY id DESC LIMIT ?",
                (limit,),
            ).fetchall()
            return [dict(r) for r in rows]
        finally:
            conn.close()

    def _log_event(self, conn: sqlite3.Connection, event_type: str,
                   emitted_by: str, payload: dict | None = None) -> None:
        """Append one event row on the caller's connection (no commit here).

        The caller owns the transaction, so event + triggering change
        commit atomically.
        """
        import json as _json
        now = datetime.now(timezone.utc).isoformat()
        conn.execute(
            "INSERT INTO mesh_events (event_type, payload, emitted_by, created_at) VALUES (?, ?, ?, ?)",
            (event_type, _json.dumps(payload or {}), emitted_by, now),
        )

    # -- Status --

    def get_status(self) -> dict:
        """Return broker liveness, active-peer count, and uptime in seconds."""
        conn = self._conn()
        try:
            peer_count = conn.execute("SELECT COUNT(*) FROM mesh_peers WHERE status='active'").fetchone()[0]
            return {
                "broker_up": True,
                "peer_count": peer_count,
                "uptime_s": round(time.monotonic() - self._started_at),
            }
        finally:
            conn.close()

    # -- Cleanup --

    def _cleanup_loop(self) -> None:
        """Background cleanup: mark stale peers, delete old messages.

        Runs every 5 minutes until stop() sets the event; individual
        failures are logged at debug level and never kill the thread.
        """
        while not self._stop_event.is_set():
            self._stop_event.wait(300)  # Every 5 min
            if self._stop_event.is_set():
                break
            try:
                self._run_cleanup()
            except Exception as exc:
                logger.debug("Mesh cleanup error: %s", exc)

    def _run_cleanup(self) -> None:
        """One cleanup pass over peers, messages, and locks."""
        conn = self._conn()
        try:
            now = datetime.now(timezone.utc)
            # Mark stale peers (no heartbeat for 5 min)
            conn.execute(
                "UPDATE mesh_peers SET status='stale' "
                "WHERE status='active' AND datetime(last_heartbeat) < datetime(?, '-5 minutes')",
                (now.isoformat(),),
            )
            # Promote to dead (stale > 30 min) and delete in the same pass.
            conn.execute(
                "UPDATE mesh_peers SET status='dead' "
                "WHERE status='stale' AND datetime(last_heartbeat) < datetime(?, '-30 minutes')",
                (now.isoformat(),),
            )
            conn.execute("DELETE FROM mesh_peers WHERE status='dead'")
            # Delete read messages > 24hr old
            conn.execute(
                "DELETE FROM mesh_messages WHERE read=1 AND datetime(created_at) < datetime(?, '-24 hours')",
                (now.isoformat(),),
            )
            # Delete expired locks.
            # NOTE(review): this assumes mesh_locks has an expires_at
            # column, but lock_action only ever writes locked_at —
            # confirm schema_v343 populates expires_at, otherwise this
            # DELETE never matches (NULL comparison yields no rows).
            conn.execute(
                "DELETE FROM mesh_locks WHERE datetime(expires_at) < datetime(?)",
                (now.isoformat(),),
            )
            conn.commit()
        finally:
            conn.close()
|
|
@@ -282,12 +282,18 @@ class EntityGraphChannel:
|
|
|
282
282
|
if use_cache:
|
|
283
283
|
neighbors = self._adj.get(fid, ())
|
|
284
284
|
for neighbor, edge_weight in neighbors:
|
|
285
|
-
# v3.4.
|
|
286
|
-
|
|
287
|
-
|
|
288
|
-
|
|
289
|
-
|
|
290
|
-
weighted
|
|
285
|
+
# v3.4.2: Only apply edge_weight and PageRank bias when
|
|
286
|
+
# graph metrics are available. Without metrics, edge_weight
|
|
287
|
+
# dampens propagation by ~14% with no compensating boost,
|
|
288
|
+
# causing retrieval regression (68.4% vs 70.4% on LoCoMo).
|
|
289
|
+
if self._graph_metrics:
|
|
290
|
+
weighted = activation[fid] * self._decay * edge_weight
|
|
291
|
+
if neighbor in self._graph_metrics:
|
|
292
|
+
target_pr = self._graph_metrics[neighbor].get("pagerank_score", 0.0)
|
|
293
|
+
pr_boost = min(1.0 + target_pr * 2.0, 2.0)
|
|
294
|
+
weighted *= pr_boost
|
|
295
|
+
else:
|
|
296
|
+
weighted = activation[fid] * self._decay
|
|
291
297
|
if weighted >= self._threshold and weighted > activation.get(neighbor, 0.0):
|
|
292
298
|
activation[neighbor] = weighted
|
|
293
299
|
next_frontier.add(neighbor)
|
|
@@ -236,16 +236,15 @@ def create_app() -> FastAPI:
|
|
|
236
236
|
return application
|
|
237
237
|
|
|
238
238
|
|
|
239
|
-
app
|
|
240
|
-
|
|
241
|
-
|
|
242
|
-
#
|
|
243
|
-
# Server Startup
|
|
244
|
-
# ============================================================================
|
|
239
|
+
# v3.4.3: Module-level app removed to prevent duplicate MemoryEngine.
|
|
240
|
+
# The unified daemon (server/unified_daemon.py) imports routes from this
|
|
241
|
+
# module via _register_dashboard_routes(). Do NOT create app here.
|
|
242
|
+
# For standalone use: python -m superlocalmemory.server.api
|
|
245
243
|
|
|
246
244
|
if __name__ == "__main__":
|
|
245
|
+
app = create_app()
|
|
247
246
|
print("=" * 60)
|
|
248
|
-
print("SuperLocalMemory V3 - API Server")
|
|
247
|
+
print("SuperLocalMemory V3 - API Server (standalone mode)")
|
|
249
248
|
print("=" * 60)
|
|
250
249
|
print(f"Database: {DB_PATH}")
|
|
251
250
|
print(f"UI Directory: {UI_DIR}")
|
|
@@ -0,0 +1,95 @@
|
|
|
1
|
+
# Copyright (c) 2026 Varun Pratap Bhardwaj / Qualixar
|
|
2
|
+
# Licensed under the Elastic License 2.0 - see LICENSE file
|
|
3
|
+
# Part of SuperLocalMemory V3 | https://qualixar.com | https://varunpratap.com
|
|
4
|
+
|
|
5
|
+
"""Entity compilation API routes — view and recompile entity summaries."""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
from fastapi import APIRouter, HTTPException, Request, Query
|
|
10
|
+
|
|
11
|
+
router = APIRouter(prefix="/api/entity", tags=["entity"])
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
@router.get("/{entity_name}")
async def get_entity(
    entity_name: str,
    request: Request,
    profile: str = Query(default="default"),
    project: str = Query(default=""),
):
    """Get compiled truth + timeline for an entity.

    Looks up the entity by canonical name (case-insensitive) in the
    shared SQLite DB, joined against its compiled profile row, scoped to
    the given profile/project pair.

    Raises:
        HTTPException 503: the singleton engine has not been attached to
            ``app.state`` yet.
        HTTPException 404: no matching entity_profiles row.
    """
    engine = request.app.state.engine
    if engine is None:
        raise HTTPException(503, detail="Engine not initialized")

    # Local imports keep this optional route importable without pulling
    # sqlite3/json at module load.
    import sqlite3
    import json
    # NOTE(review): reaches into the private engine._config for the DB
    # path — confirm there is no public accessor before hardening.
    conn = sqlite3.connect(str(engine._config.db_path))
    conn.row_factory = sqlite3.Row
    try:
        # Search by canonical_name (case-insensitive)
        row = conn.execute("""
            SELECT ep.compiled_truth, ep.timeline, ep.fact_ids_json,
                   ep.last_compiled_at, ep.compilation_confidence,
                   ep.knowledge_summary, ce.entity_type
            FROM entity_profiles ep
            JOIN canonical_entities ce ON ep.entity_id = ce.entity_id
            WHERE LOWER(ce.canonical_name) = LOWER(?)
              AND ep.profile_id = ?
              AND ep.project_name = ?
        """, (entity_name, profile, project)).fetchone()

        if not row:
            raise HTTPException(404, detail=f"Entity '{entity_name}' not found")

        # timeline / fact_ids_json are stored as JSON text; empty/NULL
        # columns are normalized to empty containers for the client.
        return {
            "entity_name": entity_name,
            "entity_type": row["entity_type"],
            "compiled_truth": row["compiled_truth"] or "",
            "knowledge_summary": row["knowledge_summary"] or "",
            "timeline": json.loads(row["timeline"]) if row["timeline"] else [],
            "source_fact_ids": json.loads(row["fact_ids_json"]) if row["fact_ids_json"] else [],
            "last_compiled_at": row["last_compiled_at"],
            "confidence": row["compilation_confidence"],
        }
    finally:
        conn.close()
|
58
|
+
|
|
59
|
+
|
|
60
|
+
@router.post("/{entity_name}/recompile")
async def recompile_entity(
    entity_name: str,
    request: Request,
    profile: str = Query(default="default"),
    project: str = Query(default=""),
):
    """Force immediate recompilation of an entity.

    Resolves the entity by canonical name (case-insensitive) within the
    profile, then invokes EntityCompiler.compile_entity synchronously.
    Note: unlike the GET route, the lookup here is not filtered by
    *project* — only *profile* — while the compile call does receive
    *project*.  NOTE(review): confirm that asymmetry is intentional.

    Raises:
        HTTPException 503: engine not attached to ``app.state``.
        HTTPException 404: no matching canonical_entities row.
    """
    engine = request.app.state.engine
    if engine is None:
        raise HTTPException(503, detail="Engine not initialized")

    import sqlite3
    conn = sqlite3.connect(str(engine._config.db_path))
    conn.row_factory = sqlite3.Row
    try:
        entity = conn.execute(
            "SELECT entity_id, canonical_name, entity_type FROM canonical_entities "
            "WHERE LOWER(canonical_name) = LOWER(?) AND profile_id = ?",
            (entity_name, profile),
        ).fetchone()

        if not entity:
            raise HTTPException(404, detail=f"Entity '{entity_name}' not found")

        # Deferred import: the compiler pulls in the learning stack,
        # which is only needed when this endpoint is actually hit.
        from superlocalmemory.learning.entity_compiler import EntityCompiler
        compiler = EntityCompiler(str(engine._config.db_path), engine._config)
        result = compiler.compile_entity(
            profile, project, entity["entity_id"], entity["canonical_name"],
        )

        # compile_entity returning a falsy value means there were no
        # facts to compile — reported as ok=False, not an error.
        if result:
            return {"ok": True, **result}
        return {"ok": False, "reason": "no facts to compile"}
    finally:
        conn.close()
|
|
@@ -0,0 +1,110 @@
|
|
|
1
|
+
# Copyright (c) 2026 Varun Pratap Bhardwaj / Qualixar
|
|
2
|
+
# Licensed under the Elastic License 2.0 - see LICENSE file
|
|
3
|
+
# Part of SuperLocalMemory V3 | https://qualixar.com | https://varunpratap.com
|
|
4
|
+
|
|
5
|
+
"""Ingestion endpoint — accepts data from external adapters.
|
|
6
|
+
|
|
7
|
+
POST /ingest with {content, source_type, dedup_key, metadata}.
|
|
8
|
+
Deduplicates by source_type + dedup_key. Stores via MemoryEngine.
|
|
9
|
+
Admission control: max 10 concurrent ingestions (HTTP 429 on overflow).
|
|
10
|
+
|
|
11
|
+
Part of Qualixar | Author: Varun Pratap Bhardwaj
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
from __future__ import annotations
|
|
15
|
+
|
|
16
|
+
import json
|
|
17
|
+
import sqlite3
|
|
18
|
+
import threading
|
|
19
|
+
from datetime import datetime, timezone
|
|
20
|
+
|
|
21
|
+
from fastapi import APIRouter, HTTPException, Request
|
|
22
|
+
from pydantic import BaseModel
|
|
23
|
+
|
|
24
|
+
router = APIRouter(tags=["ingestion"])
|
|
25
|
+
|
|
26
|
+
# Admission-control bookkeeping for POST /ingest: at most _MAX_CONCURRENT
# requests may be inside the ingest body at once; the counter is guarded
# by _active_lock because FastAPI may serve requests from multiple threads.
_MAX_CONCURRENT = 10
_active_count = 0
_active_lock = threading.Lock()


class IngestRequest(BaseModel):
    """Payload accepted by POST /ingest.

    ``(source_type, dedup_key)`` together form the deduplication key
    checked against the ingestion_log table.
    """

    content: str       # raw text to store via MemoryEngine
    source_type: str   # adapter name, e.g. "gmail" or "calendar"
    dedup_key: str     # adapter-chosen unique id within source_type
    # Mutable default is safe here: pydantic deep-copies field defaults
    # per instance (unlike a plain function default).
    metadata: dict = {}
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
@router.post("/ingest")
async def ingest(req: IngestRequest, request: Request):
    """Ingest content from an external adapter.

    Deduplicates by (source_type, dedup_key). Returns 429 if too many
    concurrent ingestions. Stores via the singleton MemoryEngine.

    Flow: validate → admission control (bounded global counter) →
    dedup lookup in ingestion_log → engine.store → append to
    ingestion_log.  The counter is always decremented in the outer
    ``finally``, including on the early dedup return and on errors.
    """
    global _active_count

    engine = request.app.state.engine
    if engine is None:
        raise HTTPException(503, detail="Engine not initialized")

    if not req.content:
        raise HTTPException(400, detail="content required")
    if not req.source_type:
        raise HTTPException(400, detail="source_type required")
    if not req.dedup_key:
        raise HTTPException(400, detail="dedup_key required")

    # Admission control: reject (don't queue) when saturated, so slow
    # engine.store calls cannot pile up unboundedly.
    with _active_lock:
        if _active_count >= _MAX_CONCURRENT:
            raise HTTPException(
                429,
                detail="Too many concurrent ingestions",
                headers={"Retry-After": "5"},
            )
        _active_count += 1

    try:
        # Dedup check.  There is a small window between this check and
        # the later INSERT, but INSERT OR IGNORE below makes a racing
        # duplicate harmless (log row is unique, content stored twice
        # at worst).
        conn = sqlite3.connect(str(engine._config.db_path))
        try:
            existing = conn.execute(
                "SELECT id FROM ingestion_log WHERE source_type=? AND dedup_key=?",
                (req.source_type, req.dedup_key),
            ).fetchone()
            if existing:
                return {"ingested": False, "reason": "already_ingested"}
        finally:
            conn.close()

        # Store via engine.
        # NOTE(review): assumes engine.store(content, metadata=...) returns
        # a JSON-serializable list of fact ids — confirm against
        # MemoryEngine's signature.
        metadata = {**req.metadata, "source_type": req.source_type}
        fact_ids = engine.store(req.content, metadata=metadata)

        # Log to ingestion_log (OR IGNORE tolerates a concurrent winner).
        conn = sqlite3.connect(str(engine._config.db_path))
        try:
            conn.execute(
                "INSERT OR IGNORE INTO ingestion_log "
                "(source_type, dedup_key, fact_ids, metadata, status, ingested_at) "
                "VALUES (?, ?, ?, ?, 'ingested', ?)",
                (
                    req.source_type,
                    req.dedup_key,
                    json.dumps(fact_ids),
                    json.dumps(req.metadata),
                    datetime.now(timezone.utc).isoformat(),
                ),
            )
            conn.commit()
        finally:
            conn.close()

        return {"ingested": True, "fact_ids": fact_ids}

    except Exception as exc:
        # Collapses any storage/DB failure into a 500 with the message
        # as detail; the original traceback is not preserved.
        raise HTTPException(500, detail=str(exc))
    finally:
        # Always release the admission slot, on every exit path.
        with _active_lock:
            _active_count -= 1
|