superlocalmemory 3.4.1 → 3.4.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +9 -12
- package/package.json +1 -1
- package/pyproject.toml +11 -2
- package/scripts/postinstall.js +26 -7
- package/src/superlocalmemory/cli/commands.py +71 -60
- package/src/superlocalmemory/cli/daemon.py +184 -64
- package/src/superlocalmemory/cli/main.py +25 -2
- package/src/superlocalmemory/cli/service_installer.py +367 -0
- package/src/superlocalmemory/cli/setup_wizard.py +150 -9
- package/src/superlocalmemory/core/config.py +28 -0
- package/src/superlocalmemory/core/consolidation_engine.py +38 -1
- package/src/superlocalmemory/core/engine.py +9 -0
- package/src/superlocalmemory/core/health_monitor.py +313 -0
- package/src/superlocalmemory/core/reranker_worker.py +19 -5
- package/src/superlocalmemory/ingestion/__init__.py +13 -0
- package/src/superlocalmemory/ingestion/adapter_manager.py +234 -0
- package/src/superlocalmemory/ingestion/base_adapter.py +177 -0
- package/src/superlocalmemory/ingestion/calendar_adapter.py +340 -0
- package/src/superlocalmemory/ingestion/credentials.py +118 -0
- package/src/superlocalmemory/ingestion/gmail_adapter.py +369 -0
- package/src/superlocalmemory/ingestion/parsers.py +100 -0
- package/src/superlocalmemory/ingestion/transcript_adapter.py +156 -0
- package/src/superlocalmemory/learning/consolidation_worker.py +47 -1
- package/src/superlocalmemory/learning/entity_compiler.py +377 -0
- package/src/superlocalmemory/mcp/server.py +32 -3
- package/src/superlocalmemory/mcp/tools_mesh.py +249 -0
- package/src/superlocalmemory/mesh/__init__.py +12 -0
- package/src/superlocalmemory/mesh/broker.py +344 -0
- package/src/superlocalmemory/retrieval/entity_channel.py +12 -6
- package/src/superlocalmemory/server/api.py +6 -7
- package/src/superlocalmemory/server/routes/adapters.py +63 -0
- package/src/superlocalmemory/server/routes/entity.py +151 -0
- package/src/superlocalmemory/server/routes/ingest.py +110 -0
- package/src/superlocalmemory/server/routes/mesh.py +186 -0
- package/src/superlocalmemory/server/unified_daemon.py +693 -0
- package/src/superlocalmemory/storage/schema_v343.py +229 -0
- package/src/superlocalmemory/ui/css/neural-glass.css +1588 -0
- package/src/superlocalmemory/ui/index.html +134 -4
- package/src/superlocalmemory/ui/js/memory-chat.js +28 -1
- package/src/superlocalmemory/ui/js/ng-entities.js +272 -0
- package/src/superlocalmemory/ui/js/ng-health.js +208 -0
- package/src/superlocalmemory/ui/js/ng-ingestion.js +203 -0
- package/src/superlocalmemory/ui/js/ng-mesh.js +311 -0
- package/src/superlocalmemory/ui/js/ng-shell.js +471 -0
- package/src/superlocalmemory.egg-info/PKG-INFO +18 -14
- package/src/superlocalmemory.egg-info/SOURCES.txt +26 -0
- package/src/superlocalmemory.egg-info/requires.txt +9 -1
package/src/superlocalmemory/learning/entity_compiler.py
@@ -0,0 +1,377 @@
+# Copyright (c) 2026 Varun Pratap Bhardwaj / Qualixar
+# Licensed under the Elastic License 2.0 - see LICENSE file
+# Part of SuperLocalMemory V3 | https://qualixar.com | https://varunpratap.com
+
+"""Entity Compilation Engine — auto-generates compiled truth per entity.
+
+Builds knowledge summaries using PageRank centrality + Louvain community detection
+(Mode A extractive) or local LLM (Mode B). Per-project, per-profile scoping.
+2000 character hard limit. Read-only layer — never replaces atomic facts.
+
+Runs after consolidation (every 6 hours or on-demand).
+
+Part of Qualixar | Author: Varun Pratap Bhardwaj
+License: Elastic-2.0
+"""
+
+from __future__ import annotations
+
+import json
+import logging
+import sqlite3
+import time
+import uuid
+from datetime import datetime, timezone
+from pathlib import Path
+
+logger = logging.getLogger("superlocalmemory.entity_compiler")
+
+_MAX_COMPILED_TRUTH_CHARS = 2000
+_MAX_TIMELINE_ENTRIES = 100
+
+
+class EntityCompiler:
+    """Compiles knowledge summaries for entities from atomic facts.
+
+    Mode A: Extractive (no LLM) — PageRank + Louvain + top sentences
+    Mode B: Local LLM via Ollama — prompt with top facts
+    """
+
+    def __init__(self, memory_db: str | Path, config=None):
+        self._db_path = str(memory_db)
+        self._config = config
+        self._mode = "a"
+        if config:
+            mode = getattr(config, 'mode', None)
+            if mode:
+                self._mode = getattr(mode, 'value', str(mode)).lower()
+
+    def compile_all(self, profile_id: str) -> dict:
+        """Compile all entities that have new facts across all projects.
+
+        Returns stats: {compiled: N, skipped: N, errors: N}
+        """
+        if self._config and not getattr(self._config, 'entity_compilation_enabled', True):
+            return {"compiled": 0, "skipped": 0, "errors": 0, "reason": "disabled"}
+
+        stats = {"compiled": 0, "skipped": 0, "errors": 0}
+        conn = self._connect()
+        try:
+            # Get all distinct projects for this profile
+            projects = conn.execute(
+                "SELECT DISTINCT project_name FROM entity_profiles WHERE profile_id = ?",
+                (profile_id,),
+            ).fetchall()
+            project_names = [r[0] for r in projects] if projects else [""]
+
+            for project_name in project_names:
+                result = self._compile_project(conn, profile_id, project_name)
+                stats["compiled"] += result["compiled"]
+                stats["skipped"] += result["skipped"]
+                stats["errors"] += result["errors"]
+        finally:
+            conn.close()
+
+        if stats["compiled"] > 0:
+            logger.info("Entity compilation: %d compiled, %d skipped, %d errors",
+                        stats["compiled"], stats["skipped"], stats["errors"])
+        return stats
+
+    def compile_entity(self, profile_id: str, project_name: str,
+                       entity_id: str, entity_name: str) -> dict | None:
+        """Compile a single entity. Returns compiled truth or None."""
+        conn = self._connect()
+        try:
+            return self._compile_single(conn, profile_id, project_name,
+                                        entity_id, entity_name)
+        finally:
+            conn.close()
+
+    def _connect(self) -> sqlite3.Connection:
+        conn = sqlite3.connect(self._db_path)
+        conn.execute("PRAGMA journal_mode=WAL")
+        conn.execute("PRAGMA busy_timeout=5000")
+        conn.row_factory = sqlite3.Row
+        return conn
+
+    def _compile_project(self, conn: sqlite3.Connection, profile_id: str,
+                         project_name: str) -> dict:
+        """Compile all entities needing update in a project."""
+        stats = {"compiled": 0, "skipped": 0, "errors": 0}
+
+        # Find entities with new facts since last compilation
+        entities = conn.execute("""
+            SELECT DISTINCT ce.entity_id, ce.canonical_name, ce.entity_type
+            FROM canonical_entities ce
+            WHERE ce.profile_id = ?
+              AND (
+                EXISTS (
+                    SELECT 1 FROM atomic_facts af
+                    WHERE af.canonical_entities_json LIKE '%' || ce.entity_id || '%'
+                      AND af.profile_id = ?
+                      AND af.created_at > COALESCE(
+                          (SELECT last_compiled_at FROM entity_profiles
+                           WHERE entity_id = ce.entity_id
+                             AND profile_id = ?
+                             AND project_name = ?),
+                          '1970-01-01')
+                )
+                OR NOT EXISTS (
+                    SELECT 1 FROM entity_profiles
+                    WHERE entity_id = ce.entity_id
+                      AND profile_id = ?
+                      AND project_name = ?
+                      AND last_compiled_at IS NOT NULL
+                )
+              )
+        """, (profile_id, profile_id, profile_id, project_name,
+              profile_id, project_name)).fetchall()
+
+        for entity in entities:
+            try:
+                result = self._compile_single(
+                    conn, profile_id, project_name,
+                    entity["entity_id"], entity["canonical_name"],
+                    entity_type=entity["entity_type"],
+                )
+                if result:
+                    stats["compiled"] += 1
+                else:
+                    stats["skipped"] += 1
+            except Exception as exc:
+                logger.debug("Entity compilation error for %s: %s",
+                             entity["canonical_name"], exc)
+                stats["errors"] += 1
+
+        return stats
+
+    def _compile_single(self, conn: sqlite3.Connection, profile_id: str,
+                        project_name: str, entity_id: str, entity_name: str,
+                        entity_type: str = "unknown") -> dict | None:
+        """Compile one entity. Returns the compiled truth dict or None."""
+
+        # Gather atomic facts for this entity
+        facts = conn.execute("""
+            SELECT af.fact_id, af.content, af.confidence, af.created_at,
+                   fi.pagerank_score, fi.community_id
+            FROM atomic_facts af
+            LEFT JOIN fact_importance fi ON af.fact_id = fi.fact_id
+            WHERE af.canonical_entities_json LIKE ? AND af.profile_id = ?
+            ORDER BY fi.pagerank_score DESC NULLS LAST, af.confidence DESC
+            LIMIT 50
+        """, (f"%{entity_id}%", profile_id)).fetchall()
+
+        if not facts:
+            return None
+
+        # Compute PageRank if missing
+        has_pagerank = any(f["pagerank_score"] is not None for f in facts)
+        if not has_pagerank and len(facts) > 2:
+            self._compute_pagerank(conn, [f["fact_id"] for f in facts], profile_id)
+            # Re-fetch with scores
+            facts = conn.execute("""
+                SELECT af.fact_id, af.content, af.confidence, af.created_at,
+                       fi.pagerank_score, fi.community_id
+                FROM atomic_facts af
+                LEFT JOIN fact_importance fi ON af.fact_id = fi.fact_id
+                WHERE af.canonical_entities_json LIKE ? AND af.profile_id = ?
+                ORDER BY fi.pagerank_score DESC NULLS LAST, af.confidence DESC
+                LIMIT 50
+            """, (f"%{entity_id}%", profile_id)).fetchall()
+
+        # Generate compiled truth
+        if self._mode in ("b", "c") and len(facts) > 3:
+            compiled = self._compile_mode_b(entity_name, facts)
+            if not compiled:
+                compiled = self._compile_mode_a(entity_name, entity_type, facts)
+        else:
+            compiled = self._compile_mode_a(entity_name, entity_type, facts)
+
+        # Truncate to limit
+        compiled = self._truncate(compiled, _MAX_COMPILED_TRUTH_CHARS)
+
+        # Build timeline entry
+        now = datetime.now(timezone.utc).isoformat()
+        timeline_entry = {
+            "date": now,
+            "action": "compiled",
+            "facts_used": len(facts),
+            "mode": self._mode,
+        }
+
+        # Load existing timeline
+        existing = conn.execute(
+            "SELECT timeline, profile_entry_id FROM entity_profiles "
+            "WHERE entity_id = ? AND profile_id = ? AND project_name = ?",
+            (entity_id, profile_id, project_name),
+        ).fetchone()
+
+        timeline = []
+        if existing and existing["timeline"]:
+            try:
+                timeline = json.loads(existing["timeline"])
+            except (json.JSONDecodeError, TypeError):
+                timeline = []
+        timeline.append(timeline_entry)
+        # Cap at 100 entries
+        if len(timeline) > _MAX_TIMELINE_ENTRIES:
+            timeline = timeline[-_MAX_TIMELINE_ENTRIES:]
+
+        fact_ids = [f["fact_id"] for f in facts]
+        avg_conf = sum(f["confidence"] or 0.5 for f in facts) / max(len(facts), 1)
+
+        # Upsert
+        if existing:
+            conn.execute("""
+                UPDATE entity_profiles SET
+                    compiled_truth = ?, timeline = ?, fact_ids_json = ?,
+                    last_compiled_at = ?, compilation_confidence = ?, last_updated = ?
+                WHERE entity_id = ? AND profile_id = ? AND project_name = ?
+            """, (compiled, json.dumps(timeline), json.dumps(fact_ids),
+                  now, round(avg_conf, 3), now,
+                  entity_id, profile_id, project_name))
+        else:
+            entry_id = str(uuid.uuid4())[:16]
+            conn.execute("""
+                INSERT INTO entity_profiles
+                    (profile_entry_id, entity_id, profile_id, project_name,
+                     knowledge_summary, compiled_truth, timeline, fact_ids_json,
+                     last_compiled_at, compilation_confidence, last_updated)
+                VALUES (?, ?, ?, ?, '', ?, ?, ?, ?, ?, ?)
+            """, (entry_id, entity_id, profile_id, project_name,
+                  compiled, json.dumps(timeline), json.dumps(fact_ids),
+                  now, round(avg_conf, 3), now))
+
+        conn.commit()
+
+        return {
+            "entity_name": entity_name,
+            "compiled_truth": compiled,
+            "facts_used": len(facts),
+            "confidence": round(avg_conf, 3),
+        }
+
+    # -- Mode A: Extractive (no LLM) --
+
+    def _compile_mode_a(self, entity_name: str, entity_type: str,
+                        facts: list) -> str:
+        """Extract top sentences by PageRank, grouped by community."""
+        header = f"{entity_name}"
+        if entity_type and entity_type != "unknown":
+            header += f" ({entity_type})"
+        header += "\n"
+
+        # Group facts by community
+        communities: dict[int, list] = {}
+        for f in facts:
+            cid = f["community_id"] or 0
+            communities.setdefault(cid, []).append(f)
+
+        sentences = []
+        seen_content = set()
+        for cid in sorted(communities.keys()):
+            community_facts = communities[cid]
+            # Top 3 facts per community
+            for fact in community_facts[:3]:
+                content = fact["content"]
+                # Extract first sentence
+                first_sent = content.split(". ")[0].strip()
+                if not first_sent.endswith("."):
+                    first_sent += "."
+                # Dedup by exact match
+                normalized = first_sent.lower().strip()
+                if normalized not in seen_content:
+                    seen_content.add(normalized)
+                    sentences.append(first_sent)
+
+        body = " ".join(sentences)
+        return header + body
+
+    # -- Mode B: LLM via Ollama --
+
+    def _compile_mode_b(self, entity_name: str, facts: list) -> str | None:
+        """Summarize via local LLM (Ollama). Returns None on failure."""
+        try:
+            import urllib.request
+            api_base = "http://localhost:11434"
+            if self._config and hasattr(self._config, 'llm'):
+                api_base = getattr(self._config.llm, 'api_base', api_base) or api_base
+            model = "llama3.2"
+            if self._config and hasattr(self._config, 'llm'):
+                model = getattr(self._config.llm, 'model', model) or model
+
+            top_facts = "\n".join(
+                f"- {f['content']}" for f in facts[:20]
+            )
+            prompt = (
+                f"Summarize these facts about {entity_name} into a concise profile. "
+                f"Maximum 2000 characters. Include key relationships, decisions, status. "
+                f"Organize by topic, not chronology. Flag contradictions.\n\n"
+                f"Facts (by importance):\n{top_facts}"
+            )
+
+            payload = json.dumps({
+                "model": model,
+                "prompt": prompt,
+                "stream": False,
+                "options": {"num_predict": 500},
+            }).encode()
+
+            req = urllib.request.Request(
+                f"{api_base}/api/generate",
+                data=payload,
+                headers={"Content-Type": "application/json"},
+            )
+            resp = urllib.request.urlopen(req, timeout=30)
+            result = json.loads(resp.read().decode())
+            text = result.get("response", "").strip()
+            return text if text else None
+        except Exception as exc:
+            logger.debug("Mode B compilation failed, falling back to Mode A: %s", exc)
+            return None
+
+    # -- Helpers --
+
+    def _compute_pagerank(self, conn: sqlite3.Connection,
+                          fact_ids: list[str], profile_id: str) -> None:
+        """Compute PageRank for a set of facts. Stores in fact_importance."""
+        try:
+            import networkx as nx
+            G = nx.Graph()
+            for fid in fact_ids:
+                G.add_node(fid)
+            # Add edges based on shared entities
+            for i, fid1 in enumerate(fact_ids):
+                for fid2 in fact_ids[i + 1:]:
+                    # Simple heuristic: facts about same entity are connected
+                    G.add_edge(fid1, fid2, weight=0.5)
+
+            if len(G.nodes) < 2:
+                return
+
+            scores = nx.pagerank(G, alpha=0.85)
+            now = datetime.now(timezone.utc).isoformat()
+
+            for fid, score in scores.items():
+                conn.execute("""
+                    INSERT INTO fact_importance (fact_id, profile_id, pagerank_score, computed_at)
+                    VALUES (?, ?, ?, ?)
+                    ON CONFLICT(fact_id) DO UPDATE SET pagerank_score=excluded.pagerank_score,
+                                                       computed_at=excluded.computed_at
+                """, (fid, profile_id, round(score, 6), now))
+            conn.commit()
+        except ImportError:
+            logger.debug("NetworkX not available — skipping PageRank")
+        except Exception as exc:
+            logger.debug("PageRank computation failed: %s", exc)
+
+    @staticmethod
+    def _truncate(text: str, max_chars: int) -> str:
+        """Truncate at sentence boundary within char limit."""
+        if len(text) <= max_chars:
+            return text
+        truncated = text[:max_chars]
+        last_period = truncated.rfind(". ")
+        if last_period > max_chars // 2:
+            return truncated[:last_period + 1]
+        return truncated.rstrip() + "..."
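For orientation, here is how the new compiler might be driven. This is a minimal sketch against the API in the hunk above; the database path, profile id, and entity identifiers are hypothetical placeholders, and an instance built without a config runs Mode A (extractive).

```python
# Hypothetical usage sketch — path, profile id, and entity ids are placeholders.
from pathlib import Path

from superlocalmemory.learning.entity_compiler import EntityCompiler

db = Path.home() / ".superlocalmemory" / "memory.db"  # assumed DB location
compiler = EntityCompiler(db)  # no config => Mode A (extractive)

# Recompile every entity with new facts, across all projects of a profile.
stats = compiler.compile_all(profile_id="default")
print(stats)  # e.g. {"compiled": 3, "skipped": 12, "errors": 0}

# Or target one entity on demand.
result = compiler.compile_entity(
    profile_id="default",
    project_name="demo",
    entity_id="ent-0001",
    entity_name="Acme Corp",
)
if result:
    print(result["compiled_truth"])
```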
package/src/superlocalmemory/mcp/server.py
@@ -65,7 +65,7 @@ def reset_engine():
 
 import os as _os_reg
 
-_ESSENTIAL_TOOLS: frozenset[str] = frozenset({
+_ESSENTIAL_TOOLS: set[str] = {
     # Core memory operations (8)
     "remember", "recall", "search", "fetch",
     "list_recent", "delete_memory", "update_memory", "get_status",
@@ -76,7 +76,25 @@ _ESSENTIAL_TOOLS: frozenset[str] = frozenset({
     # Infinite memory + learning (4)
     "consolidate_cognitive", "get_soft_prompts",
     "set_mode", "report_outcome",
-})
+}
+
+# v3.4.4: Mesh tools — enabled if mesh_enabled in config or SLM_MCP_MESH_TOOLS=1
+_mesh_tools_enabled = _os_reg.environ.get("SLM_MCP_MESH_TOOLS", "").lower() in ("1", "true")
+if not _mesh_tools_enabled:
+    try:
+        from superlocalmemory.core.config import SLMConfig
+        _cfg = SLMConfig.load()
+        _mesh_tools_enabled = getattr(_cfg, "mesh_enabled", True)  # default True in v3.4.3+
+    except Exception:
+        _mesh_tools_enabled = True  # Safe default — mesh broker is always in daemon
+
+if _mesh_tools_enabled:
+    _ESSENTIAL_TOOLS.update({
+        "mesh_summary", "mesh_peers", "mesh_send", "mesh_inbox",
+        "mesh_state", "mesh_lock", "mesh_events", "mesh_status",
+    })
+
+_ESSENTIAL_TOOLS = frozenset(_ESSENTIAL_TOOLS)
 
 _all_tools = _os_reg.environ.get("SLM_MCP_ALL_TOOLS") == "1"
 
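One consequence of the gate above: `SLM_MCP_MESH_TOOLS` can only switch mesh tools on. Any other value, including `0`, falls through to the config lookup, which itself defaults to enabled. A small sketch of that parsing (the helper name is illustrative, not from the package):

```python
import os

def mesh_env_enabled() -> bool:
    # Same check as the hunk above: case-insensitive, only "1"/"true" count.
    return os.environ.get("SLM_MCP_MESH_TOOLS", "").lower() in ("1", "true")

os.environ["SLM_MCP_MESH_TOOLS"] = "TRUE"
assert mesh_env_enabled()

os.environ["SLM_MCP_MESH_TOOLS"] = "0"
assert not mesh_env_enabled()  # falls through to config, which defaults to True
```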
@@ -115,6 +133,7 @@ from superlocalmemory.mcp.tools_active import register_active_tools
 from superlocalmemory.mcp.tools_v33 import register_v33_tools
 from superlocalmemory.mcp.resources import register_resources
 from superlocalmemory.mcp.tools_code_graph import register_code_graph_tools
+from superlocalmemory.mcp.tools_mesh import register_mesh_tools
 
 register_core_tools(_target, get_engine)
 register_v28_tools(_target, get_engine)
@@ -123,6 +142,7 @@ register_active_tools(_target, get_engine)
 register_v33_tools(_target, get_engine)
 register_resources(server, get_engine)  # Resources always registered (not tools)
 register_code_graph_tools(_target, get_engine)  # CodeGraph: filtered like other tools (SLM_MCP_ALL_TOOLS=1 to show all)
+register_mesh_tools(_target, get_engine)  # v3.4.4: Mesh P2P tools — ships with SLM, no separate slm-mesh needed
 
 
 # V3.3.21: Eager engine warmup — start initializing BEFORE first tool call.
@@ -132,7 +152,7 @@ register_code_graph_tools(_target, get_engine)  # CodeGraph: filtered like other
 # the first tool call arrives (1-2s later), the engine is already warm.
 # This applies to ALL IDEs: Claude Code, Cursor, Antigravity, Gemini CLI, etc.
 def _eager_warmup() -> None:
-    """Pre-warm engine
+    """Pre-warm engine + ensure daemon is running (background thread)."""
     import logging
     _logger = logging.getLogger(__name__)
     try:
@@ -141,6 +161,15 @@ def _eager_warmup() -> None:
     except Exception as exc:
         _logger.debug("MCP engine pre-warmup failed (non-fatal): %s", exc)
 
+    # V3.4.4: Also ensure daemon is running for dashboard/mesh/health features.
+    # This runs in background — doesn't block MCP tool registration.
+    try:
+        from superlocalmemory.cli.daemon import ensure_daemon
+        if ensure_daemon():
+            _logger.info("Daemon auto-started by MCP server")
+    except Exception as exc:
+        _logger.debug("Daemon auto-start failed (non-fatal): %s", exc)
+
 import threading
 _warmup_thread = threading.Thread(target=_eager_warmup, daemon=True, name="mcp-warmup")
 _warmup_thread.start()
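The warmup hunks follow a standard shape: slow initialization runs on a `daemon=True` thread and every failure is logged rather than raised, so MCP tool registration is never blocked. A self-contained sketch of the same pattern, with illustrative names rather than the package's own:

```python
import logging
import threading
import time

logger = logging.getLogger("warmup-demo")

def _eager_warmup() -> None:
    """Pre-warm expensive state in the background; log failures, never raise."""
    try:
        time.sleep(0.5)  # stand-in for engine/daemon initialization
        logger.info("engine warm")
    except Exception as exc:
        logger.debug("warmup failed (non-fatal): %s", exc)

# daemon=True keeps the thread from blocking interpreter shutdown.
threading.Thread(target=_eager_warmup, daemon=True, name="warmup").start()
```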