superlocalmemory 3.4.1 → 3.4.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/README.md +9 -12
  2. package/package.json +1 -1
  3. package/pyproject.toml +11 -2
  4. package/scripts/postinstall.js +26 -7
  5. package/src/superlocalmemory/cli/commands.py +71 -60
  6. package/src/superlocalmemory/cli/daemon.py +184 -64
  7. package/src/superlocalmemory/cli/main.py +25 -2
  8. package/src/superlocalmemory/cli/service_installer.py +367 -0
  9. package/src/superlocalmemory/cli/setup_wizard.py +150 -9
  10. package/src/superlocalmemory/core/config.py +28 -0
  11. package/src/superlocalmemory/core/consolidation_engine.py +38 -1
  12. package/src/superlocalmemory/core/engine.py +9 -0
  13. package/src/superlocalmemory/core/health_monitor.py +313 -0
  14. package/src/superlocalmemory/core/reranker_worker.py +19 -5
  15. package/src/superlocalmemory/ingestion/__init__.py +13 -0
  16. package/src/superlocalmemory/ingestion/adapter_manager.py +234 -0
  17. package/src/superlocalmemory/ingestion/base_adapter.py +177 -0
  18. package/src/superlocalmemory/ingestion/calendar_adapter.py +340 -0
  19. package/src/superlocalmemory/ingestion/credentials.py +118 -0
  20. package/src/superlocalmemory/ingestion/gmail_adapter.py +369 -0
  21. package/src/superlocalmemory/ingestion/parsers.py +100 -0
  22. package/src/superlocalmemory/ingestion/transcript_adapter.py +156 -0
  23. package/src/superlocalmemory/learning/consolidation_worker.py +47 -1
  24. package/src/superlocalmemory/learning/entity_compiler.py +377 -0
  25. package/src/superlocalmemory/mcp/server.py +32 -3
  26. package/src/superlocalmemory/mcp/tools_mesh.py +249 -0
  27. package/src/superlocalmemory/mesh/__init__.py +12 -0
  28. package/src/superlocalmemory/mesh/broker.py +344 -0
  29. package/src/superlocalmemory/retrieval/entity_channel.py +12 -6
  30. package/src/superlocalmemory/server/api.py +6 -7
  31. package/src/superlocalmemory/server/routes/adapters.py +63 -0
  32. package/src/superlocalmemory/server/routes/entity.py +151 -0
  33. package/src/superlocalmemory/server/routes/ingest.py +110 -0
  34. package/src/superlocalmemory/server/routes/mesh.py +186 -0
  35. package/src/superlocalmemory/server/unified_daemon.py +693 -0
  36. package/src/superlocalmemory/storage/schema_v343.py +229 -0
  37. package/src/superlocalmemory/ui/css/neural-glass.css +1588 -0
  38. package/src/superlocalmemory/ui/index.html +134 -4
  39. package/src/superlocalmemory/ui/js/memory-chat.js +28 -1
  40. package/src/superlocalmemory/ui/js/ng-entities.js +272 -0
  41. package/src/superlocalmemory/ui/js/ng-health.js +208 -0
  42. package/src/superlocalmemory/ui/js/ng-ingestion.js +203 -0
  43. package/src/superlocalmemory/ui/js/ng-mesh.js +311 -0
  44. package/src/superlocalmemory/ui/js/ng-shell.js +471 -0
  45. package/src/superlocalmemory.egg-info/PKG-INFO +18 -14
  46. package/src/superlocalmemory.egg-info/SOURCES.txt +26 -0
  47. package/src/superlocalmemory.egg-info/requires.txt +9 -1
@@ -116,6 +116,15 @@ class MemoryEngine:
116
116
 
117
117
  self._db = DatabaseManager(self._config.db_path)
118
118
  self._db.initialize(schema)
119
+
120
+ # V3.4.3: Apply "Unified Brain" schema extensions (mesh, entity compilation, ingestion)
121
+ # Idempotent — safe to call on every init. Skips if already applied.
122
+ try:
123
+ from superlocalmemory.storage.schema_v343 import apply_v343_schema
124
+ apply_v343_schema(str(self._db.db_path))
125
+ except Exception as exc:
126
+ logger.debug("V3.4.3 schema migration: %s", exc)
127
+
119
128
  self._embedder = init_embedder(self._config)
120
129
 
121
130
  if self._caps.llm_fact_extraction:
@@ -0,0 +1,313 @@
1
+ # Copyright (c) 2026 Varun Pratap Bhardwaj / Qualixar
2
+ # Licensed under the Elastic License 2.0 - see LICENSE file
3
+ # Part of SuperLocalMemory V3 | https://qualixar.com | https://varunpratap.com
4
+
5
+ """Enterprise-grade health monitoring for the SLM Unified Daemon.
6
+
7
+ Monitors:
8
+ - Global RSS budget (kill heaviest worker if over limit)
9
+ - Worker heartbeat (kill unresponsive workers after 60s)
10
+ - Structured JSON logging (daemon.json.log alongside text logs)
11
+ - Extensible health check registry (Phase C/D/E add checks)
12
+
13
+ Part of Qualixar | Author: Varun Pratap Bhardwaj
14
+ License: Elastic-2.0
15
+ """
16
+
17
+ from __future__ import annotations
18
+
19
+ import json
20
+ import logging
21
+ import os
22
+ import threading
23
+ import time
24
+ from datetime import datetime, timezone
25
+ from pathlib import Path
26
+ from typing import Callable
27
+
28
+ logger = logging.getLogger("superlocalmemory.health_monitor")
29
+
30
+ # Try psutil — graceful fallback if not available
31
+ try:
32
+ import psutil
33
+ PSUTIL_AVAILABLE = True
34
+ except ImportError:
35
+ PSUTIL_AVAILABLE = False
36
+ logger.info("psutil not available — health monitoring limited")
37
+
38
+
39
+ # ---------------------------------------------------------------------------
40
+ # Health Check Registry (extensible by other phases)
41
+ # ---------------------------------------------------------------------------
42
+
43
+ _HEALTH_CHECKS: list[Callable[[], dict]] = []
44
+
45
+
46
+ def register_health_check(check_fn: Callable[[], dict]) -> None:
47
+ """Register a health check function. Returns dict with name, status, detail."""
48
+ _HEALTH_CHECKS.append(check_fn)
49
+
50
+
51
+ def run_all_health_checks() -> list[dict]:
52
+ """Run all registered health checks. Returns list of results."""
53
+ results = []
54
+ for check_fn in _HEALTH_CHECKS:
55
+ try:
56
+ results.append(check_fn())
57
+ except Exception as e:
58
+ results.append({
59
+ "name": getattr(check_fn, '__name__', 'unknown'),
60
+ "status": "error",
61
+ "detail": str(e),
62
+ })
63
+ return results
64
+
65
+
66
+ # ---------------------------------------------------------------------------
67
+ # Structured JSON Logger (additive — does NOT replace text logs)
68
+ # ---------------------------------------------------------------------------
69
+
70
+ _json_logger: logging.Logger | None = None
71
+
72
+
73
+ def setup_structured_logging(log_dir: Path | None = None) -> None:
74
+ """Set up JSON structured logging alongside existing text logs.
75
+
76
+ Creates a separate daemon.json.log file with RotatingFileHandler.
77
+ Text logs continue working unchanged.
78
+ """
79
+ global _json_logger
80
+
81
+ log_dir = log_dir or (Path.home() / ".superlocalmemory" / "logs")
82
+ log_dir.mkdir(parents=True, exist_ok=True)
83
+ json_log_path = log_dir / "daemon.json.log"
84
+
85
+ _json_logger = logging.getLogger("superlocalmemory.structured")
86
+ _json_logger.setLevel(logging.INFO)
87
+ _json_logger.propagate = False # Don't send to text handler
88
+
89
+ from logging.handlers import RotatingFileHandler
90
+ handler = RotatingFileHandler(
91
+ str(json_log_path), maxBytes=10 * 1024 * 1024, backupCount=5,
92
+ )
93
+ handler.setFormatter(logging.Formatter("%(message)s"))
94
+ _json_logger.addHandler(handler)
95
+
96
+
97
+ def log_structured(**fields) -> None:
98
+ """Emit a structured JSON log entry.
99
+
100
+ Always includes: timestamp, level. Caller provides the rest.
101
+ Example fields: worker_pid, memory_rss_mb, operation, latency_ms, message.
102
+ """
103
+ if _json_logger is None:
104
+ return
105
+ entry = {
106
+ "timestamp": datetime.now(timezone.utc).isoformat(),
107
+ "level": fields.pop("level", "info"),
108
+ **fields,
109
+ }
110
+ try:
111
+ _json_logger.info(json.dumps(entry, default=str))
112
+ except Exception:
113
+ pass
114
+
115
+
116
+ # ---------------------------------------------------------------------------
117
+ # Health Monitor Thread
118
+ # ---------------------------------------------------------------------------
119
+
120
+ class HealthMonitor:
121
+ """Background thread monitoring worker health, RSS budget, heartbeats.
122
+
123
+ Self-healing: if the monitor itself crashes, it logs and retries
124
+ with exponential backoff (max 5 min sleep).
125
+ """
126
+
127
+ # SLM worker command-line identifiers for child process filtering
128
+ _WORKER_IDENTIFIERS = (
129
+ "superlocalmemory.core.embedding_worker",
130
+ "superlocalmemory.core.reranker_worker",
131
+ "superlocalmemory.core.recall_worker",
132
+ )
133
+
134
+ def __init__(
135
+ self,
136
+ global_rss_budget_mb: int = 4096,
137
+ heartbeat_timeout_sec: int = 60,
138
+ check_interval_sec: int = 30,
139
+ enable_structured_logging: bool = True,
140
+ ):
141
+ self._budget_mb = global_rss_budget_mb
142
+ self._heartbeat_timeout = heartbeat_timeout_sec
143
+ self._interval = check_interval_sec
144
+ self._enable_logging = enable_structured_logging
145
+ self._thread: threading.Thread | None = None
146
+ self._stop_event = threading.Event()
147
+ self._consecutive_failures = 0
148
+
149
+ def start(self) -> None:
150
+ """Start the health monitor in a daemon thread."""
151
+ if not PSUTIL_AVAILABLE:
152
+ logger.warning("Health monitor disabled: psutil not installed")
153
+ return
154
+
155
+ if self._enable_logging:
156
+ setup_structured_logging()
157
+
158
+ self._thread = threading.Thread(
159
+ target=self._run_loop, daemon=True, name="health-monitor",
160
+ )
161
+ self._thread.start()
162
+ logger.info("Health monitor started (budget=%dMB, heartbeat=%ds)",
163
+ self._budget_mb, self._heartbeat_timeout)
164
+
165
+ # Register built-in health checks
166
+ register_health_check(self._check_daemon_health)
167
+ register_health_check(self._check_worker_health)
168
+ register_health_check(self._check_memory_budget)
169
+
170
+ def stop(self) -> None:
171
+ self._stop_event.set()
172
+
173
+ def _run_loop(self) -> None:
174
+ """Main monitoring loop with self-healing."""
175
+ while not self._stop_event.is_set():
176
+ try:
177
+ self._check_once()
178
+ self._consecutive_failures = 0
179
+ except Exception as exc:
180
+ self._consecutive_failures += 1
181
+ backoff = min(300, 30 * self._consecutive_failures)
182
+ logger.error("Health check failed (%d consecutive): %s. Backoff %ds.",
183
+ self._consecutive_failures, exc, backoff)
184
+ log_structured(
185
+ level="error", operation="health_check",
186
+ message=f"Health check failed: {exc}",
187
+ consecutive_failures=self._consecutive_failures,
188
+ )
189
+ self._stop_event.wait(backoff)
190
+ continue
191
+
192
+ self._stop_event.wait(self._interval)
193
+
194
+ def _check_once(self) -> None:
195
+ """Single health check cycle."""
196
+ proc = psutil.Process(os.getpid())
197
+ daemon_rss_mb = proc.memory_info().rss / (1024 * 1024)
198
+
199
+ # Find SLM worker children only (not adapters or other children)
200
+ children = proc.children(recursive=True)
201
+ slm_workers = []
202
+ for child in children:
203
+ try:
204
+ cmdline = " ".join(child.cmdline()).lower()
205
+ if any(ident in cmdline for ident in self._WORKER_IDENTIFIERS):
206
+ rss_mb = child.memory_info().rss / (1024 * 1024)
207
+ slm_workers.append({
208
+ "pid": child.pid,
209
+ "rss_mb": round(rss_mb, 1),
210
+ "cmdline": cmdline[:80],
211
+ })
212
+ except (psutil.NoSuchProcess, psutil.AccessDenied):
213
+ continue
214
+
215
+ total_rss_mb = daemon_rss_mb + sum(w["rss_mb"] for w in slm_workers)
216
+
217
+ # Structured log entry
218
+ log_structured(
219
+ level="info",
220
+ operation="health_check",
221
+ memory_rss_mb=round(daemon_rss_mb, 1),
222
+ total_rss_mb=round(total_rss_mb, 1),
223
+ worker_count=len(slm_workers),
224
+ workers=slm_workers,
225
+ budget_mb=self._budget_mb,
226
+ )
227
+
228
+ # RSS budget enforcement
229
+ if total_rss_mb > self._budget_mb and slm_workers:
230
+ heaviest = max(slm_workers, key=lambda w: w["rss_mb"])
231
+ logger.warning(
232
+ "RSS budget exceeded (%.0fMB > %dMB). Killing heaviest worker PID %d (%.0fMB)",
233
+ total_rss_mb, self._budget_mb, heaviest["pid"], heaviest["rss_mb"],
234
+ )
235
+ log_structured(
236
+ level="warning",
237
+ operation="rss_budget_kill",
238
+ killed_pid=heaviest["pid"],
239
+ killed_rss_mb=heaviest["rss_mb"],
240
+ total_rss_mb=round(total_rss_mb, 1),
241
+ )
242
+ try:
243
+ psutil.Process(heaviest["pid"]).terminate()
244
+ except psutil.NoSuchProcess:
245
+ pass
246
+
247
+ # Heartbeat checks delegated to WorkerPool (Phase B wiring)
248
+ # WorkerPool tracks last_heartbeat per worker. HealthMonitor
249
+ # reads it here. Actual heartbeat protocol is in worker_pool.py.
250
+ try:
251
+ from superlocalmemory.core.worker_pool import WorkerPool
252
+ pool = WorkerPool.shared()
253
+ last_hb = getattr(pool, '_last_heartbeat', {})
254
+ now = time.monotonic()
255
+ for wpid, last_time in list(last_hb.items()):
256
+ if now - last_time > self._heartbeat_timeout:
257
+ logger.warning("Worker PID %d unresponsive (no heartbeat for %ds). Killing.",
258
+ wpid, int(now - last_time))
259
+ log_structured(
260
+ level="warning",
261
+ operation="heartbeat_kill",
262
+ worker_pid=wpid,
263
+ seconds_since_heartbeat=round(now - last_time),
264
+ )
265
+ try:
266
+ psutil.Process(wpid).terminate()
267
+ except psutil.NoSuchProcess:
268
+ pass
269
+ del last_hb[wpid]
270
+ except Exception:
271
+ pass # WorkerPool not initialized yet — fine
272
+
273
+ # -- Built-in health checks for registry --
274
+
275
+ def _check_daemon_health(self) -> dict:
276
+ if not PSUTIL_AVAILABLE:
277
+ return {"name": "daemon", "status": "unknown", "detail": "psutil unavailable"}
278
+ proc = psutil.Process(os.getpid())
279
+ rss_mb = proc.memory_info().rss / (1024 * 1024)
280
+ return {
281
+ "name": "daemon",
282
+ "status": "ok" if rss_mb < 500 else "warning",
283
+ "detail": f"PID {os.getpid()}, RSS {rss_mb:.0f}MB",
284
+ }
285
+
286
+ def _check_worker_health(self) -> dict:
287
+ try:
288
+ from superlocalmemory.core.worker_pool import WorkerPool
289
+ pool = WorkerPool.shared()
290
+ wpid = pool.worker_pid
291
+ if wpid:
292
+ return {"name": "workers", "status": "ok", "detail": f"Worker PID {wpid}"}
293
+ return {"name": "workers", "status": "warning", "detail": "No active worker"}
294
+ except Exception as e:
295
+ return {"name": "workers", "status": "error", "detail": str(e)}
296
+
297
+ def _check_memory_budget(self) -> dict:
298
+ if not PSUTIL_AVAILABLE:
299
+ return {"name": "memory", "status": "unknown", "detail": "psutil unavailable"}
300
+ proc = psutil.Process(os.getpid())
301
+ total_rss = proc.memory_info().rss
302
+ for child in proc.children(recursive=True):
303
+ try:
304
+ total_rss += child.memory_info().rss
305
+ except (psutil.NoSuchProcess, psutil.AccessDenied):
306
+ pass
307
+ total_mb = total_rss / (1024 * 1024)
308
+ status = "ok" if total_mb < self._budget_mb else "critical"
309
+ return {
310
+ "name": "memory",
311
+ "status": status,
312
+ "detail": f"{total_mb:.0f}MB / {self._budget_mb}MB budget",
313
+ }
@@ -73,15 +73,29 @@ def _start_parent_watchdog() -> None:
73
73
  t.start()
74
74
 
75
75
 
76
- def _detect_onnx_variant() -> str:
77
- """Auto-detect the best ONNX model variant for the current platform."""
76
+ def _detect_onnx_variant(model_name: str = "") -> str:
77
+ """Auto-detect the best ONNX model variant for the current platform.
78
+
79
+ V3.4.2: Supports both legacy ms-marco-MiniLM (platform-specific quantized)
80
+ and new gte-modernbert-base (int8/uint8 quantized). Falls back to generic
81
+ model.onnx if platform-specific variant unavailable.
82
+ """
78
83
  arch = platform.machine().lower()
79
84
  is_64bit = struct.calcsize("P") * 8 == 64
80
85
 
86
+ # Legacy ms-marco-MiniLM models have platform-specific quantized variants
87
+ if "ms-marco" in model_name or "MiniLM" in model_name:
88
+ if sys.platform == "darwin" and arch in ("arm64", "aarch64"):
89
+ return "onnx/model_qint8_arm64.onnx"
90
+ if arch in ("x86_64", "amd64") and is_64bit:
91
+ return "onnx/model_quint8_avx2.onnx"
92
+ return "onnx/model.onnx"
93
+
94
+ # gte-modernbert-base and other modern models: int8 for ARM64, uint8 for x86
81
95
  if sys.platform == "darwin" and arch in ("arm64", "aarch64"):
82
- return "onnx/model_qint8_arm64.onnx"
96
+ return "onnx/model_int8.onnx"
83
97
  if arch in ("x86_64", "amd64") and is_64bit:
84
- return "onnx/model_quint8_avx2.onnx"
98
+ return "onnx/model_uint8.onnx"
85
99
  return "onnx/model.onnx"
86
100
 
87
101
 
@@ -239,7 +253,7 @@ def _load_model(
239
253
  if backend == "onnx":
240
254
  # Tier 1: Platform-specific quantized ONNX (fastest)
241
255
  try:
242
- onnx_file = _detect_onnx_variant()
256
+ onnx_file = _detect_onnx_variant(name)
243
257
  m = CrossEncoder(
244
258
  name, backend="onnx",
245
259
  model_kwargs={"file_name": onnx_file},
@@ -0,0 +1,13 @@
1
+ # Copyright (c) 2026 Varun Pratap Bhardwaj / Qualixar
2
+ # Licensed under the Elastic License 2.0 - see LICENSE file
3
+ # Part of SuperLocalMemory V3 | https://qualixar.com | https://varunpratap.com
4
+
5
+ """SLM Ingestion — external source adapters for Gmail, Calendar, Transcripts.
6
+
7
+ ALL adapters are OPT-IN. Nothing runs by default. User enables via:
8
+ slm adapters enable gmail
9
+ slm adapters enable calendar
10
+ slm adapters enable transcript
11
+
12
+ Adapters are stateless external processes that POST to the daemon's /ingest endpoint.
13
+ """
@@ -0,0 +1,234 @@
1
+ # Copyright (c) 2026 Varun Pratap Bhardwaj / Qualixar
2
+ # Licensed under the Elastic License 2.0 - see LICENSE file
3
+ # Part of SuperLocalMemory V3 | https://qualixar.com | https://varunpratap.com
4
+
5
+ """Adapter lifecycle manager — start, stop, enable, disable ingestion adapters.
6
+
7
+ All adapters run as separate subprocesses managed via PID files.
8
+ Config stored in ~/.superlocalmemory/adapters.json.
9
+
10
+ Part of Qualixar | Author: Varun Pratap Bhardwaj
11
+ License: Elastic-2.0
12
+ """
13
+
14
+ from __future__ import annotations
15
+
16
+ import json
17
+ import logging
18
+ import os
19
+ import subprocess
20
+ import sys
21
+ from pathlib import Path
22
+
23
+ logger = logging.getLogger("superlocalmemory.ingestion.manager")
24
+
25
+ _SLM_HOME = Path.home() / ".superlocalmemory"
26
+ _ADAPTERS_CONFIG = _SLM_HOME / "adapters.json"
27
+ _VALID_ADAPTERS = ("gmail", "calendar", "transcript")
28
+
29
+ # Module paths for each adapter
30
+ _ADAPTER_MODULES = {
31
+ "gmail": "superlocalmemory.ingestion.gmail_adapter",
32
+ "calendar": "superlocalmemory.ingestion.calendar_adapter",
33
+ "transcript": "superlocalmemory.ingestion.transcript_adapter",
34
+ }
35
+
36
+
37
+ def _load_config() -> dict:
38
+ if _ADAPTERS_CONFIG.exists():
39
+ return json.loads(_ADAPTERS_CONFIG.read_text())
40
+ return {name: {"enabled": False} for name in _VALID_ADAPTERS}
41
+
42
+
43
+ def _save_config(config: dict) -> None:
44
+ _ADAPTERS_CONFIG.parent.mkdir(parents=True, exist_ok=True)
45
+ _ADAPTERS_CONFIG.write_text(json.dumps(config, indent=2))
46
+
47
+
48
+ def _pid_file(name: str) -> Path:
49
+ return _SLM_HOME / f"adapter-{name}.pid"
50
+
51
+
52
+ def _is_running(name: str) -> tuple[bool, int | None]:
53
+ """Check if adapter is running. Returns (running, pid)."""
54
+ pf = _pid_file(name)
55
+ if not pf.exists():
56
+ return False, None
57
+ try:
58
+ pid = int(pf.read_text().strip())
59
+ try:
60
+ import psutil
61
+ return psutil.pid_exists(pid), pid
62
+ except ImportError:
63
+ os.kill(pid, 0)
64
+ return True, pid
65
+ except (ValueError, ProcessLookupError, PermissionError):
66
+ pf.unlink(missing_ok=True)
67
+ return False, None
68
+
69
+
70
+ # ---------------------------------------------------------------------------
71
+ # Public API
72
+ # ---------------------------------------------------------------------------
73
+
74
+ def list_adapters() -> list[dict]:
75
+ """List all adapters with their status."""
76
+ config = _load_config()
77
+ result = []
78
+ for name in _VALID_ADAPTERS:
79
+ ac = config.get(name, {})
80
+ running, pid = _is_running(name)
81
+ result.append({
82
+ "name": name,
83
+ "enabled": ac.get("enabled", False),
84
+ "running": running,
85
+ "pid": pid,
86
+ "tier": ac.get("tier", ""),
87
+ "watch_dir": ac.get("watch_dir", ""),
88
+ })
89
+ return result
90
+
91
+
92
+ def enable_adapter(name: str) -> dict:
93
+ """Enable an adapter in config."""
94
+ if name not in _VALID_ADAPTERS:
95
+ return {"ok": False, "error": f"Unknown adapter: {name}. Valid: {_VALID_ADAPTERS}"}
96
+ config = _load_config()
97
+ config.setdefault(name, {})["enabled"] = True
98
+ _save_config(config)
99
+ return {"ok": True, "message": f"{name} adapter enabled. Run `slm adapters start {name}` to start."}
100
+
101
+
102
+ def disable_adapter(name: str) -> dict:
103
+ """Disable an adapter. Stops it if running."""
104
+ if name not in _VALID_ADAPTERS:
105
+ return {"ok": False, "error": f"Unknown adapter: {name}"}
106
+ stop_adapter(name)
107
+ config = _load_config()
108
+ config.setdefault(name, {})["enabled"] = False
109
+ _save_config(config)
110
+ return {"ok": True, "message": f"{name} adapter disabled"}
111
+
112
+
113
+ def start_adapter(name: str) -> dict:
114
+ """Start an adapter subprocess."""
115
+ if name not in _VALID_ADAPTERS:
116
+ return {"ok": False, "error": f"Unknown adapter: {name}"}
117
+
118
+ config = _load_config()
119
+ if not config.get(name, {}).get("enabled"):
120
+ return {"ok": False, "error": f"{name} not enabled. Run `slm adapters enable {name}` first."}
121
+
122
+ running, pid = _is_running(name)
123
+ if running:
124
+ return {"ok": True, "message": f"{name} already running (PID {pid})"}
125
+
126
+ module = _ADAPTER_MODULES.get(name)
127
+ if not module:
128
+ return {"ok": False, "error": f"No module for {name}"}
129
+
130
+ cmd = [sys.executable, "-m", module]
131
+ log_dir = _SLM_HOME / "logs"
132
+ log_dir.mkdir(parents=True, exist_ok=True)
133
+ log_path = log_dir / f"adapter-{name}.log"
134
+
135
+ kwargs: dict = {}
136
+ if sys.platform == "win32":
137
+ kwargs["creationflags"] = subprocess.CREATE_NO_WINDOW
138
+ else:
139
+ kwargs["start_new_session"] = True
140
+
141
+ with open(log_path, "a") as lf:
142
+ proc = subprocess.Popen(cmd, stdout=lf, stderr=lf, **kwargs)
143
+
144
+ _pid_file(name).write_text(str(proc.pid))
145
+ return {"ok": True, "message": f"{name} started (PID {proc.pid})", "pid": proc.pid}
146
+
147
+
148
+ def stop_adapter(name: str) -> dict:
149
+ """Stop a running adapter."""
150
+ running, pid = _is_running(name)
151
+ if not running:
152
+ return {"ok": True, "message": f"{name} not running"}
153
+
154
+ try:
155
+ import psutil
156
+ proc = psutil.Process(pid)
157
+ proc.terminate()
158
+ proc.wait(timeout=10)
159
+ except ImportError:
160
+ os.kill(pid, 15) # SIGTERM
161
+ except Exception:
162
+ pass
163
+
164
+ _pid_file(name).unlink(missing_ok=True)
165
+ return {"ok": True, "message": f"{name} stopped"}
166
+
167
+
168
+ def status_adapters() -> list[dict]:
169
+ """Get detailed status of all adapters."""
170
+ return list_adapters()
171
+
172
+
173
+ # ---------------------------------------------------------------------------
174
+ # CLI handler (called from commands.py)
175
+ # ---------------------------------------------------------------------------
176
+
177
+ def handle_adapters_cli(args: list[str]) -> None:
178
+ """Handle `slm adapters <action> [name]` commands."""
179
+ if not args:
180
+ args = ["list"]
181
+
182
+ action = args[0]
183
+ name = args[1] if len(args) > 1 else ""
184
+
185
+ if action == "list":
186
+ adapters = list_adapters()
187
+ print(" Ingestion Adapters:")
188
+ print(" " + "-" * 50)
189
+ for a in adapters:
190
+ status = "running" if a["running"] else ("enabled" if a["enabled"] else "disabled")
191
+ pid_str = f" (PID {a['pid']})" if a["pid"] else ""
192
+ print(f" {a['name']:12s} {status:10s}{pid_str}")
193
+ print()
194
+
195
+ elif action == "enable":
196
+ if not name:
197
+ print(" Usage: slm adapters enable <gmail|calendar|transcript>")
198
+ return
199
+ result = enable_adapter(name)
200
+ print(f" {result.get('message', result.get('error', ''))}")
201
+
202
+ elif action == "disable":
203
+ if not name:
204
+ print(" Usage: slm adapters disable <name>")
205
+ return
206
+ result = disable_adapter(name)
207
+ print(f" {result.get('message', result.get('error', ''))}")
208
+
209
+ elif action == "start":
210
+ if not name:
211
+ print(" Usage: slm adapters start <name>")
212
+ return
213
+ result = start_adapter(name)
214
+ print(f" {result.get('message', result.get('error', ''))}")
215
+
216
+ elif action == "stop":
217
+ if not name:
218
+ print(" Usage: slm adapters stop <name>")
219
+ return
220
+ result = stop_adapter(name)
221
+ print(f" {result.get('message', result.get('error', ''))}")
222
+
223
+ elif action == "status":
224
+ adapters = status_adapters()
225
+ for a in adapters:
226
+ status = "RUNNING" if a["running"] else ("enabled" if a["enabled"] else "off")
227
+ print(f" {a['name']:12s} [{status}]", end="")
228
+ if a["pid"]:
229
+ print(f" PID={a['pid']}", end="")
230
+ print()
231
+
232
+ else:
233
+ print(f" Unknown action: {action}")
234
+ print(" Usage: slm adapters <list|enable|disable|start|stop|status> [name]")