superlocalmemory 3.4.1 → 3.4.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47) hide show
  1. package/README.md +9 -12
  2. package/package.json +1 -1
  3. package/pyproject.toml +11 -2
  4. package/scripts/postinstall.js +26 -7
  5. package/src/superlocalmemory/cli/commands.py +71 -60
  6. package/src/superlocalmemory/cli/daemon.py +184 -64
  7. package/src/superlocalmemory/cli/main.py +25 -2
  8. package/src/superlocalmemory/cli/service_installer.py +367 -0
  9. package/src/superlocalmemory/cli/setup_wizard.py +150 -9
  10. package/src/superlocalmemory/core/config.py +28 -0
  11. package/src/superlocalmemory/core/consolidation_engine.py +38 -1
  12. package/src/superlocalmemory/core/engine.py +9 -0
  13. package/src/superlocalmemory/core/health_monitor.py +313 -0
  14. package/src/superlocalmemory/core/reranker_worker.py +19 -5
  15. package/src/superlocalmemory/ingestion/__init__.py +13 -0
  16. package/src/superlocalmemory/ingestion/adapter_manager.py +234 -0
  17. package/src/superlocalmemory/ingestion/base_adapter.py +177 -0
  18. package/src/superlocalmemory/ingestion/calendar_adapter.py +340 -0
  19. package/src/superlocalmemory/ingestion/credentials.py +118 -0
  20. package/src/superlocalmemory/ingestion/gmail_adapter.py +369 -0
  21. package/src/superlocalmemory/ingestion/parsers.py +100 -0
  22. package/src/superlocalmemory/ingestion/transcript_adapter.py +156 -0
  23. package/src/superlocalmemory/learning/consolidation_worker.py +47 -1
  24. package/src/superlocalmemory/learning/entity_compiler.py +377 -0
  25. package/src/superlocalmemory/mcp/server.py +32 -3
  26. package/src/superlocalmemory/mcp/tools_mesh.py +249 -0
  27. package/src/superlocalmemory/mesh/__init__.py +12 -0
  28. package/src/superlocalmemory/mesh/broker.py +344 -0
  29. package/src/superlocalmemory/retrieval/entity_channel.py +12 -6
  30. package/src/superlocalmemory/server/api.py +6 -7
  31. package/src/superlocalmemory/server/routes/adapters.py +63 -0
  32. package/src/superlocalmemory/server/routes/entity.py +151 -0
  33. package/src/superlocalmemory/server/routes/ingest.py +110 -0
  34. package/src/superlocalmemory/server/routes/mesh.py +186 -0
  35. package/src/superlocalmemory/server/unified_daemon.py +693 -0
  36. package/src/superlocalmemory/storage/schema_v343.py +229 -0
  37. package/src/superlocalmemory/ui/css/neural-glass.css +1588 -0
  38. package/src/superlocalmemory/ui/index.html +134 -4
  39. package/src/superlocalmemory/ui/js/memory-chat.js +28 -1
  40. package/src/superlocalmemory/ui/js/ng-entities.js +272 -0
  41. package/src/superlocalmemory/ui/js/ng-health.js +208 -0
  42. package/src/superlocalmemory/ui/js/ng-ingestion.js +203 -0
  43. package/src/superlocalmemory/ui/js/ng-mesh.js +311 -0
  44. package/src/superlocalmemory/ui/js/ng-shell.js +471 -0
  45. package/src/superlocalmemory.egg-info/PKG-INFO +18 -14
  46. package/src/superlocalmemory.egg-info/SOURCES.txt +26 -0
  47. package/src/superlocalmemory.egg-info/requires.txt +9 -1
@@ -0,0 +1,367 @@
1
+ # Copyright (c) 2026 Varun Pratap Bhardwaj / Qualixar
2
+ # Licensed under the Elastic License 2.0 - see LICENSE file
3
+ # Part of SuperLocalMemory V3 | https://qualixar.com | https://varunpratap.com
4
+
5
+ """OS-level service installer — daemon survives reboots.
6
+
7
+ Cross-platform:
8
+ - macOS: LaunchAgent plist (user-level, no sudo)
9
+ - Linux: systemd user service (no sudo)
10
+ - Windows: Task Scheduler (runs at logon)
11
+
12
+ Usage:
13
+ slm serve install — install OS service
14
+ slm serve uninstall — remove OS service
15
+ slm serve status — show daemon + service status
16
+ """
17
+
18
+ from __future__ import annotations
19
+
20
+ import logging
21
+ import os
22
+ import shutil
23
+ import subprocess
24
+ import sys
25
+ from pathlib import Path
26
+
27
+ logger = logging.getLogger(__name__)
28
+
29
+ _SERVICE_NAME = "com.qualixar.superlocalmemory"
30
+ _DISPLAY_NAME = "SuperLocalMemory Daemon"
31
+
32
+
33
def get_python_path() -> str:
    """Return the absolute path of the Python interpreter running SLM."""
    # The daemon must be launched with the same interpreter (and thus the
    # same site-packages) that installed SLM.
    interpreter = sys.executable
    return interpreter
36
+
37
+
38
def get_log_path() -> Path:
    """Return the daemon stdout log file path, creating the log dir if needed."""
    logs = Path.home() / ".superlocalmemory" / "logs"
    logs.mkdir(parents=True, exist_ok=True)
    return logs / "daemon.log"
42
+
43
+
44
def get_error_log_path() -> Path:
    """Return the daemon stderr log file path, creating the log dir if needed."""
    logs = Path.home() / ".superlocalmemory" / "logs"
    logs.mkdir(parents=True, exist_ok=True)
    return logs / "daemon-error.log"
48
+
49
+
50
+ # ─── macOS: LaunchAgent ───────────────────────────────────────────────────
51
+
52
def _macos_plist_path() -> Path:
    """Location of the per-user LaunchAgent plist (no sudo required)."""
    agents_dir = Path.home() / "Library" / "LaunchAgents"
    return agents_dir / (_SERVICE_NAME + ".plist")
54
+
55
+
56
def _macos_plist_content() -> str:
    """Render the LaunchAgent plist XML for the SLM daemon.

    KeepAlive with SuccessfulExit=false restarts the daemon only after a
    crash; ThrottleInterval=30 prevents rapid respawn loops. PATH/HOME are
    set explicitly because launchd provides a minimal environment.

    Fix: all interpolated paths are now XML-escaped so a home directory or
    interpreter path containing '&', '<' or '>' cannot corrupt the plist.
    """
    # Stdlib-only; imported locally to keep module import cost unchanged.
    from xml.sax.saxutils import escape

    python_raw = get_python_path()
    python = escape(python_raw)
    log = escape(str(get_log_path()))
    err_log = escape(str(get_error_log_path()))
    bin_dir = escape(str(Path(python_raw).parent))
    home = escape(str(Path.home()))

    return f"""<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN"
 "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
    <key>Label</key>
    <string>{_SERVICE_NAME}</string>
    <key>ProgramArguments</key>
    <array>
        <string>{python}</string>
        <string>-m</string>
        <string>superlocalmemory.server.unified_daemon</string>
        <string>--start</string>
    </array>
    <key>RunAtLoad</key>
    <true/>
    <key>KeepAlive</key>
    <dict>
        <key>SuccessfulExit</key>
        <false/>
    </dict>
    <key>ThrottleInterval</key>
    <integer>30</integer>
    <key>StandardOutPath</key>
    <string>{log}</string>
    <key>StandardErrorPath</key>
    <string>{err_log}</string>
    <key>EnvironmentVariables</key>
    <dict>
        <key>PATH</key>
        <string>/usr/local/bin:/usr/bin:/bin:{bin_dir}</string>
        <key>HOME</key>
        <string>{home}</string>
    </dict>
</dict>
</plist>
"""
98
+
99
+
100
def install_macos() -> bool:
    """Write the LaunchAgent plist and (re)load it via launchctl.

    An already-loaded agent is unloaded first so repeated installs pick up
    plist changes. Returns True when the agent loaded successfully.
    """
    plist = _macos_plist_path()
    plist.parent.mkdir(parents=True, exist_ok=True)
    plist.write_text(_macos_plist_content())
    logger.info("Wrote LaunchAgent plist: %s", plist)

    # Best-effort unload; failure just means the agent wasn't loaded yet.
    try:
        subprocess.run(
            ["launchctl", "unload", str(plist)],
            capture_output=True, timeout=10,
        )
    except Exception:
        pass

    # Fix: the load call was previously unguarded — a missing launchctl or a
    # hang raised out of the installer instead of returning False, unlike
    # install_linux() which handles the equivalent failures.
    try:
        result = subprocess.run(
            ["launchctl", "load", str(plist)],
            capture_output=True, text=True, timeout=10,
        )
    except (FileNotFoundError, subprocess.TimeoutExpired) as exc:
        logger.error("launchctl load failed: %s", exc)
        return False

    if result.returncode == 0:
        logger.info("LaunchAgent loaded successfully")
        return True
    logger.error("launchctl load failed: %s", result.stderr)
    return False
125
+
126
+
127
def uninstall_macos() -> bool:
    """Unload and delete the LaunchAgent plist; safe to call when absent."""
    plist = _macos_plist_path()
    if not plist.exists():
        return True
    try:
        subprocess.run(
            ["launchctl", "unload", str(plist)],
            capture_output=True, timeout=10,
        )
    except Exception:
        # Best-effort: the agent may never have been loaded.
        pass
    plist.unlink()
    logger.info("LaunchAgent removed: %s", plist)
    return True
140
+
141
+
142
def status_macos() -> dict:
    """Report LaunchAgent state for the SLM daemon.

    Fix: the launchctl call was unguarded — a missing binary or a timeout
    raised instead of reporting "not installed", unlike status_linux() and
    status_windows() which catch and degrade gracefully.
    """
    try:
        result = subprocess.run(
            ["launchctl", "list", _SERVICE_NAME],
            capture_output=True, text=True, timeout=10,
        )
        installed = result.returncode == 0
        details = result.stdout.strip() if installed else "Not installed"
    except Exception:
        installed = False
        details = "Not installed"
    return {
        "platform": "macOS",
        "service_type": "LaunchAgent",
        "installed": installed,
        "plist_path": str(_macos_plist_path()),
        "details": details,
    }
155
+
156
+
157
+ # ─── Linux: systemd user service ──────────────────────────────────────────
158
+
159
def _linux_service_path() -> Path:
    """Location of the per-user systemd unit file (no sudo required)."""
    user_units = Path.home() / ".config" / "systemd" / "user"
    return user_units / "superlocalmemory.service"
161
+
162
+
163
def _linux_service_content() -> str:
    """Render the systemd user-unit text for the SLM daemon.

    Restart=on-failure + RestartSec=30 mirrors the macOS KeepAlive/throttle
    behaviour; PATH/HOME are pinned because user units inherit a sparse env.
    """
    python = get_python_path()
    out_log = get_log_path()
    err_log = get_error_log_path()
    bin_dir = Path(python).parent
    home = Path.home()

    return f"""[Unit]
Description={_DISPLAY_NAME}
After=network.target

[Service]
Type=simple
ExecStart={python} -m superlocalmemory.server.unified_daemon --start
Restart=on-failure
RestartSec=30
StandardOutput=append:{out_log}
StandardError=append:{err_log}
Environment=HOME={home}
Environment=PATH=/usr/local/bin:/usr/bin:/bin:{bin_dir}

[Install]
WantedBy=default.target
"""
184
+
185
+
186
def install_linux() -> bool:
    """Write a systemd user unit, enable it, and start it.

    Runs entirely as the current user (no sudo). Returns True when
    `systemctl --user start` succeeds.
    """
    service = _linux_service_path()
    service.parent.mkdir(parents=True, exist_ok=True)
    service.write_text(_linux_service_content())
    logger.info("Wrote systemd user service: %s", service)

    try:
        subprocess.run(["systemctl", "--user", "daemon-reload"], capture_output=True, timeout=10)
        subprocess.run(["systemctl", "--user", "enable", "superlocalmemory"], capture_output=True, timeout=10)
        result = subprocess.run(
            ["systemctl", "--user", "start", "superlocalmemory"],
            capture_output=True, text=True, timeout=10,
        )
        if result.returncode == 0:
            logger.info("systemd user service started")
            return True
        logger.error("systemctl start failed: %s", result.stderr)
        return False
    except FileNotFoundError:
        logger.warning("systemctl not found — systemd not available on this system")
        return False
    except subprocess.TimeoutExpired:
        # Fix: a hung systemctl (e.g. no D-Bus user session over SSH)
        # previously propagated out of the installer and crashed the wizard.
        logger.error("systemctl timed out — user session bus may be unavailable")
        return False
208
+
209
+
210
def uninstall_linux() -> bool:
    """Stop, disable, and delete the systemd user unit (best-effort)."""
    for action in ("stop", "disable"):
        try:
            subprocess.run(
                ["systemctl", "--user", action, "superlocalmemory"],
                capture_output=True, timeout=10,
            )
        except Exception:
            pass

    unit = _linux_service_path()
    if unit.exists():
        unit.unlink()
        logger.info("systemd user service removed: %s", unit)

    # Tell systemd the unit file is gone; ignore failures (systemd absent).
    try:
        subprocess.run(["systemctl", "--user", "daemon-reload"], capture_output=True, timeout=10)
    except Exception:
        pass
    return True
225
+
226
+
227
def status_linux() -> dict:
    """Report systemd user-service state for SLM.

    Fix: the original reported ``installed: True`` whenever systemctl ran at
    all — even if the unit file had never been written ("is-active" on an
    unknown unit still exits without raising). Installation is now derived
    from the presence of the unit file itself.
    """
    installed = _linux_service_path().exists()
    try:
        result = subprocess.run(
            ["systemctl", "--user", "is-active", "superlocalmemory"],
            capture_output=True, text=True, timeout=10,
        )
        state = result.stdout.strip()
        return {
            "platform": "Linux",
            "service_type": "systemd user service",
            "installed": installed,
            "active": state == "active",
            "details": state,
        }
    except Exception:
        # systemctl missing or hung — report what the filesystem shows.
        return {"platform": "Linux", "service_type": "systemd", "installed": installed}
243
+
244
+
245
+ # ─── Windows: Task Scheduler ─────────────────────────────────────────────
246
+
247
+ _WINDOWS_TASK_NAME = "SuperLocalMemory"
248
+
249
+
250
def install_windows() -> bool:
    """Register a Task Scheduler logon task that launches the daemon.

    A VBS wrapper is used so Python runs without flashing a console window.
    Returns True when ``schtasks /Create`` succeeds.

    Fix: removed an unused local (``log = get_log_path()``) — logging is
    configured by the daemon itself, not by the scheduled task.
    """
    python = get_python_path()

    # VBS wrapper: window style 0 hides the console; False = don't wait.
    vbs_path = Path.home() / ".superlocalmemory" / "start-daemon.vbs"
    vbs_path.parent.mkdir(parents=True, exist_ok=True)
    vbs_content = (
        f'Set WshShell = CreateObject("WScript.Shell")\n'
        f'WshShell.Run """{python}"" -m superlocalmemory.server.unified_daemon --start", 0, False\n'
    )
    vbs_path.write_text(vbs_content)

    # Drop any stale task so /Create starts clean.
    try:
        subprocess.run(
            ["schtasks", "/Delete", "/TN", _WINDOWS_TASK_NAME, "/F"],
            capture_output=True, timeout=10,
        )
    except Exception:
        pass

    try:
        result = subprocess.run(
            [
                "schtasks", "/Create",
                "/TN", _WINDOWS_TASK_NAME,
                "/TR", f'wscript.exe "{vbs_path}"',
                "/SC", "ONLOGON",
                "/RL", "LIMITED",
                "/F",
            ],
            capture_output=True, text=True, timeout=10,
        )
        if result.returncode == 0:
            logger.info("Windows Task Scheduler task created: %s", _WINDOWS_TASK_NAME)
            return True
        logger.error("schtasks create failed: %s", result.stderr)
        return False
    except FileNotFoundError:
        logger.warning("schtasks not found — not a standard Windows system")
        return False
294
+
295
+
296
def uninstall_windows() -> bool:
    """Delete the SLM Task Scheduler task and its VBS launcher (idempotent)."""
    try:
        subprocess.run(
            ["schtasks", "/Delete", "/TN", _WINDOWS_TASK_NAME, "/F"],
            capture_output=True, timeout=10,
        )
        logger.info("Windows Task Scheduler task removed")
    except Exception:
        # schtasks missing or task absent — nothing to remove.
        pass

    launcher = Path.home() / ".superlocalmemory" / "start-daemon.vbs"
    if launcher.exists():
        launcher.unlink()
    return True
311
+
312
+
313
def status_windows() -> dict:
    """Report Task Scheduler state for the SLM logon task."""
    try:
        query = subprocess.run(
            ["schtasks", "/Query", "/TN", _WINDOWS_TASK_NAME, "/FO", "LIST"],
            capture_output=True, text=True, timeout=10,
        )
    except Exception:
        # schtasks unavailable — treat as not installed rather than raising.
        return {"platform": "Windows", "service_type": "Task Scheduler", "installed": False}

    found = query.returncode == 0
    return {
        "platform": "Windows",
        "service_type": "Task Scheduler",
        "installed": found,
        "task_name": _WINDOWS_TASK_NAME,
        "details": query.stdout.strip()[:200] if found else "Not installed",
    }
329
+
330
+
331
+ # ─── Cross-platform dispatcher ───────────────────────────────────────────
332
+
333
def install_service() -> bool:
    """Install the OS-level auto-start service for the current platform."""
    platform = sys.platform
    if platform == "darwin":
        return install_macos()
    if platform == "win32":
        return install_windows()
    if platform.startswith("linux"):
        return install_linux()
    logger.warning("Unsupported platform: %s", platform)
    return False
344
+
345
+
346
def uninstall_service() -> bool:
    """Remove the OS-level service for the current platform."""
    platform = sys.platform
    if platform == "darwin":
        return uninstall_macos()
    if platform == "win32":
        return uninstall_windows()
    if platform.startswith("linux"):
        return uninstall_linux()
    return False
356
+
357
+
358
def service_status() -> dict:
    """Return OS-level service status for the current platform."""
    platform = sys.platform
    if platform == "darwin":
        return status_macos()
    if platform == "win32":
        return status_windows()
    if platform.startswith("linux"):
        return status_linux()
    return {"platform": platform, "installed": False, "details": "Unsupported platform"}
@@ -259,7 +259,7 @@ def run_wizard(auto: bool = False) -> None:
259
259
 
260
260
  print()
261
261
  print("╔══════════════════════════════════════════════════════════╗")
262
- print("║ SuperLocalMemory V3 — Setup Wizard ║")
262
+ print("║ SuperLocalMemory V3 — The Unified Brain ║")
263
263
  print("║ by Varun Pratap Bhardwaj / Qualixar ║")
264
264
  print("╚══════════════════════════════════════════════════════════╝")
265
265
  print()
@@ -373,9 +373,9 @@ def run_wizard(auto: bool = False) -> None:
373
373
  else:
374
374
  print(f"\n ✓ CodeGraph disabled (enable later in {cg_config_path})")
375
375
 
376
- # -- Step 4: Download embedding model --
376
+ # -- Step 4: Download models --
377
377
  print()
378
- print("─── Step 4/6: Download Embedding Model ───")
378
+ print("─── Step 4/9: Download Embedding Model ───")
379
379
 
380
380
  if not st_ok:
381
381
  print(" ⚠ Skipped (sentence-transformers not installed)")
@@ -385,18 +385,129 @@ def run_wizard(auto: bool = False) -> None:
385
385
  if not embed_ok:
386
386
  print(" ⚠ Model will download on first use (may take a few minutes)")
387
387
 
388
- # -- Step 4: Download reranker model --
389
388
  print()
390
- print("─── Step 5/6: Download Reranker Model ───")
389
+ print("─── Step 4b/9: Download Reranker Model ───")
391
390
 
392
391
  if not st_ok:
393
392
  print(" ⚠ Skipped (sentence-transformers not installed)")
394
393
  else:
395
394
  _download_reranker(_RERANKER_MODEL)
396
395
 
397
- # -- Step 5: Verification --
396
+ # -- Step 5: Daemon Configuration (v3.4.3) --
397
+ print()
398
+ print("─── Step 5/9: Daemon Configuration ───")
399
+ print()
400
+ print(" The SLM daemon runs in the background for instant memory access.")
401
+ print()
402
+ print(" [1] 24/7 Always-On (recommended — brain never sleeps)")
403
+ print(" [2] Auto-shutdown after idle (saves RAM when not coding)")
398
404
  print()
399
- print("─── Step 6/6: Verification ───")
405
+
406
+ if interactive:
407
+ daemon_choice = _prompt(" Select daemon mode [1/2] (default: 1): ", "1")
408
+ else:
409
+ daemon_choice = "1"
410
+ print(" Auto-selecting 24/7 mode (non-interactive)")
411
+
412
+ if daemon_choice == "2":
413
+ if interactive:
414
+ timeout_choice = _prompt(" Idle timeout [30m/1h/2h] (default: 30m): ", "30m")
415
+ else:
416
+ timeout_choice = "30m"
417
+ timeout_map = {"30m": 1800, "1h": 3600, "2h": 7200}
418
+ config.daemon_idle_timeout = timeout_map.get(timeout_choice, 1800)
419
+ print(f"\n ✓ Auto-shutdown after {timeout_choice} idle")
420
+ else:
421
+ config.daemon_idle_timeout = 0
422
+ print("\n ✓ 24/7 Always-On mode")
423
+
424
+ config.save()
425
+
426
+ # -- Step 6: Mesh Communication (v3.4.3) --
427
+ print()
428
+ print("─── Step 6/9: Mesh Communication ───")
429
+ print()
430
+ print(" SLM Mesh enables agent-to-agent P2P communication.")
431
+ print(" Multiple AI sessions can share knowledge in real-time.")
432
+ print()
433
+ print(" [Y] Enable Mesh (recommended)")
434
+ print(" [N] Disable Mesh")
435
+ print()
436
+
437
+ if interactive:
438
+ mesh_choice = _prompt(" Enable Mesh? [Y/n] (default: Y): ", "y").lower()
439
+ else:
440
+ mesh_choice = "y"
441
+ print(" Auto-enabling Mesh (non-interactive)")
442
+
443
+ config.mesh_enabled = mesh_choice in ("", "y", "yes")
444
+ config.save()
445
+ print(f"\n ✓ Mesh {'enabled' if config.mesh_enabled else 'disabled'}")
446
+
447
+ # -- Step 7: Ingestion Adapters (v3.4.3) --
448
+ print()
449
+ print("─── Step 7/9: Ingestion Adapters ───")
450
+ print()
451
+ print(" These let SLM learn from your email, calendar, and meetings.")
452
+ print(" All adapters are OFF by default. You can enable them later.")
453
+ print()
454
+ print(" Available adapters:")
455
+ print(" • Gmail Ingestion — requires Google OAuth setup")
456
+ print(" • Google Calendar — shares Gmail credentials")
457
+ print(" • Meeting Transcripts — watches a folder for .srt/.vtt files")
458
+ print()
459
+
460
+ if interactive:
461
+ adapter_input = _prompt(" Enable any now? [Enter to skip, or type: gmail,calendar,transcript]: ", "")
462
+ else:
463
+ adapter_input = ""
464
+
465
+ # Save adapter preferences (actual setup happens via `slm adapters enable X`)
466
+ adapters_config = {"gmail": False, "calendar": False, "transcript": False}
467
+ if adapter_input:
468
+ for name in adapter_input.split(","):
469
+ name = name.strip().lower()
470
+ if name in adapters_config:
471
+ adapters_config[name] = True
472
+
473
+ adapters_path = _SLM_HOME / "adapters.json"
474
+ import json as _json
475
+ adapters_path.write_text(_json.dumps(
476
+ {k: {"enabled": v, "tier": "polling"} for k, v in adapters_config.items()},
477
+ indent=2,
478
+ ))
479
+
480
+ enabled_adapters = [k for k, v in adapters_config.items() if v]
481
+ if enabled_adapters:
482
+ print(f"\n ✓ Enabled: {', '.join(enabled_adapters)}")
483
+ print(" Run `slm adapters start <name>` to begin ingestion")
484
+ else:
485
+ print("\n ✓ All adapters disabled (enable later: slm adapters enable gmail)")
486
+
487
+ # -- Step 8: Entity Compilation (v3.4.3) --
488
+ print()
489
+ print("─── Step 8/9: Entity Compilation ───")
490
+ print()
491
+ print(" Entity compilation builds knowledge summaries per person,")
492
+ print(" project, and concept. Runs automatically during consolidation.")
493
+ print()
494
+ print(" [Y] Enable entity compilation (recommended)")
495
+ print(" [N] Disable")
496
+ print()
497
+
498
+ if interactive:
499
+ ec_choice = _prompt(" Enable entity compilation? [Y/n] (default: Y): ", "y").lower()
500
+ else:
501
+ ec_choice = "y"
502
+ print(" Auto-enabling entity compilation (non-interactive)")
503
+
504
+ config.entity_compilation_enabled = ec_choice in ("", "y", "yes")
505
+ config.save()
506
+ print(f"\n ✓ Entity compilation {'enabled' if config.entity_compilation_enabled else 'disabled'}")
507
+
508
+ # -- Step 9: Verification --
509
+ print()
510
+ print("─── Step 9/9: Verification ───")
400
511
 
401
512
  if st_ok:
402
513
  verified = _verify_installation()
@@ -410,20 +521,50 @@ def run_wizard(auto: bool = False) -> None:
410
521
  print()
411
522
  print("╔══════════════════════════════════════════════════════════╗")
412
523
  if verified:
413
- print("║ ✓ Setup Complete — SuperLocalMemory is ready! ║")
524
+ print("║ ✓ Setup Complete — The Unified Brain is ready! ║")
414
525
  else:
415
526
  print("║ ✓ Setup Complete — basic config saved ║")
416
527
  print("║ Models will auto-download on first use ║")
417
528
  print("╚══════════════════════════════════════════════════════════╝")
418
529
  print()
530
+
531
+ # Summary of choices
532
+ daemon_mode = "24/7" if config.daemon_idle_timeout == 0 else f"auto-shutdown ({config.daemon_idle_timeout}s)"
533
+ print(f" Enabled: Mode {choice.upper()}, Daemon ({daemon_mode})", end="")
534
+ if config.mesh_enabled:
535
+ print(", Mesh", end="")
536
+ if config.entity_compilation_enabled:
537
+ print(", Entity Compilation", end="")
538
+ if code_graph_enabled:
539
+ print(", CodeGraph", end="")
540
+ print()
541
+ if enabled_adapters:
542
+ print(f" Adapters: {', '.join(enabled_adapters)}")
543
+ else:
544
+ print(" Adapters: none (enable via: slm adapters enable gmail)")
545
+ print()
419
546
  print(" Quick start:")
420
547
  print(' slm remember "your first memory"')
421
548
  print(' slm recall "search query"')
422
- print(" slm dashboard")
549
+ print(" slm dashboard → http://localhost:8765")
550
+ print(" slm adapters enable gmail → start Gmail ingestion")
551
+ print()
552
+ # V3.4.4: Auto-install OS service for daemon persistence (survive reboots)
553
+ try:
554
+ from superlocalmemory.cli.service_installer import install_service
555
+ print(" Installing OS service for auto-start...")
556
+ if install_service():
557
+ print(" ✓ SLM will auto-start on login — zero friction.")
558
+ else:
559
+ print(" ⚠ OS service not installed (run: slm serve install)")
560
+ except Exception:
561
+ print(" ⚠ Could not install OS service (run: slm serve install)")
562
+
423
563
  print()
424
564
  print(" Need help?")
425
565
  print(" slm doctor — diagnose issues")
426
566
  print(" slm --help — all commands")
567
+ print(" slm serve install — install auto-start service")
427
568
  print(" https://github.com/qualixar/superlocalmemory")
428
569
  print()
429
570
 
@@ -153,6 +153,10 @@ class RetrievalConfig:
153
153
  temporal_proximity_days: int = 30
154
154
 
155
155
  # Reranking (V3.3.2: ONNX backend enabled for all modes)
156
+ # V3.4.2: Tested gte-reranker-modernbert-base (8K context) — REGRESSED
157
+ # LoCoMo from 68.4% to 64.1%. Reverted to MiniLM-L-12-v2. The 512-token
158
+ # limit is acceptable because SLM's 6-channel retrieval pre-filters
159
+ # relevant facts before reranking. See bench-v342-locomo.md.
156
160
  use_cross_encoder: bool = True
157
161
  cross_encoder_model: str = "cross-encoder/ms-marco-MiniLM-L-12-v2"
158
162
  cross_encoder_backend: str = "" # "" = PyTorch (~500MB stable), "onnx" = ONNX (leaks on ARM64 CoreML)
@@ -577,6 +581,19 @@ class SLMConfig:
577
581
  default_factory=ParameterizationConfig,
578
582
  )
579
583
 
584
+ # v3.4.3: Daemon configuration
585
+ daemon_idle_timeout: int = 0 # 0 = 24/7 (no auto-kill). >0 = seconds before auto-kill.
586
+ daemon_port: int = 8765 # Primary daemon port
587
+ daemon_legacy_port: int = 8767 # Backward-compat redirect port
588
+ daemon_enable_legacy_port: bool = True # Set False to disable 8767 redirect
589
+
590
+ # v3.4.3: Entity compilation
591
+ entity_compilation_enabled: bool = True
592
+ entity_compilation_retrieval_boost: float = 1.0 # 1.0 = disabled. >1.0 = boost score.
593
+
594
+ # v3.4.3: Mesh
595
+ mesh_enabled: bool = True
596
+
580
597
  def __post_init__(self) -> None:
581
598
  if self.db_path is None:
582
599
  self.db_path = self.base_dir / DEFAULT_DB_NAME
@@ -629,6 +646,17 @@ class SLMConfig:
629
646
  if k in RetrievalConfig.__dataclass_fields__
630
647
  })
631
648
 
649
+ # V3.4.3 config fields (additive — missing keys get dataclass defaults)
650
+ config.daemon_idle_timeout = data.get("daemon_idle_timeout", 0)
651
+ config.daemon_port = data.get("daemon_port", 8765)
652
+ config.daemon_legacy_port = data.get("daemon_legacy_port", 8767)
653
+ config.daemon_enable_legacy_port = data.get("daemon_enable_legacy_port", True)
654
+ config.entity_compilation_enabled = data.get("entity_compilation_enabled", True)
655
+ config.entity_compilation_retrieval_boost = data.get(
656
+ "entity_compilation_retrieval_boost", 1.0,
657
+ )
658
+ config.mesh_enabled = data.get("mesh_enabled", True)
659
+
632
660
  return config
633
661
 
634
662
  def save(self, config_path: Path | None = None) -> None:
@@ -27,6 +27,7 @@ from __future__ import annotations
27
27
 
28
28
  import json
29
29
  import logging
30
+ import threading
30
31
  from datetime import datetime, timezone
31
32
  from typing import TYPE_CHECKING, Any
32
33
 
@@ -141,7 +142,13 @@ class ConsolidationEngine:
141
142
  """Called after each store() in store_pipeline.py.
142
143
 
143
144
  Increments internal counter. When counter hits step_count_trigger
144
- (default 50), runs lightweight consolidation.
145
+ (default 50), runs lightweight consolidation AND queues async
146
+ graph analysis.
147
+
148
+ V3.4.2: Graph analysis runs in background thread after every
149
+ lightweight consolidation trigger. This populates fact_importance
150
+ (PageRank, communities, bridge scores) so retrieval channels can
151
+ use graph intelligence without blocking store/recall latency.
145
152
 
146
153
  Returns True if lightweight consolidation was triggered.
147
154
  """
@@ -152,9 +159,39 @@ class ConsolidationEngine:
152
159
  if self._store_count >= self._config.step_count_trigger:
153
160
  self._store_count = 0
154
161
  self.consolidate(profile_id, lightweight=True)
162
+ # V3.4.2: Queue graph analysis in background (non-blocking)
163
+ self._queue_graph_analysis(profile_id)
155
164
  return True
156
165
  return False
157
166
 
167
+ def _queue_graph_analysis(self, profile_id: str) -> None:
168
+ """Run graph_analyzer.compute_and_store() in a background thread.
169
+
170
+ V3.4.2: Populates fact_importance table with PageRank, community_id,
171
+ degree_centrality, and bridge_score. Next recall() automatically
172
+ uses updated graph intelligence for entity channel and spreading
173
+ activation. Takes ~200-800ms, runs on daemon thread, zero impact
174
+ on store/recall latency.
175
+ """
176
+ if self._graph_analyzer is None:
177
+ return
178
+ analyzer = self._graph_analyzer
179
+ pid = profile_id
180
+
181
+ def _run() -> None:
182
+ try:
183
+ result = analyzer.compute_and_store(pid)
184
+ logger.info(
185
+ "Background graph analysis complete: %d nodes, %d communities",
186
+ result.get("node_count", 0),
187
+ result.get("community_count", 0),
188
+ )
189
+ except Exception as exc:
190
+ logger.debug("Background graph analysis failed (non-fatal): %s", exc)
191
+
192
+ t = threading.Thread(target=_run, daemon=True, name="graph-analysis-bg")
193
+ t.start()
194
+
158
195
  def get_core_memory(self, profile_id: str) -> dict[str, str]:
159
196
  """Load all Core Memory blocks for a profile.
160
197