superlocalmemory 3.3.27 → 3.3.29
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/ATTRIBUTION.md +1 -1
- package/CHANGELOG.md +15 -0
- package/README.md +5 -5
- package/package.json +1 -1
- package/pyproject.toml +1 -1
- package/src/superlocalmemory/cli/commands.py +53 -16
- package/src/superlocalmemory/cli/daemon.py +91 -0
- package/src/superlocalmemory/core/embeddings.py +125 -0
- package/src/superlocalmemory/mcp/tools_v33.py +15 -11
package/ATTRIBUTION.md
CHANGED

@@ -46,7 +46,7 @@ SuperLocalMemory is backed by three peer-reviewed research papers:
 2. **Paper 2 — Information-Geometric Foundations** (arXiv:2603.14588)
    Fisher-Rao geodesic distance, cellular sheaf cohomology, Riemannian Langevin lifecycle dynamics.

-3. **Paper 3 — The Living Brain** (…
+3. **Paper 3 — The Living Brain** (arXiv:2604.04514)
    FRQAD mixed-precision metric, Ebbinghaus adaptive forgetting, 7-channel cognitive retrieval, memory parameterization, trust-weighted forgetting.

 ### Research Initiative
package/CHANGELOG.md
CHANGED

@@ -16,6 +16,21 @@ SuperLocalMemory V3 - Intelligent local memory system for AI coding assistants.

 ---

+## [3.3.28] - 2026-04-07 — Stability Hotfix
+
+### Fixed
+- **Excessive memory usage during rapid file edits** — auto-observe now reuses a single background process instead of spawning one per edit. Rapid multi-file operations (parallel agents, branch switching, batch edits) no longer risk runaway memory usage.
+- **Observation debounce** — rapid-fire observations are batched and deduplicated within a short window, reducing redundant work.
+- **Memory-aware worker management** — a new safety check skips heavy processing when system memory is low.
+
+### New Environment Variables
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `SLM_OBSERVE_DEBOUNCE_SEC` | `3.0` | Observation batching window (seconds) |
+| `SLM_MIN_AVAILABLE_MEMORY_GB` | `2.0` | Minimum free RAM (GB) for background processing |
+
+---
+
 ## [3.3.3] - 2026-04-01 — Langevin Awakening

 ### Fixed
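Both knobs are read from the environment at process startup (the daemon captures `SLM_OBSERVE_DEBOUNCE_SEC` when it launches, so an already-running daemon keeps its original window). A minimal tuning sketch — values here are illustrative, not recommendations:

```python
import os
import subprocess

# Illustrative tuning: widen the batching window and raise the free-RAM floor
# before invoking the CLI. Defaults are 3.0 s and 2.0 GB.
env = {
    **os.environ,
    "SLM_OBSERVE_DEBOUNCE_SEC": "5.0",
    "SLM_MIN_AVAILABLE_MEMORY_GB": "4.0",
}
subprocess.run(["slm", "observe", "switched auth module to JWT"], env=env, check=False)
```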
package/README.md
CHANGED

@@ -5,7 +5,7 @@
 <h1 align="center">SuperLocalMemory V3.3</h1>
 <p align="center"><strong>Every other AI forgets. Yours won't.</strong><br/><em>Infinite memory for Claude Code, Cursor, Windsurf & 17+ AI tools.</em></p>
 <p align="center"><code>v3.3.26</code> — Install once. Every session remembers the last. Automatically.</p>
-<p align="center"><strong>Backed by 3 peer-reviewed research papers</strong> · <a href="…
+<p align="center"><strong>Backed by 3 peer-reviewed research papers</strong> · <a href="https://arxiv.org/abs/2603.02240">arXiv:2603.02240</a> · <a href="https://arxiv.org/abs/2603.14588">arXiv:2603.14588</a> · <a href="https://arxiv.org/abs/2604.04514">arXiv:2604.04514</a></p>

 <p align="center">
 <code>+16pp vs Mem0 (zero cloud)</code> · <code>85% Open-Domain (best of any system)</code> · <code>EU AI Act Ready</code>

@@ -441,7 +441,7 @@ SuperLocalMemory is backed by three peer-reviewed research papers covering trust
 ### Paper 3: The Living Brain (V3.3)
 > **SuperLocalMemory V3.3: The Living Brain — Biologically-Inspired Forgetting, Cognitive Quantization, and Multi-Channel Retrieval for Zero-LLM Agent Memory Systems**
 > Varun Pratap Bhardwaj (2026)
-> [Zenodo DOI: 10.5281/zenodo.19435120](https://zenodo.org/records/19435120)
+> [arXiv:2604.04514](https://arxiv.org/abs/2604.04514) · [Zenodo DOI: 10.5281/zenodo.19435120](https://zenodo.org/records/19435120)

 ### Paper 2: Information-Geometric Foundations (V3)
 > **SuperLocalMemory V3: Information-Geometric Foundations for Zero-LLM Enterprise Agent Memory**

@@ -461,9 +461,9 @@ SuperLocalMemory is backed by three peer-reviewed research papers covering trust
                Forgetting, Cognitive Quantization, and Multi-Channel Retrieval
                for Zero-LLM Agent Memory Systems},
   author={Bhardwaj, Varun Pratap},
-  journal={…
-  …
-  …
+  journal={arXiv preprint arXiv:2604.04514},
+  year={2026},
+  url={https://arxiv.org/abs/2604.04514}
 }

 @article{bhardwaj2026slmv3,
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "superlocalmemory",
-  "version": "3.3.27",
+  "version": "3.3.29",
   "description": "Information-geometric agent memory with mathematical guarantees. 4-channel retrieval, Fisher-Rao similarity, zero-LLM mode, EU AI Act compliant. Works with Claude, Cursor, Windsurf, and 17+ AI tools.",
   "keywords": [
     "ai-memory",
package/pyproject.toml
CHANGED

package/src/superlocalmemory/cli/commands.py
CHANGED

@@ -1554,11 +1554,14 @@ def cmd_session_context(args: Namespace) -> None:


 def cmd_observe(args: Namespace) -> None:
-    """Evaluate and auto-capture content from stdin or argument.
+    """Evaluate and auto-capture content from stdin or argument.
+
+    V3.3.28: Routes through daemon to prevent embedding worker memory blast.
+    Previously each `slm observe` spawned its own MemoryEngine + embedding
+    worker (~1.4 GB each). With 20 parallel edits = 28+ GB = system crash.
+    Now uses the daemon's singleton engine (1 worker total).
+    """
     import sys
-    from superlocalmemory.hooks.auto_capture import AutoCapture
-    from superlocalmemory.core.config import SLMConfig
-    from superlocalmemory.core.engine import MemoryEngine

     content = getattr(args, "content", "") or ""
     if not content and not sys.stdin.isatty():

@@ -1568,22 +1571,56 @@ def cmd_observe(args: Namespace) -> None:
         print("No content to observe.")
         return

+    # V3.3.28: Route through daemon (singleton engine, single embedding worker).
+    # This is the P0 fix for the memory blast incident of April 7, 2026.
     try:
-        …
-        …
-        …
+        from superlocalmemory.cli.daemon import is_daemon_running, daemon_request, ensure_daemon
+        if is_daemon_running() or ensure_daemon():
+            result = daemon_request("POST", "/observe", {"content": content})
+            if result is not None:
+                if result.get("captured"):
+                    cat = result.get("category", "unknown")
+                    conf = result.get("confidence", 0)
+                    print(f"Auto-captured: {cat} (confidence: {conf:.2f}) (via daemon)")
+                else:
+                    reason = result.get("reason", "no patterns matched")
+                    print(f"Not captured: {reason}")
+                return
+    except Exception:
+        pass  # Fall through to direct engine

-    …
-    …
+    # Fallback: direct engine (only if daemon unavailable).
+    # Acquires a system-wide file lock to prevent concurrent worker spawns.
+    try:
+        from superlocalmemory.hooks.auto_capture import AutoCapture
+        from superlocalmemory.core.config import SLMConfig
+        from superlocalmemory.core.engine import MemoryEngine
+        from superlocalmemory.core.embeddings import acquire_embedding_lock
+
+        if not acquire_embedding_lock():
+            logger.debug("observe: another embedding worker active, skipping")
+            print("Not captured: system busy (another embedding in progress)")
+            return
+
+        try:
+            config = SLMConfig.load()
+            engine = MemoryEngine(config)
+            engine.initialize()

-        …
-        …
-        …
-        …
+            auto = AutoCapture(engine=engine)
+            decision = auto.evaluate(content)
+
+            if decision.capture:
+                stored = auto.capture(content, category=decision.category)
+                if stored:
+                    print(f"Auto-captured: {decision.category} (confidence: {decision.confidence:.2f})")
+                else:
+                    print(f"Detected {decision.category} but store failed.")
             else:
-                print(f"…
-                …
-                …
+                print(f"Not captured: {decision.reason}")
+        finally:
+            from superlocalmemory.core.embeddings import release_embedding_lock
+            release_embedding_lock()
     except Exception as exc:
         logger.debug("observe failed: %s", exc)
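For illustration, the new `/observe` endpoint can be exercised directly over the daemon's local HTTP interface; a minimal sketch, assuming a running daemon — the address below is hypothetical, and in practice the client is `daemon_request("POST", "/observe", ...)` from `superlocalmemory.cli.daemon`, which resolves the real endpoint:

```python
import json
import urllib.request

# Hypothetical local address for illustration only; use daemon_request() in real code.
req = urllib.request.Request(
    "http://127.0.0.1:8765/observe",
    data=json.dumps({"content": "decided to enable SQLite WAL mode"}).encode(),
    headers={"Content-Type": "application/json"},
    method="POST",
)
with urllib.request.urlopen(req, timeout=5) as resp:
    print(json.load(resp))
    # e.g. {"captured": true, "queued": true, "buffer_size": 1, "debounce_sec": 3.0}
```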
package/src/superlocalmemory/cli/daemon.py
CHANGED

@@ -37,6 +37,7 @@ import sys
 import time
 from http.server import HTTPServer, BaseHTTPRequestHandler
 from pathlib import Path
+import threading
 from threading import Thread

 logger = logging.getLogger(__name__)

@@ -153,6 +154,73 @@ def stop_daemon() -> bool:
 _engine = None
 _last_activity = time.monotonic()

+# ---------------------------------------------------------------------------
+# V3.3.28: Observation debounce buffer.
+#
+# When 20+ file edits arrive in quick succession (from parallel AI agents,
+# git checkout, or batch sed), we buffer observations for _OBSERVE_DEBOUNCE_SEC
+# seconds and deduplicate by content hash. This reduces 20 observations → 1-3
+# batches, each processed by the singleton engine (1 embedding worker).
+# ---------------------------------------------------------------------------
+
+_OBSERVE_DEBOUNCE_SEC = float(os.environ.get("SLM_OBSERVE_DEBOUNCE_SEC", "3.0"))
+_observe_buffer: list[str] = []
+_observe_seen: set[str] = set()  # content hashes for dedup within window
+_observe_lock = threading.Lock()
+_observe_timer: threading.Timer | None = None
+
+
+def _flush_observe_buffer() -> None:
+    """Process all buffered observations as a single batch."""
+    global _observe_timer
+    with _observe_lock:
+        if not _observe_buffer:
+            return
+        batch = list(_observe_buffer)
+        _observe_buffer.clear()
+        _observe_seen.clear()
+        _observe_timer = None
+
+    # Process each unique observation (already deduped)
+    engine = _get_engine()
+    from superlocalmemory.hooks.auto_capture import AutoCapture
+    auto = AutoCapture(engine=engine)
+
+    for content in batch:
+        try:
+            decision = auto.evaluate(content)
+            if decision.capture:
+                auto.capture(content, category=decision.category)
+        except Exception:
+            pass  # Don't let one bad observation kill the batch
+
+    logger.info("Observe debounce: processed %d observations (from buffer)", len(batch))
+
+
+def _enqueue_observation(content: str) -> dict:
+    """Add an observation to the debounce buffer. Returns immediate response."""
+    global _observe_timer
+    import hashlib
+    content_hash = hashlib.md5(content.encode()).hexdigest()
+
+    with _observe_lock:
+        if content_hash in _observe_seen:
+            return {"captured": False, "reason": "duplicate within debounce window"}
+
+        _observe_seen.add(content_hash)
+        _observe_buffer.append(content)
+        buf_size = len(_observe_buffer)
+
+        # Reset debounce timer
+        if _observe_timer is not None:
+            _observe_timer.cancel()
+        _observe_timer = threading.Timer(_OBSERVE_DEBOUNCE_SEC, _flush_observe_buffer)
+        _observe_timer.daemon = True
+        _observe_timer.start()
+
+    return {"captured": True, "queued": True, "buffer_size": buf_size,
+            "debounce_sec": _OBSERVE_DEBOUNCE_SEC}
+

 def _get_engine():
     global _engine

@@ -276,6 +344,24 @@ class DaemonHandler(BaseHTTPRequestHandler):
                 self._send_json(500, {"error": str(exc)})
             return

+        if self.path == "/observe":
+            try:
+                body = self._read_body()
+                content = body.get("content", "")
+                if not content:
+                    self._send_json(400, {"error": "content required"})
+                    return
+
+                # V3.3.28: Debounced observation processing.
+                # Buffers observations for 3s, deduplicates, processes as batch.
+                # Returns immediately — the actual capture happens asynchronously
+                # via the debounce timer, using the singleton engine.
+                result = _enqueue_observation(content)
+                self._send_json(200, result)
+            except Exception as exc:
+                self._send_json(500, {"error": str(exc)})
+            return
+
         if self.path == "/stop":
             self._send_json(200, {"status": "stopping"})
             Thread(target=_shutdown_server, daemon=True).start()

@@ -294,6 +380,11 @@ _server_start_time = time.monotonic()

 def _shutdown_server() -> None:
     global _engine, _server
+    # V3.3.28: Flush any buffered observations before shutdown
+    try:
+        _flush_observe_buffer()
+    except Exception:
+        pass
     time.sleep(0.5)
     if _engine is not None:
         try:
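As a quick behavioral check of the dedup semantics (an in-process sketch; `_enqueue_observation` is a private helper, imported here only for illustration):

```python
from superlocalmemory.cli.daemon import _enqueue_observation

print(_enqueue_observation("chose WAL mode"))
# {'captured': True, 'queued': True, 'buffer_size': 1, 'debounce_sec': 3.0}
print(_enqueue_observation("chose WAL mode"))
# {'captured': False, 'reason': 'duplicate within debounce window'}
print(_enqueue_observation("added retry logic"))
# {'captured': True, 'queued': True, 'buffer_size': 2, 'debounce_sec': 3.0}
```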
package/src/superlocalmemory/core/embeddings.py
CHANGED

@@ -49,6 +49,66 @@ class DimensionMismatchError(RuntimeError):
     """Raised when the actual embedding dimension differs from config."""


+# ---------------------------------------------------------------------------
+# V3.3.28: System-wide concurrency guard for embedding workers.
+#
+# The memory blast incident (April 7, 2026) was caused by 20+ concurrent
+# `slm observe` CLI processes each spawning their own embedding_worker
+# subprocess (1.4 GB each). This file lock ensures only MAX_CONCURRENT
+# embedding workers can exist across ALL processes on the machine.
+#
+# Primary defense: daemon routing (cmd_observe → daemon → singleton engine).
+# This lock is the secondary safety net for when the daemon isn't available.
+# ---------------------------------------------------------------------------
+
+_EMBEDDING_LOCK_FILE = Path.home() / ".superlocalmemory" / ".embedding.lock"
+_MAX_CONCURRENT_WORKERS = int(os.environ.get("SLM_MAX_EMBEDDING_WORKERS", 2))
+_embedding_lock_fd: int | None = None
+
+
+def acquire_embedding_lock(timeout: float = 5.0) -> bool:
+    """Acquire system-wide embedding worker lock.
+
+    Uses fcntl.flock on Unix. On Windows, falls back to allowing (no lock).
+    Returns True if lock acquired, False if timed out (another worker active).
+    """
+    global _embedding_lock_fd
+    if sys.platform == "win32":
+        return True  # No file locking on Windows — daemon routing is primary defense
+
+    import fcntl
+    _EMBEDDING_LOCK_FILE.parent.mkdir(parents=True, exist_ok=True)
+
+    try:
+        _embedding_lock_fd = os.open(str(_EMBEDDING_LOCK_FILE), os.O_CREAT | os.O_RDWR)
+        deadline = time.time() + timeout
+        while time.time() < deadline:
+            try:
+                fcntl.flock(_embedding_lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+                return True
+            except (BlockingIOError, OSError):
+                time.sleep(0.2)
+        # Timeout — another worker holds the lock
+        os.close(_embedding_lock_fd)
+        _embedding_lock_fd = None
+        return False
+    except Exception:
+        return True  # On error, allow through (don't block functionality)
+
+
+def release_embedding_lock() -> None:
+    """Release system-wide embedding worker lock."""
+    global _embedding_lock_fd
+    if _embedding_lock_fd is not None:
+        try:
+            import fcntl
+            fcntl.flock(_embedding_lock_fd, fcntl.LOCK_UN)
+            os.close(_embedding_lock_fd)
+        except Exception:
+            pass
+        _embedding_lock_fd = None
+
+
 _IDLE_TIMEOUT_SECONDS = 120  # 2 minutes — kill worker after idle
 # V3.3.12: Configurable via SLM_EMBED_IDLE_TIMEOUT env var (seconds)
 _IDLE_TIMEOUT_SECONDS = int(os.environ.get("SLM_EMBED_IDLE_TIMEOUT", _IDLE_TIMEOUT_SECONDS))

@@ -270,11 +330,76 @@ class EmbeddingService:
             raise error_container[0]
         return result_container[0] if result_container else ""

+    @staticmethod
+    def _check_memory_pressure() -> bool:
+        """Check if system has enough memory to spawn a worker.
+
+        V3.3.28: Prevents spawning embedding workers (1.4 GB each) when
+        the system is already under memory pressure. Returns True if safe.
+        """
+        min_available_gb = float(os.environ.get("SLM_MIN_AVAILABLE_MEMORY_GB", "2.0"))
+        try:
+            if sys.platform == "darwin":
+                # macOS: use vm_stat to get free + inactive pages
+                import subprocess as _sp
+                result = _sp.run(["vm_stat"], capture_output=True, text=True, timeout=5)
+                if result.returncode == 0:
+                    lines = result.stdout.split("\n")
+                    page_size = 16384  # default on Apple Silicon
+                    free_pages = 0
+                    for line in lines:
+                        if "page size of" in line:
+                            try:
+                                page_size = int(line.split()[-2])
+                            except (ValueError, IndexError):
+                                pass
+                        if "Pages free" in line or "Pages inactive" in line:
+                            try:
+                                free_pages += int(line.split()[-1].rstrip("."))
+                            except (ValueError, IndexError):
+                                pass
+                    available_gb = (free_pages * page_size) / (1024 ** 3)
+                    if available_gb < min_available_gb:
+                        logger.warning(
+                            "Low memory (%.1f GB available, need %.1f GB) — "
+                            "deferring embedding worker spawn",
+                            available_gb, min_available_gb,
+                        )
+                        return False
+            else:
+                # Linux/other: use /proc/meminfo or psutil
+                try:
+                    with open("/proc/meminfo") as f:
+                        for line in f:
+                            if line.startswith("MemAvailable:"):
+                                available_kb = int(line.split()[1])
+                                available_gb = available_kb / (1024 * 1024)
+                                if available_gb < min_available_gb:
+                                    logger.warning(
+                                        "Low memory (%.1f GB available) — "
+                                        "deferring embedding worker spawn",
+                                        available_gb,
+                                    )
+                                    return False
+                                break
+                except FileNotFoundError:
+                    pass  # Not Linux, allow through
+        except Exception:
+            pass  # On error, allow through (don't block functionality)
+        return True
+
     def _ensure_worker(self) -> None:
         """Spawn worker subprocess if not running."""
         if self._worker_proc is not None and self._worker_proc.poll() is None:
             return
         self._worker_proc = None
+
+        # V3.3.28: Check memory pressure before spawning
+        if not self._check_memory_pressure():
+            logger.warning("Skipping embedding worker spawn due to memory pressure")
+            self._available = False
+            return
+
         worker_module = "superlocalmemory.core.embedding_worker"
         try:
             env = {
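Callers that bypass the daemon are expected to bracket engine work with the new lock pair, as `cmd_observe` does; a minimal usage sketch (the guarded work is illustrative):

```python
from superlocalmemory.core.embeddings import acquire_embedding_lock, release_embedding_lock

if acquire_embedding_lock(timeout=5.0):
    try:
        # ... create the engine and run embeddings here (illustrative) ...
        pass
    finally:
        release_embedding_lock()  # always release, even on error
else:
    print("another embedding worker is active; skipping")
```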
package/src/superlocalmemory/mcp/tools_v33.py
CHANGED

@@ -76,15 +76,19 @@ def register_v33_tools(server, get_engine: Callable) -> None:
         )

         if dry_run:
-            …
-            …
-            …
+            rows = engine._db.execute(
+                "SELECT lifecycle_zone, COUNT(*) as cnt "
+                "FROM fact_retention WHERE profile_id = ? "
+                "GROUP BY lifecycle_zone",
+                (pid,),
+            )
             zones = {"active": 0, "warm": 0, "cold": 0, "archive": 0, "forgotten": 0}
-            …
-            …
-            …
-            zones[…
-            …
+            total = 0
+            for row in rows:
+                r = dict(row)
+                zones[r["lifecycle_zone"]] = int(r["cnt"])
+                total += int(r["cnt"])
+            result = {"total": total, "transitions": 0, "dry_run_zones": zones}
         else:
             result = scheduler.run_decay_cycle(pid, force=True)

@@ -399,9 +403,9 @@ def register_v33_tools(server, get_engine: Callable) -> None:
         # 3. Behavioral pattern mining
         try:
             from superlocalmemory.learning.consolidation_worker import ConsolidationWorker
-            cw = ConsolidationWorker(engine._db, engine.…
-            …
-            results["behavioral"] = {"patterns_mined": …
+            cw = ConsolidationWorker(engine._db.db_path, engine._db.db_path.parent / "learning.db")
+            count = cw._generate_patterns(pid, False)
+            results["behavioral"] = {"patterns_mined": count}
         except Exception as exc:
             results["behavioral"] = {"error": str(exc)}
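For reference, the dry-run branch now reports the zone histogram without applying any transitions; a plausible result shape (counts are made up for illustration):

```python
# Hypothetical dry-run result (counts illustrative):
result = {
    "total": 128,
    "transitions": 0,  # dry run never applies transitions
    "dry_run_zones": {"active": 90, "warm": 20, "cold": 12, "archive": 6, "forgotten": 0},
}
```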