superlocalmemory 2.5.0 → 2.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/mcp_server.py CHANGED
@@ -28,7 +28,9 @@ Usage:
28
28
  from mcp.server.fastmcp import FastMCP
29
29
  from mcp.types import ToolAnnotations
30
30
  import sys
31
+ import os
31
32
  import json
33
+ import re
32
34
  from pathlib import Path
33
35
  from typing import Optional
34
36
 
@@ -54,6 +56,25 @@ try:
54
56
  except ImportError:
55
57
  PROVENANCE_AVAILABLE = False
56
58
 
59
+ # Trust Scorer (v2.6 — enforcement)
60
+ try:
61
+ from trust_scorer import TrustScorer
62
+ TRUST_AVAILABLE = True
63
+ except ImportError:
64
+ TRUST_AVAILABLE = False
65
+
66
+ def _sanitize_error(error: Exception) -> str:
67
+ """Strip internal paths and structure from error messages."""
68
+ msg = str(error)
69
+ # Strip file paths containing claude-memory
70
+ msg = re.sub(r'/[\w./-]*claude-memory[\w./-]*', '[internal-path]', msg)
71
+ # Strip file paths containing SuperLocalMemory
72
+ msg = re.sub(r'/[\w./-]*SuperLocalMemory[\w./-]*', '[internal-path]', msg)
73
+ # Strip SQLite table names from error messages
74
+ msg = re.sub(r'table\s+\w+', 'table [redacted]', msg)
75
+ return msg
76
+
77
+
57
78
  # Parse command line arguments early (needed for port in constructor)
58
79
  import argparse as _argparse
59
80
  _parser = _argparse.ArgumentParser(add_help=False)
@@ -131,6 +152,19 @@ def get_provenance_tracker():
131
152
  return _provenance_tracker
132
153
 
133
154
 
155
+ _trust_scorer = None
156
+
157
+
158
+ def get_trust_scorer():
159
+ """Get shared TrustScorer singleton (v2.6+). Returns None if unavailable."""
160
+ global _trust_scorer
161
+ if not TRUST_AVAILABLE:
162
+ return None
163
+ if _trust_scorer is None:
164
+ _trust_scorer = TrustScorer.get_instance(DB_PATH)
165
+ return _trust_scorer
166
+
167
+
134
168
  def _register_mcp_agent(agent_name: str = "mcp-client"):
135
169
  """Register the calling MCP agent and record activity. Non-blocking."""
136
170
  registry = get_agent_registry()
@@ -188,6 +222,18 @@ async def remember(
188
222
  # Register MCP agent (v2.5 — agent tracking)
189
223
  _register_mcp_agent()
190
224
 
225
+ # Trust enforcement (v2.6) — block untrusted agents from writing
226
+ try:
227
+ trust = get_trust_scorer()
228
+ if trust and not trust.check_trust("mcp:mcp-client", "write"):
229
+ return {
230
+ "success": False,
231
+ "error": "Agent trust score too low for write operations",
232
+ "message": "Trust enforcement blocked this operation"
233
+ }
234
+ except Exception:
235
+ pass # Trust check failure should not block operations
236
+
191
237
  # Use existing MemoryStoreV2 class (no duplicate logic)
192
238
  store = get_store()
193
239
 
@@ -228,7 +274,7 @@ async def remember(
228
274
  except Exception as e:
229
275
  return {
230
276
  "success": False,
231
- "error": str(e),
277
+ "error": _sanitize_error(e),
232
278
  "message": "Failed to save memory"
233
279
  }
234
280
 
@@ -277,8 +323,17 @@ async def recall(
277
323
  # Use existing MemoryStoreV2 class
278
324
  store = get_store()
279
325
 
280
- # Call existing search method
281
- results = store.search(query, limit=limit)
326
+ # Hybrid search (opt-in via env var, v2.6)
327
+ _use_hybrid = os.environ.get('SLM_HYBRID_SEARCH', 'false').lower() == 'true'
328
+ if _use_hybrid:
329
+ try:
330
+ from hybrid_search import HybridSearchEngine
331
+ engine = HybridSearchEngine(store=store)
332
+ results = engine.search(query, limit=limit)
333
+ except (ImportError, Exception):
334
+ results = store.search(query, limit=limit)
335
+ else:
336
+ results = store.search(query, limit=limit)
282
337
 
283
338
  # Filter by minimum score
284
339
  filtered_results = [
@@ -297,7 +352,7 @@ async def recall(
297
352
  except Exception as e:
298
353
  return {
299
354
  "success": False,
300
- "error": str(e),
355
+ "error": _sanitize_error(e),
301
356
  "message": "Failed to search memories",
302
357
  "results": [],
303
358
  "count": 0
@@ -338,7 +393,7 @@ async def list_recent(limit: int = 10) -> dict:
338
393
  except Exception as e:
339
394
  return {
340
395
  "success": False,
341
- "error": str(e),
396
+ "error": _sanitize_error(e),
342
397
  "message": "Failed to list memories",
343
398
  "memories": [],
344
399
  "count": 0
@@ -377,7 +432,7 @@ async def get_status() -> dict:
377
432
  except Exception as e:
378
433
  return {
379
434
  "success": False,
380
- "error": str(e),
435
+ "error": _sanitize_error(e),
381
436
  "message": "Failed to get status"
382
437
  }
383
438
 
@@ -418,7 +473,7 @@ async def build_graph() -> dict:
418
473
  except Exception as e:
419
474
  return {
420
475
  "success": False,
421
- "error": str(e),
476
+ "error": _sanitize_error(e),
422
477
  "message": "Failed to build graph"
423
478
  }
424
479
 
@@ -486,7 +541,7 @@ async def switch_profile(name: str) -> dict:
486
541
  except Exception as e:
487
542
  return {
488
543
  "success": False,
489
- "error": str(e),
544
+ "error": _sanitize_error(e),
490
545
  "message": "Failed to switch profile"
491
546
  }
492
547
 
@@ -531,7 +586,7 @@ async def backup_status() -> dict:
531
586
  except Exception as e:
532
587
  return {
533
588
  "success": False,
534
- "error": str(e),
589
+ "error": _sanitize_error(e),
535
590
  "message": "Failed to get backup status"
536
591
  }
537
592
 
@@ -583,7 +638,7 @@ async def search(query: str) -> dict:
583
638
  return {"results": results}
584
639
 
585
640
  except Exception as e:
586
- return {"results": [], "error": str(e)}
641
+ return {"results": [], "error": _sanitize_error(e)}
587
642
 
588
643
 
589
644
  @mcp.tool(annotations=ToolAnnotations(
@@ -635,7 +690,7 @@ async def fetch(id: str) -> dict:
635
690
  }
636
691
 
637
692
  except Exception as e:
638
- raise ValueError(f"Failed to fetch memory {id}: {str(e)}")
693
+ raise ValueError(f"Failed to fetch memory {id}: {_sanitize_error(e)}")
639
694
 
640
695
 
641
696
  # ============================================================================
@@ -654,7 +709,7 @@ async def get_recent_memories_resource(limit: str) -> str:
654
709
  memories = store.list_all(limit=int(limit))
655
710
  return json.dumps(memories, indent=2)
656
711
  except Exception as e:
657
- return json.dumps({"error": str(e)}, indent=2)
712
+ return json.dumps({"error": _sanitize_error(e)}, indent=2)
658
713
 
659
714
 
660
715
  @mcp.resource("memory://stats")
@@ -669,7 +724,7 @@ async def get_stats_resource() -> str:
669
724
  stats = store.get_stats()
670
725
  return json.dumps(stats, indent=2)
671
726
  except Exception as e:
672
- return json.dumps({"error": str(e)}, indent=2)
727
+ return json.dumps({"error": _sanitize_error(e)}, indent=2)
673
728
 
674
729
 
675
730
  @mcp.resource("memory://graph/clusters")
@@ -685,7 +740,7 @@ async def get_clusters_resource() -> str:
685
740
  clusters = stats.get('clusters', [])
686
741
  return json.dumps(clusters, indent=2)
687
742
  except Exception as e:
688
- return json.dumps({"error": str(e)}, indent=2)
743
+ return json.dumps({"error": _sanitize_error(e)}, indent=2)
689
744
 
690
745
 
691
746
  @mcp.resource("memory://patterns/identity")
@@ -700,7 +755,7 @@ async def get_coding_identity_resource() -> str:
700
755
  patterns = learner.get_identity_context(min_confidence=0.5)
701
756
  return json.dumps(patterns, indent=2)
702
757
  except Exception as e:
703
- return json.dumps({"error": str(e)}, indent=2)
758
+ return json.dumps({"error": _sanitize_error(e)}, indent=2)
704
759
 
705
760
 
706
761
  # ============================================================================
@@ -742,7 +797,7 @@ async def coding_identity_prompt() -> str:
742
797
  return prompt
743
798
 
744
799
  except Exception as e:
745
- return f"# Coding Identity\n\nError loading patterns: {str(e)}"
800
+ return f"# Coding Identity\n\nError loading patterns: {_sanitize_error(e)}"
746
801
 
747
802
 
748
803
  @mcp.prompt()
@@ -780,7 +835,7 @@ async def project_context_prompt(project_name: str) -> str:
780
835
  return prompt
781
836
 
782
837
  except Exception as e:
783
- return f"# Project Context: {project_name}\n\nError loading context: {str(e)}"
838
+ return f"# Project Context: {project_name}\n\nError loading context: {_sanitize_error(e)}"
784
839
 
785
840
 
786
841
  # ============================================================================
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "superlocalmemory",
3
- "version": "2.5.0",
4
- "description": "Your AI Finally Remembers You - Local-first intelligent memory system for AI assistants. Works with Claude, Cursor, Windsurf, VS Code/Copilot, Codex, and 16+ AI tools. 100% local, zero cloud dependencies.",
3
+ "version": "2.6.0",
4
+ "description": "Your AI Finally Remembers You - Local-first intelligent memory system for AI assistants. Works with Claude, Cursor, Windsurf, VS Code/Copilot, Codex, and 17+ AI tools. 100% local, zero cloud dependencies.",
5
5
  "keywords": [
6
6
  "ai-memory",
7
7
  "claude-ai",
@@ -34,7 +34,7 @@
34
34
  "type": "git",
35
35
  "url": "https://github.com/varun369/SuperLocalMemoryV2.git"
36
36
  },
37
- "homepage": "https://github.com/varun369/SuperLocalMemoryV2#readme",
37
+ "homepage": "https://superlocalmemory.com",
38
38
  "bugs": {
39
39
  "url": "https://github.com/varun369/SuperLocalMemoryV2/issues"
40
40
  },
@@ -98,5 +98,8 @@
98
98
  "year": "2026",
99
99
  "github": "https://github.com/varun369",
100
100
  "required": "Attribution notice must be preserved in all copies and derivative works"
101
+ },
102
+ "dependencies": {
103
+ "docx": "^9.5.1"
101
104
  }
102
105
  }
@@ -0,0 +1,220 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ SuperLocalMemory V2 - Thumbnail Generator
4
+ Copyright (c) 2026 Varun Pratap Bhardwaj
5
+ Licensed under MIT License
6
+
7
+ Generates optimized thumbnail versions of all screenshots.
8
+ - Size: 320×180px (16:9 ratio)
9
+ - Format: PNG (for wiki/docs) and WebP (for website)
10
+ - Quality: High enough to recognize content
11
+ - File size: < 50KB per thumbnail
12
+ """
13
+
14
+ import os
15
+ import json
16
+ from pathlib import Path
17
+ from PIL import Image, ImageFilter, ImageOps
18
+ from datetime import datetime
19
+
20
+ # Configuration
21
+ SCREENSHOT_DIR = Path(__file__).parent.parent / "assets" / "screenshots"
22
+ THUMBNAIL_DIR = Path(__file__).parent.parent / "assets" / "thumbnails"
23
+ THUMBNAIL_SIZE = (320, 180) # 16:9 ratio
24
+ QUALITY_PNG = 95
25
+ QUALITY_WEBP = 85
26
+ MAX_FILESIZE = 50 * 1024 # 50KB
27
+
28
+ # Category mapping based on filename patterns
29
+ CATEGORY_MAP = {
30
+ "overview": "dashboard",
31
+ "timeline": "timeline",
32
+ "agents": "agents",
33
+ "patterns": "patterns",
34
+ "clusters": "clusters",
35
+ "memories": "memories",
36
+ "graph": "graph",
37
+ "filtered": "search",
38
+ "live-events": "events",
39
+ }
40
+
41
+ def get_category(filename):
42
+ """Extract category from filename."""
43
+ for pattern, category in CATEGORY_MAP.items():
44
+ if pattern in filename.lower():
45
+ return category
46
+ return "general"
47
+
48
+ def get_title(filename):
49
+ """Generate human-readable title from filename."""
50
+ # Remove extensions and convert dashes/underscores to spaces
51
+ name = Path(filename).stem
52
+ # Remove 'dashboard-' prefix if present
53
+ if name.startswith("dashboard-"):
54
+ name = name[9:]
55
+ # Remove '-dark' suffix
56
+ name = name.replace("-dark", "")
57
+ # Convert to title case
58
+ return " ".join(word.capitalize() for word in name.split("-"))
59
+
60
+ def get_description(filename, category):
61
+ """Generate description based on filename and category."""
62
+ descriptions = {
63
+ "overview": "Main dashboard with memory statistics and knowledge graph overview",
64
+ "timeline": "Chronological timeline of all stored memories and events",
65
+ "agents": "Agent connections and activity tracking",
66
+ "patterns": "Learned coding patterns and user preferences",
67
+ "clusters": "Knowledge graph clusters and relationships",
68
+ "memories": "Detailed memory list with search and filtering",
69
+ "graph": "Interactive knowledge graph visualization",
70
+ "search": "Advanced memory search and filtering interface",
71
+ "events": "Real-time live event stream from memory operations",
72
+ }
73
+ is_dark = "-dark" in filename.lower()
74
+ base_desc = descriptions.get(category, "Dashboard interface")
75
+ if is_dark:
76
+ base_desc += " (dark mode)"
77
+ return base_desc
78
+
79
+ def resize_and_crop(image, target_size):
80
+ """
81
+ Resize image to target size while maintaining aspect ratio.
82
+ Crops if necessary to achieve exact dimensions.
83
+ """
84
+ img_ratio = image.width / image.height
85
+ target_ratio = target_size[0] / target_size[1]
86
+
87
+ if img_ratio > target_ratio:
88
+ # Image is wider, crop width
89
+ new_height = target_size[1]
90
+ new_width = int(new_height * img_ratio)
91
+ resized = image.resize((new_width, new_height), Image.Resampling.LANCZOS)
92
+ left = (resized.width - target_size[0]) // 2
93
+ return resized.crop((left, 0, left + target_size[0], target_size[1]))
94
+ else:
95
+ # Image is taller, crop height
96
+ new_width = target_size[0]
97
+ new_height = int(new_width / img_ratio)
98
+ resized = image.resize((new_width, new_height), Image.Resampling.LANCZOS)
99
+ top = (resized.height - target_size[1]) // 2
100
+ return resized.crop((0, top, target_size[0], top + target_size[1]))
101
+
102
+ def apply_sharpening(image):
103
+ """Apply subtle sharpening to enhance detail."""
104
+ # Use UNSHARP_MASK equivalent with subtle settings
105
+ return image.filter(ImageFilter.UnsharpMask(radius=1, percent=100, threshold=3))
106
+
107
+ def generate_thumbnail(source_path, dest_dir, metadata):
108
+ """Generate PNG and WebP thumbnails for a single source image."""
109
+ try:
110
+ # Open image
111
+ with Image.open(source_path) as img:
112
+ # Convert RGBA to RGB if necessary (for PNG/WebP)
113
+ if img.mode == "RGBA":
114
+ background = Image.new("RGB", img.size, (255, 255, 255))
115
+ background.paste(img, mask=img.split()[3])
116
+ img = background
117
+ elif img.mode != "RGB":
118
+ img = img.convert("RGB")
119
+
120
+ # Resize and crop
121
+ thumbnail = resize_and_crop(img, THUMBNAIL_SIZE)
122
+
123
+ # Apply sharpening
124
+ thumbnail = apply_sharpening(thumbnail)
125
+
126
+ filename = source_path.stem
127
+
128
+ # Save PNG version
129
+ png_path = dest_dir / f"{filename}-thumb.png"
130
+ thumbnail.save(png_path, "PNG", quality=QUALITY_PNG, optimize=True)
131
+ png_size = png_path.stat().st_size
132
+
133
+ # Save WebP version
134
+ webp_path = dest_dir / f"{filename}-thumb.webp"
135
+ thumbnail.save(webp_path, "WEBP", quality=QUALITY_WEBP, method=6)
136
+ webp_size = webp_path.stat().st_size
137
+
138
+ # Check file sizes
139
+ if png_size > MAX_FILESIZE:
140
+ print(f"⚠️ PNG {filename}: {png_size/1024:.1f}KB (exceeds limit)")
141
+ if webp_size > MAX_FILESIZE:
142
+ print(f"⚠️ WebP {filename}: {webp_size/1024:.1f}KB (exceeds limit)")
143
+
144
+ print(f"✓ {filename}")
145
+ print(f" PNG: {png_size/1024:.1f}KB | WebP: {webp_size/1024:.1f}KB")
146
+
147
+ # Store metadata
148
+ category = get_category(filename)
149
+ metadata[filename] = {
150
+ "title": get_title(filename),
151
+ "description": get_description(source_path.name, category),
152
+ "category": category,
153
+ "full_image": f"../screenshots/dashboard/{source_path.name}",
154
+ "thumbnail_png": f"{filename}-thumb.png",
155
+ "thumbnail_webp": f"{filename}-thumb.webp",
156
+ "created": datetime.now().isoformat(),
157
+ "original_size": f"{img.width}×{img.height}",
158
+ "thumbnail_size": f"{THUMBNAIL_SIZE[0]}×{THUMBNAIL_SIZE[1]}",
159
+ "png_size_kb": round(png_size / 1024, 2),
160
+ "webp_size_kb": round(webp_size / 1024, 2),
161
+ }
162
+ return True
163
+ except Exception as e:
164
+ print(f"✗ {source_path.name}: {str(e)}")
165
+ return False
166
+
167
+ def main():
168
+ """Generate all thumbnails."""
169
+ # Ensure thumbnail directory exists
170
+ THUMBNAIL_DIR.mkdir(parents=True, exist_ok=True)
171
+
172
+ # Verify screenshot directory exists
173
+ if not SCREENSHOT_DIR.exists():
174
+ print(f"Error: Screenshot directory not found: {SCREENSHOT_DIR}")
175
+ return 1
176
+
177
+ # Find all images in screenshot directory
178
+ image_extensions = {".png", ".jpg", ".jpeg", ".webp"}
179
+ sources = sorted([
180
+ f for f in SCREENSHOT_DIR.glob("**/*")
181
+ if f.is_file() and f.suffix.lower() in image_extensions and not f.name.startswith(".")
182
+ ])
183
+
184
+ if not sources:
185
+ print(f"No images found in {SCREENSHOT_DIR}")
186
+ return 1
187
+
188
+ print(f"Found {len(sources)} images in {SCREENSHOT_DIR}")
189
+ print(f"Generating thumbnails to {THUMBNAIL_DIR}\n")
190
+
191
+ metadata = {}
192
+ successful = 0
193
+ failed = 0
194
+
195
+ for source in sources:
196
+ if generate_thumbnail(source, THUMBNAIL_DIR, metadata):
197
+ successful += 1
198
+ else:
199
+ failed += 1
200
+
201
+ # Save metadata index
202
+ index_path = THUMBNAIL_DIR / "index.json"
203
+ with open(index_path, "w") as f:
204
+ json.dump(metadata, f, indent=2, sort_keys=True)
205
+ print(f"\n✓ Saved metadata index to {index_path}")
206
+
207
+ # Print summary
208
+ print(f"\n{'='*60}")
209
+ print(f"Summary:")
210
+ print(f" Total processed: {len(sources)}")
211
+ print(f" Successful: {successful}")
212
+ print(f" Failed: {failed}")
213
+ print(f" PNG thumbnails: {len(list(THUMBNAIL_DIR.glob('*-thumb.png')))}")
214
+ print(f" WebP thumbnails: {len(list(THUMBNAIL_DIR.glob('*-thumb.webp')))}")
215
+ print(f" Total size: {sum(f.stat().st_size for f in THUMBNAIL_DIR.glob('*-thumb.*')) / 1024:.1f}KB")
216
+
217
+ return 0 if failed == 0 else 1
218
+
219
+ if __name__ == "__main__":
220
+ exit(main())
@@ -31,7 +31,7 @@ Protocols:
31
31
  cli — Command-line interface (slm command, bin/ scripts)
32
32
  rest — REST API (api_server.py)
33
33
  python — Direct Python import
34
- a2a — Agent-to-Agent Protocol (v2.6+)
34
+ a2a — Agent-to-Agent Protocol (v2.7+)
35
35
  """
36
36
 
37
37
  import json
@@ -323,6 +323,39 @@ class AgentRegistry:
323
323
  logger.error("Failed to list agents: %s", e)
324
324
  return []
325
325
 
326
+ def list_active_agents(self, timeout_minutes: int = 5) -> List[dict]:
327
+ """
328
+ List only active agents (seen within timeout_minutes).
329
+
330
+ Used by dashboard to filter out ghost/disconnected agents.
331
+ Default: agents seen within last 5 minutes are considered active.
332
+
333
+ Args:
334
+ timeout_minutes: Consider agents active if seen within this many minutes
335
+
336
+ Returns:
337
+ List of active agent dicts
338
+ """
339
+ try:
340
+ from db_connection_manager import DbConnectionManager
341
+ mgr = DbConnectionManager.get_instance(self.db_path)
342
+
343
+ with mgr.read_connection() as conn:
344
+ cursor = conn.cursor()
345
+ cursor.execute("""
346
+ SELECT agent_id, agent_name, protocol, first_seen, last_seen,
347
+ memories_written, memories_recalled, trust_score, metadata
348
+ FROM agent_registry
349
+ WHERE last_seen >= datetime('now', '-' || ? || ' minutes')
350
+ ORDER BY last_seen DESC
351
+ """, (timeout_minutes,))
352
+ rows = cursor.fetchall()
353
+
354
+ return [self._row_to_dict(row) for row in rows]
355
+ except Exception as e:
356
+ logger.error("Failed to list active agents: %s", e)
357
+ return []
358
+
326
359
  def get_stats(self) -> dict:
327
360
  """Get agent registry statistics."""
328
361
  try:
@@ -0,0 +1,63 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ SuperLocalMemory V2 - Optional API Key Authentication
4
+ Copyright (c) 2026 Varun Pratap Bhardwaj
5
+ Licensed under MIT License
6
+
7
+ Opt-in API key authentication for dashboard and API endpoints.
8
+ When ~/.claude-memory/api_key file exists, write endpoints require
9
+ X-SLM-API-Key header. Read endpoints remain open for backward compatibility.
10
+ """
11
+
12
+ import os
13
+ import hashlib
14
+ import logging
15
+ from pathlib import Path
16
+ from typing import Optional
17
+
18
+ logger = logging.getLogger(__name__)
19
+
20
+ MEMORY_DIR = Path.home() / ".claude-memory"
21
+ API_KEY_FILE = MEMORY_DIR / "api_key"
22
+
23
+
24
+ def _load_api_key_hash() -> Optional[str]:
25
+ """Load API key hash from file. Returns None if auth is not configured."""
26
+ if not API_KEY_FILE.exists():
27
+ return None
28
+ try:
29
+ key = API_KEY_FILE.read_text().strip()
30
+ if not key:
31
+ return None
32
+ return hashlib.sha256(key.encode()).hexdigest()
33
+ except Exception as e:
34
+ logger.warning("Failed to load API key: %s", e)
35
+ return None
36
+
37
+
38
+ def check_api_key(request_headers: dict, is_write: bool = False) -> bool:
39
+ """
40
+ Check if request is authorized.
41
+
42
+ Returns True if:
43
+ - No API key file exists (auth not configured — backward compatible)
44
+ - Request is a read operation (reads always allowed)
45
+ - Request has valid X-SLM-API-Key header matching the key file
46
+ """
47
+ key_hash = _load_api_key_hash()
48
+
49
+ # No key file = auth not configured = allow all (backward compat)
50
+ if key_hash is None:
51
+ return True
52
+
53
+ # Read operations always allowed
54
+ if not is_write:
55
+ return True
56
+
57
+ # Write operations require valid key
58
+ provided_key = request_headers.get("x-slm-api-key", "")
59
+ if not provided_key:
60
+ return False
61
+
62
+ provided_hash = hashlib.sha256(provided_key.encode()).hexdigest()
63
+ return provided_hash == key_hash
@@ -178,7 +178,7 @@ class CacheManager:
178
178
 
179
179
  # For other types, try JSON serialization size
180
180
  return len(json.dumps(value, default=str))
181
- except:
181
+ except Exception:
182
182
  # Fallback: assume moderate size
183
183
  return 1000
184
184
 
@@ -73,6 +73,7 @@ logger = logging.getLogger("superlocalmemory.db")
73
73
  DEFAULT_BUSY_TIMEOUT_MS = 5000
74
74
  DEFAULT_READ_POOL_SIZE = 4
75
75
  WRITE_QUEUE_SENTINEL = None # Signals the writer thread to stop
76
+ MAX_READ_CONNECTIONS = 50 # Maximum concurrent read connections
76
77
 
77
78
 
78
79
  class DbConnectionManager:
@@ -155,7 +156,7 @@ class DbConnectionManager:
155
156
  self._read_connections_lock = threading.Lock()
156
157
 
157
158
  # Write queue and dedicated writer thread
158
- self._write_queue: Queue = Queue()
159
+ self._write_queue: Queue = Queue(maxsize=1000)
159
160
  self._writer_thread = threading.Thread(
160
161
  target=self._writer_loop,
161
162
  name="slm-db-writer",
@@ -190,6 +191,8 @@ class DbConnectionManager:
190
191
  conn.execute(f"PRAGMA busy_timeout={DEFAULT_BUSY_TIMEOUT_MS}")
191
192
  # Sync mode NORMAL is safe with WAL and faster than FULL
192
193
  conn.execute("PRAGMA synchronous=NORMAL")
194
+ # Incremental auto-vacuum reclaims space without full rebuild (v2.6)
195
+ conn.execute("PRAGMA auto_vacuum=INCREMENTAL")
193
196
  conn.close()
194
197
  except Exception:
195
198
  conn.close()
@@ -252,7 +255,18 @@ class DbConnectionManager:
252
255
  self._remove_from_pool(conn)
253
256
  conn = None
254
257
 
255
- # Create new read connection for this thread
258
+ # Create new read connection for this thread (with pool limit)
259
+ with self._read_connections_lock:
260
+ if len(self._read_connections) >= MAX_READ_CONNECTIONS:
261
+ logger.warning(
262
+ "Read connection pool at capacity (%d). Reusing oldest connection.",
263
+ MAX_READ_CONNECTIONS
264
+ )
265
+ # Reuse the least recently used connection
266
+ conn = self._read_connections[0]
267
+ self._local.read_conn = conn
268
+ return conn
269
+
256
270
  conn = self._create_connection(readonly=True)
257
271
  self._local.read_conn = conn
258
272
 
package/src/event_bus.py CHANGED
@@ -131,6 +131,10 @@ class EventBus:
131
131
  self._listeners: List[Callable[[dict], None]] = []
132
132
  self._listeners_lock = threading.Lock()
133
133
 
134
+ # Auto-prune tracking: lightweight heuristic trigger
135
+ self._write_count = 0
136
+ self._last_prune = datetime.now()
137
+
134
138
  # Initialize schema
135
139
  self._init_schema()
136
140
 
@@ -266,6 +270,17 @@ class EventBus:
266
270
  self._notify_listeners(event)
267
271
 
268
272
  logger.debug("Event emitted: type=%s, id=%s, memory_id=%s", event_type, event_id, memory_id)
273
+
274
+ # Auto-prune every 100 events or every 24 hours, whichever comes first
275
+ self._write_count += 1
276
+ if self._write_count >= 100 or (datetime.now() - self._last_prune).total_seconds() > 86400:
277
+ try:
278
+ self.prune_events()
279
+ self._write_count = 0
280
+ self._last_prune = datetime.now()
281
+ except Exception:
282
+ pass # Don't let prune failures block event emission
283
+
269
284
  return event_id
270
285
 
271
286
  def _persist_event(self, event: dict) -> Optional[int]: