superlocalmemory 2.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (100)
  1. package/ATTRIBUTION.md +140 -0
  2. package/CHANGELOG.md +1749 -0
  3. package/LICENSE +21 -0
  4. package/README.md +600 -0
  5. package/bin/aider-smart +72 -0
  6. package/bin/slm +202 -0
  7. package/bin/slm-npm +73 -0
  8. package/bin/slm.bat +195 -0
  9. package/bin/slm.cmd +10 -0
  10. package/bin/superlocalmemoryv2:list +3 -0
  11. package/bin/superlocalmemoryv2:profile +3 -0
  12. package/bin/superlocalmemoryv2:recall +3 -0
  13. package/bin/superlocalmemoryv2:remember +3 -0
  14. package/bin/superlocalmemoryv2:reset +3 -0
  15. package/bin/superlocalmemoryv2:status +3 -0
  16. package/completions/slm.bash +58 -0
  17. package/completions/slm.zsh +76 -0
  18. package/configs/antigravity-mcp.json +13 -0
  19. package/configs/chatgpt-desktop-mcp.json +7 -0
  20. package/configs/claude-desktop-mcp.json +15 -0
  21. package/configs/codex-mcp.toml +13 -0
  22. package/configs/cody-commands.json +29 -0
  23. package/configs/continue-mcp.yaml +14 -0
  24. package/configs/continue-skills.yaml +26 -0
  25. package/configs/cursor-mcp.json +15 -0
  26. package/configs/gemini-cli-mcp.json +11 -0
  27. package/configs/jetbrains-mcp.json +11 -0
  28. package/configs/opencode-mcp.json +12 -0
  29. package/configs/perplexity-mcp.json +9 -0
  30. package/configs/vscode-copilot-mcp.json +12 -0
  31. package/configs/windsurf-mcp.json +16 -0
  32. package/configs/zed-mcp.json +12 -0
  33. package/docs/ARCHITECTURE.md +877 -0
  34. package/docs/CLI-COMMANDS-REFERENCE.md +425 -0
  35. package/docs/COMPETITIVE-ANALYSIS.md +210 -0
  36. package/docs/COMPRESSION-README.md +390 -0
  37. package/docs/GRAPH-ENGINE.md +503 -0
  38. package/docs/MCP-MANUAL-SETUP.md +720 -0
  39. package/docs/MCP-TROUBLESHOOTING.md +787 -0
  40. package/docs/PATTERN-LEARNING.md +363 -0
  41. package/docs/PROFILES-GUIDE.md +453 -0
  42. package/docs/RESET-GUIDE.md +353 -0
  43. package/docs/SEARCH-ENGINE-V2.2.0.md +748 -0
  44. package/docs/SEARCH-INTEGRATION-GUIDE.md +502 -0
  45. package/docs/UI-SERVER.md +254 -0
  46. package/docs/UNIVERSAL-INTEGRATION.md +432 -0
  47. package/docs/V2.2.0-OPTIONAL-SEARCH.md +666 -0
  48. package/docs/WINDOWS-INSTALL-README.txt +34 -0
  49. package/docs/WINDOWS-POST-INSTALL.txt +45 -0
  50. package/docs/example_graph_usage.py +148 -0
  51. package/hooks/memory-list-skill.js +130 -0
  52. package/hooks/memory-profile-skill.js +284 -0
  53. package/hooks/memory-recall-skill.js +109 -0
  54. package/hooks/memory-remember-skill.js +127 -0
  55. package/hooks/memory-reset-skill.js +274 -0
  56. package/install-skills.sh +436 -0
  57. package/install.ps1 +417 -0
  58. package/install.sh +755 -0
  59. package/mcp_server.py +585 -0
  60. package/package.json +94 -0
  61. package/requirements-core.txt +24 -0
  62. package/requirements.txt +10 -0
  63. package/scripts/postinstall.js +126 -0
  64. package/scripts/preuninstall.js +57 -0
  65. package/skills/slm-build-graph/SKILL.md +423 -0
  66. package/skills/slm-list-recent/SKILL.md +348 -0
  67. package/skills/slm-recall/SKILL.md +325 -0
  68. package/skills/slm-remember/SKILL.md +194 -0
  69. package/skills/slm-status/SKILL.md +363 -0
  70. package/skills/slm-switch-profile/SKILL.md +442 -0
  71. package/src/__pycache__/cache_manager.cpython-312.pyc +0 -0
  72. package/src/__pycache__/embedding_engine.cpython-312.pyc +0 -0
  73. package/src/__pycache__/graph_engine.cpython-312.pyc +0 -0
  74. package/src/__pycache__/hnsw_index.cpython-312.pyc +0 -0
  75. package/src/__pycache__/hybrid_search.cpython-312.pyc +0 -0
  76. package/src/__pycache__/memory-profiles.cpython-312.pyc +0 -0
  77. package/src/__pycache__/memory-reset.cpython-312.pyc +0 -0
  78. package/src/__pycache__/memory_compression.cpython-312.pyc +0 -0
  79. package/src/__pycache__/memory_store_v2.cpython-312.pyc +0 -0
  80. package/src/__pycache__/migrate_v1_to_v2.cpython-312.pyc +0 -0
  81. package/src/__pycache__/pattern_learner.cpython-312.pyc +0 -0
  82. package/src/__pycache__/query_optimizer.cpython-312.pyc +0 -0
  83. package/src/__pycache__/search_engine_v2.cpython-312.pyc +0 -0
  84. package/src/__pycache__/setup_validator.cpython-312.pyc +0 -0
  85. package/src/__pycache__/tree_manager.cpython-312.pyc +0 -0
  86. package/src/cache_manager.py +520 -0
  87. package/src/embedding_engine.py +671 -0
  88. package/src/graph_engine.py +970 -0
  89. package/src/hnsw_index.py +626 -0
  90. package/src/hybrid_search.py +693 -0
  91. package/src/memory-profiles.py +518 -0
  92. package/src/memory-reset.py +485 -0
  93. package/src/memory_compression.py +999 -0
  94. package/src/memory_store_v2.py +1088 -0
  95. package/src/migrate_v1_to_v2.py +638 -0
  96. package/src/pattern_learner.py +898 -0
  97. package/src/query_optimizer.py +513 -0
  98. package/src/search_engine_v2.py +403 -0
  99. package/src/setup_validator.py +479 -0
  100. package/src/tree_manager.py +720 -0
package/src/cache_manager.py
@@ -0,0 +1,520 @@
+ #!/usr/bin/env python3
+ """
+ SuperLocalMemory V2 - Cache Manager
+
+ Copyright (c) 2026 Varun Pratap Bhardwaj
+ Solution Architect & Original Creator
+
+ Licensed under MIT License (see LICENSE file)
+ Repository: https://github.com/varun369/SuperLocalMemoryV2
+
+ ATTRIBUTION REQUIRED: This notice must be preserved in all copies.
+ """
+
+ """
+ Cache Manager - LRU Cache for Search Results
+
+ Implements Least Recently Used (LRU) cache for search query results to reduce
+ redundant computation and improve response times.
+
+ Key Features:
+ 1. LRU Eviction Policy: Automatically removes least recently used entries
+ 2. TTL Support: Optional time-to-live for cache entries
+ 3. Size-Based Eviction: Maximum cache size in number of entries
+ 4. Memory-Efficient: Uses OrderedDict for O(1) access and updates
+ 5. Thread-Safe: Optional thread safety for concurrent access
+
+ Performance Impact:
+ - Cache hit: ~0.1ms (negligible overhead)
+ - Cache miss: Standard search time
+ - Target cache hit rate: 30-50% for typical usage
+
+ Usage:
+     cache = CacheManager(max_size=100, ttl_seconds=300)
+
+     # Try cache first
+     result = cache.get("python web")
+     if result is None:
+         # Cache miss - perform search
+         result = search_engine.search("python web")
+         cache.put("python web", result)
+ """
+
+ import time
+ import hashlib
+ import json
+ from collections import OrderedDict
+ from typing import Any, Optional, Dict, Tuple
+ from threading import RLock
+
+
+ class CacheEntry:
+     """
+     Single cache entry with metadata.
+
+     Stores:
+     - value: Cached result
+     - timestamp: Creation time for TTL validation
+     - access_count: Number of times accessed (for analytics)
+     - size_estimate: Memory size estimate in bytes
+     """
+
+     __slots__ = ['value', 'timestamp', 'access_count', 'size_estimate']
+
+     def __init__(self, value: Any, size_estimate: int = 0):
+         """
+         Create cache entry.
+
+         Args:
+             value: Value to cache
+             size_estimate: Estimated size in bytes
+         """
+         self.value = value
+         self.timestamp = time.time()
+         self.access_count = 0
+         self.size_estimate = size_estimate
+
+     def is_expired(self, ttl_seconds: Optional[float]) -> bool:
+         """
+         Check if entry has exceeded TTL.
+
+         Args:
+             ttl_seconds: Time-to-live in seconds (None = no expiry)
+
+         Returns:
+             True if expired, False otherwise
+         """
+         if ttl_seconds is None:
+             return False
+
+         age = time.time() - self.timestamp
+         return age > ttl_seconds
+
+     def mark_accessed(self):
+         """Mark entry as accessed (increment counter)."""
+         self.access_count += 1
+
+
+ class CacheManager:
+     """
+     LRU cache manager for search results with TTL support.
+
+     Uses OrderedDict to maintain insertion/access order efficiently.
+     When cache is full, least recently used entry is evicted.
+
+     Thread-safe when thread_safe=True.
+     """
+
+     def __init__(
+         self,
+         max_size: int = 100,
+         ttl_seconds: Optional[float] = 300,
+         thread_safe: bool = False
+     ):
+         """
+         Initialize cache manager.
+
+         Args:
+             max_size: Maximum number of cache entries
+             ttl_seconds: Time-to-live for entries (None = no expiry)
+             thread_safe: Enable thread-safe operations
+         """
+         self.max_size = max_size
+         self.ttl_seconds = ttl_seconds
+         self.thread_safe = thread_safe
+
+         # LRU cache storage
+         self._cache: OrderedDict[str, CacheEntry] = OrderedDict()
+
+         # Thread safety lock
+         self._lock = RLock() if thread_safe else None
+
+         # Statistics
+         self._hits = 0
+         self._misses = 0
+         self._evictions = 0
+         self._total_size_estimate = 0
+
+     def _hash_key(self, query: str, **kwargs) -> str:
+         """
+         Generate cache key from query and parameters.
+
+         Args:
+             query: Search query
+             **kwargs: Additional parameters to include in key
+
+         Returns:
+             Hash string for cache key
+         """
+         # Create deterministic key from query + parameters
+         key_data = {
+             'query': query,
+             **kwargs
+         }
+
+         # Sort keys for deterministic hashing
+         key_str = json.dumps(key_data, sort_keys=True)
+
+         # Hash for compact key
+         return hashlib.sha256(key_str.encode()).hexdigest()[:16]
+
+     def _estimate_size(self, value: Any) -> int:
+         """
+         Estimate memory size of cached value.
+
+         Rough estimate for monitoring memory usage.
+
+         Args:
+             value: Value to estimate
+
+         Returns:
+             Estimated size in bytes
+         """
+         try:
+             # For lists of tuples (typical search results)
+             if isinstance(value, list):
+                 # Rough estimate: 100 bytes per result
+                 return len(value) * 100
+
+             # For other types, try JSON serialization size
+             return len(json.dumps(value, default=str))
+         except:
+             # Fallback: assume moderate size
+             return 1000
+
+     def get(
+         self,
+         query: str,
+         **kwargs
+     ) -> Optional[Any]:
+         """
+         Get cached result for query.
+
+         Args:
+             query: Search query
+             **kwargs: Additional parameters used in cache key
+
+         Returns:
+             Cached result if found and valid, None otherwise
+         """
+         key = self._hash_key(query, **kwargs)
+
+         # Thread-safe access
+         if self._lock:
+             self._lock.acquire()
+
+         try:
+             # Check if key exists
+             if key not in self._cache:
+                 self._misses += 1
+                 return None
+
+             entry = self._cache[key]
+
+             # Check TTL expiry
+             if entry.is_expired(self.ttl_seconds):
+                 # Remove expired entry
+                 del self._cache[key]
+                 self._total_size_estimate -= entry.size_estimate
+                 self._misses += 1
+                 return None
+
+             # Move to end (mark as recently used)
+             self._cache.move_to_end(key)
+             entry.mark_accessed()
+
+             self._hits += 1
+             return entry.value
+
+         finally:
+             if self._lock:
+                 self._lock.release()
+
+     def put(
+         self,
+         query: str,
+         value: Any,
+         **kwargs
+     ) -> None:
+         """
+         Store result in cache.
+
+         Args:
+             query: Search query
+             value: Result to cache
+             **kwargs: Additional parameters used in cache key
+         """
+         key = self._hash_key(query, **kwargs)
+         size_estimate = self._estimate_size(value)
+
+         # Thread-safe access
+         if self._lock:
+             self._lock.acquire()
+
+         try:
+             # Check if key already exists (update)
+             if key in self._cache:
+                 old_entry = self._cache[key]
+                 self._total_size_estimate -= old_entry.size_estimate
+                 del self._cache[key]
+
+             # Check if cache is full
+             if len(self._cache) >= self.max_size:
+                 # Evict least recently used (first item)
+                 evicted_key, evicted_entry = self._cache.popitem(last=False)
+                 self._total_size_estimate -= evicted_entry.size_estimate
+                 self._evictions += 1
+
+             # Add new entry (at end = most recently used)
+             entry = CacheEntry(value, size_estimate)
+             self._cache[key] = entry
+             self._total_size_estimate += size_estimate
+
+         finally:
+             if self._lock:
+                 self._lock.release()
+
+     def invalidate(self, query: str, **kwargs) -> bool:
+         """
+         Remove specific entry from cache.
+
+         Args:
+             query: Search query
+             **kwargs: Additional parameters
+
+         Returns:
+             True if entry was removed, False if not found
+         """
+         key = self._hash_key(query, **kwargs)
+
+         if self._lock:
+             self._lock.acquire()
+
+         try:
+             if key in self._cache:
+                 entry = self._cache[key]
+                 del self._cache[key]
+                 self._total_size_estimate -= entry.size_estimate
+                 return True
+             return False
+
+         finally:
+             if self._lock:
+                 self._lock.release()
+
+     def clear(self) -> None:
+         """Clear entire cache."""
+         if self._lock:
+             self._lock.acquire()
+
+         try:
+             self._cache.clear()
+             self._total_size_estimate = 0
+
+         finally:
+             if self._lock:
+                 self._lock.release()
+
+     def evict_expired(self) -> int:
+         """
+         Manually evict all expired entries.
+
+         Returns:
+             Number of entries evicted
+         """
+         if self.ttl_seconds is None:
+             return 0
+
+         if self._lock:
+             self._lock.acquire()
+
+         try:
+             expired_keys = [
+                 key for key, entry in self._cache.items()
+                 if entry.is_expired(self.ttl_seconds)
+             ]
+
+             for key in expired_keys:
+                 entry = self._cache[key]
+                 del self._cache[key]
+                 self._total_size_estimate -= entry.size_estimate
+
+             return len(expired_keys)
+
+         finally:
+             if self._lock:
+                 self._lock.release()
+
+     def get_stats(self) -> Dict[str, Any]:
+         """
+         Get cache statistics.
+
+         Returns:
+             Dictionary with cache statistics
+         """
+         total_requests = self._hits + self._misses
+         hit_rate = self._hits / total_requests if total_requests > 0 else 0.0
+
+         # Average access count
+         avg_access_count = 0.0
+         if self._cache:
+             avg_access_count = sum(
+                 entry.access_count for entry in self._cache.values()
+             ) / len(self._cache)
+
+         return {
+             'max_size': self.max_size,
+             'current_size': len(self._cache),
+             'ttl_seconds': self.ttl_seconds,
+             'hits': self._hits,
+             'misses': self._misses,
+             'hit_rate': hit_rate,
+             'evictions': self._evictions,
+             'total_size_estimate_kb': self._total_size_estimate / 1024,
+             'avg_access_count': avg_access_count,
+             'thread_safe': self.thread_safe
+         }
+
+     def get_top_queries(self, limit: int = 10) -> list:
+         """
+         Get most frequently accessed queries.
+
+         Args:
+             limit: Maximum number of queries to return
+
+         Returns:
+             List of (query_hash, access_count) tuples
+         """
+         if self._lock:
+             self._lock.acquire()
+
+         try:
+             queries = [
+                 (key, entry.access_count)
+                 for key, entry in self._cache.items()
+             ]
+
+             queries.sort(key=lambda x: x[1], reverse=True)
+             return queries[:limit]
+
+         finally:
+             if self._lock:
+                 self._lock.release()
+
+
+ # CLI interface for testing
+ if __name__ == "__main__":
+     import random
+
+     print("Cache Manager - Demo")
+     print("=" * 60)
+
+     # Initialize cache
+     cache = CacheManager(max_size=5, ttl_seconds=10)
+
+     print("\nCache Configuration:")
+     stats = cache.get_stats()
+     print(f" Max Size: {stats['max_size']}")
+     print(f" TTL: {stats['ttl_seconds']}s")
+
+     # Simulate search queries
+     queries = [
+         "python programming",
+         "javascript web",
+         "machine learning",
+         "database sql",
+         "api rest"
+     ]
+
+     # Mock search results
+     def mock_search(query: str):
+         """Simulate search result."""
+         return [
+             (f"doc_{i}", random.random())
+             for i in range(3)
+         ]
+
+     print("\n" + "=" * 60)
+     print("Simulating Search Operations:")
+     print("=" * 60)
+
+     # First pass - all cache misses
+     print("\nPass 1 (Cold Cache):")
+     for query in queries:
+         result = cache.get(query)
+         if result is None:
+             print(f" MISS: '{query}' - performing search")
+             result = mock_search(query)
+             cache.put(query, result)
+         else:
+             print(f" HIT: '{query}'")
+
+     # Second pass - all cache hits
+     print("\nPass 2 (Warm Cache):")
+     for query in queries:
+         result = cache.get(query)
+         if result is None:
+             print(f" MISS: '{query}' - performing search")
+             result = mock_search(query)
+             cache.put(query, result)
+         else:
+             print(f" HIT: '{query}'")
+
+     # Third pass - add more queries to trigger eviction
+     print("\nPass 3 (Cache Overflow - LRU Eviction):")
+     extra_queries = [
+         "neural networks",
+         "cloud computing",
+         "devops kubernetes"
+     ]
+
+     for query in extra_queries:
+         result = cache.get(query)
+         if result is None:
+             print(f" MISS: '{query}' - performing search")
+             result = mock_search(query)
+             cache.put(query, result)
+
+     # Check if old queries were evicted
+     print("\nPass 4 (Check Evictions):")
+     for query in queries[:3]:
+         result = cache.get(query)
+         if result is None:
+             print(f" EVICTED: '{query}'")
+         else:
+             print(f" RETAINED: '{query}'")
+
+     # Display statistics
+     print("\n" + "=" * 60)
+     print("Cache Statistics:")
+     print("=" * 60)
+
+     stats = cache.get_stats()
+     for key, value in stats.items():
+         if isinstance(value, float):
+             print(f" {key}: {value:.2f}")
+         else:
+             print(f" {key}: {value}")
+
+     # Test TTL expiry
+     print("\n" + "=" * 60)
+     print("Testing TTL Expiry:")
+     print("=" * 60)
+
+     cache_ttl = CacheManager(max_size=10, ttl_seconds=2)
+     cache_ttl.put("test query", mock_search("test"))
+
+     print("\n Immediately after cache:")
+     result = cache_ttl.get("test query")
+     print(f" Result: {'HIT' if result else 'MISS'}")
+
+     print("\n After 3 seconds (exceeds TTL):")
+     time.sleep(3)
+     result = cache_ttl.get("test query")
+     print(f" Result: {'HIT' if result else 'MISS (expired)'}")
+
+     print("\n" + "=" * 60)
+     print("Performance Impact:")
+     print(" Cache hit: ~0.1ms overhead")
+     print(" Cache miss: Standard search time + 0.1ms")
+     print(" Target hit rate: 30-50% for typical usage")