spatial-memory-mcp 1.6.1__py3-none-any.whl → 1.7.0__py3-none-any.whl

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between versions as they appear in the public registry.

Potentially problematic release.

@@ -1,6 +1,6 @@
 """Spatial Memory MCP Server - Vector-based semantic memory for LLMs."""

-__version__ = "1.6.1"
+__version__ = "1.7.0"
 __author__ = "arman-tech"

 # Re-export core components for convenience
@@ -44,7 +44,7 @@ def run_migrate(args: argparse.Namespace) -> int:
         format="%(levelname)s: %(message)s",
     )

-    print(f"Spatial Memory Migration Tool")
+    print("Spatial Memory Migration Tool")
     print(f"Target schema version: {CURRENT_SCHEMA_VERSION}")
     print(f"Database path: {settings.memory_path}")
     print()
@@ -109,7 +109,7 @@ def run_migrate(args: argparse.Namespace) -> int:
         return 1

     if result.migrations_applied:
-        print(f"\nRolled back migrations:")
+        print("\nRolled back migrations:")
         for v in result.migrations_applied:
             print(f" - {v}")
     print(f"\nCurrent version: {result.current_version}")
@@ -204,7 +204,7 @@ def main() -> NoReturn:
     )

     # Server command (default)
-    server_parser = subparsers.add_parser(
+    subparsers.add_parser(
         "serve",
         help="Start the MCP server (default if no command given)",
     )
@@ -606,6 +606,8 @@ class LanceDBMemoryRepository:
             tags=record.get("tags", []),
             importance=record["importance"],
             created_at=record["created_at"],
+            last_accessed=record.get("last_accessed"),
+            access_count=record.get("access_count", 0),
             metadata=record.get("metadata", {}),
             vector=vector,
         )
spatial_memory/config.py CHANGED
@@ -47,7 +47,7 @@ class Settings(BaseSettings):
     )
     embedding_backend: str = Field(
         default="auto",
-        description="Embedding backend: 'auto' (ONNX if available, else PyTorch), 'onnx', or 'pytorch'",
+        description="Embedding backend: 'auto', 'onnx', or 'pytorch'",
     )

     # OpenAI (optional)
@@ -593,6 +593,43 @@ class Settings(BaseSettings):
         description="Maximum queue depth when backpressure is enabled",
     )

+    # =========================================================================
+    # v1.6.3: Auto-Decay Settings
+    # =========================================================================
+
+    auto_decay_enabled: bool = Field(
+        default=True,
+        description="Enable automatic decay calculation during recall operations",
+    )
+    auto_decay_persist_enabled: bool = Field(
+        default=True,
+        description="Persist decay updates to database (disable for read-only scenarios)",
+    )
+    auto_decay_persist_batch_size: int = Field(
+        default=100,
+        ge=10,
+        le=1000,
+        description="Batch size for persisting decay updates to database",
+    )
+    auto_decay_persist_flush_interval_seconds: float = Field(
+        default=5.0,
+        ge=1.0,
+        le=60.0,
+        description="Interval between background flush operations for decay updates",
+    )
+    auto_decay_min_change_threshold: float = Field(
+        default=0.01,
+        ge=0.001,
+        le=0.1,
+        description="Minimum importance change to trigger database persistence (1% default)",
+    )
+    auto_decay_max_queue_size: int = Field(
+        default=10000,
+        ge=1000,
+        le=100000,
+        description="Maximum queue size for pending decay updates (backpressure control)",
+    )
+
     model_config = {
         "env_prefix": "SPATIAL_MEMORY_",
         "env_file": ".env",
@@ -11,7 +11,6 @@ from spatial_memory.core.db_indexes import IndexManager
 from spatial_memory.core.db_search import SearchManager
 from spatial_memory.core.db_versioning import VersionManager
 from spatial_memory.core.embeddings import EmbeddingService
-from spatial_memory.core.rate_limiter import RateLimiter
 from spatial_memory.core.errors import (
     ClusteringError,
     ConfigurationError,
@@ -45,7 +44,7 @@ from spatial_memory.core.models import (
     VisualizationEdge,
     VisualizationNode,
 )
-from spatial_memory.core.utils import to_aware_utc, to_naive_utc, utc_now, utc_now_naive
+from spatial_memory.core.rate_limiter import RateLimiter
 from spatial_memory.core.tracing import (
     RequestContext,
     TimingContext,
@@ -55,6 +54,7 @@ from spatial_memory.core.tracing import (
     request_context,
     set_context,
 )
+from spatial_memory.core.utils import to_aware_utc, to_naive_utc, utc_now, utc_now_naive

 __all__ = [
     # Errors - Base
@@ -26,7 +26,6 @@ from spatial_memory.core.lifecycle_ops import (
 from spatial_memory.core.models import Memory, MemorySource

 if TYPE_CHECKING:
-    import numpy as np

     from spatial_memory.ports.repositories import (
         EmbeddingServiceProtocol,
@@ -14,7 +14,6 @@ from __future__ import annotations

 import json
 import logging
-import math
 import threading
 import time
 import uuid
@@ -30,8 +29,8 @@ import lancedb.index
 import numpy as np
 import pyarrow as pa
 import pyarrow.parquet as pq
-
-from filelock import FileLock, Timeout as FileLockTimeout
+from filelock import FileLock
+from filelock import Timeout as FileLockTimeout

 from spatial_memory.core.connection_pool import ConnectionPool
 from spatial_memory.core.db_idempotency import IdempotencyManager, IdempotencyRecord
@@ -47,8 +46,12 @@ from spatial_memory.core.errors import (
     StorageError,
     ValidationError,
 )
-from spatial_memory.core.filesystem import detect_filesystem_type, get_filesystem_warning_message, is_network_filesystem
-from spatial_memory.core.utils import to_aware_utc, utc_now
+from spatial_memory.core.filesystem import (
+    detect_filesystem_type,
+    get_filesystem_warning_message,
+    is_network_filesystem,
+)
+from spatial_memory.core.utils import utc_now

 # Import centralized validation functions
 from spatial_memory.core.validation import (
@@ -221,7 +224,7 @@ def with_write_lock(func: F) -> F:
     Uses RLock to allow nested calls (e.g., bulk_import -> insert_batch).
     """
     @wraps(func)
-    def wrapper(self: "Database", *args: Any, **kwargs: Any) -> Any:
+    def wrapper(self: Database, *args: Any, **kwargs: Any) -> Any:
         with self._write_lock:
             return func(self, *args, **kwargs)
     return cast(F, wrapper)
@@ -235,7 +238,7 @@ def with_stale_connection_recovery(func: F) -> F:
     reconnects, and retries the operation once.
     """
     @wraps(func)
-    def wrapper(self: "Database", *args: Any, **kwargs: Any) -> Any:
+    def wrapper(self: Database, *args: Any, **kwargs: Any) -> Any:
         try:
             return func(self, *args, **kwargs)
         except Exception as e:
@@ -372,7 +375,7 @@ class ProcessLockManager:
         self._set_depth(depth - 1)
         return False  # Still holding

-    def __enter__(self) -> "ProcessLockManager":
+    def __enter__(self) -> ProcessLockManager:
         """Enter context manager - acquire lock."""
         self.acquire()
         return self
@@ -397,7 +400,7 @@ def with_process_lock(func: F) -> F:
         ...
     """
     @wraps(func)
-    def wrapper(self: "Database", *args: Any, **kwargs: Any) -> Any:
+    def wrapper(self: Database, *args: Any, **kwargs: Any) -> Any:
         if self._process_lock is None:
             return func(self, *args, **kwargs)
         with self._process_lock:
@@ -1705,7 +1708,7 @@ class Database:
         old_namespace = _validate_namespace(old_namespace)
         new_namespace = _validate_namespace(new_namespace)
         safe_old = _sanitize_string(old_namespace)
-        safe_new = _sanitize_string(new_namespace)
+        _sanitize_string(new_namespace)  # Validate but don't store unused result

         try:
             # Check if source namespace exists
@@ -1835,7 +1838,7 @@ class Database:
         if not memory_ids:
             return None

-        safe_namespace = _sanitize_string(target_namespace)
+        _sanitize_string(target_namespace)  # Validate namespace
         now = utc_now()

         # Process in batches for large rollbacks
@@ -380,7 +380,8 @@ class IndexManager:
             index_type = "IVF_FLAT"
             sample_rate = max(16, count // 4)  # Lower sample rate for small data
         else:
-            index_type = self._db.index_type if self._db.index_type in ("IVF_PQ", "IVF_FLAT") else "IVF_PQ"
+            valid_types = ("IVF_PQ", "IVF_FLAT")
+            index_type = self._db.index_type if self._db.index_type in valid_types else "IVF_PQ"

         # Ensure num_partitions < num_vectors for KMeans clustering
         if num_partitions >= count:
@@ -24,7 +24,6 @@ Usage:

 from __future__ import annotations

-import json
 import logging
 from abc import ABC, abstractmethod
 from dataclasses import dataclass, field
@@ -18,6 +18,8 @@ import numpy as np
 from spatial_memory.core.errors import StorageError, ValidationError
 from spatial_memory.core.validation import (
     sanitize_string as _sanitize_string,
+)
+from spatial_memory.core.validation import (
     validate_namespace as _validate_namespace,
 )

@@ -312,7 +314,8 @@ class SearchManager:
         ])

         # Execute search and get results
-        # LanceDB returns results with _query_index to identify which query each result belongs to
+        # LanceDB returns results with _query_index to identify which query
+        # each result belongs to
         search = search.limit(limit_per_query)
         results_df = search.to_pandas()

@@ -265,13 +265,13 @@ class EmbeddingService:
                 backend="onnx",
             )
             logger.info(
-                f"Using ONNX Runtime backend (2-3x faster inference)"
+                "Using ONNX Runtime backend (2-3x faster inference)"
             )
         else:
             # Use default PyTorch backend
             self._model = SentenceTransformer(self.model_name)
             logger.info(
-                f"Using PyTorch backend"
+                "Using PyTorch backend"
             )

         self._dimensions = self._model.get_sentence_embedding_dimension()
@@ -23,7 +23,6 @@ import re
 import stat
 import urllib.parse
 from collections.abc import Sequence
-from io import BufferedReader
 from pathlib import Path
 from typing import BinaryIO

@@ -52,6 +52,15 @@ class MemoryResult(BaseModel):
     tags: list[str] = Field(default_factory=list)
     importance: float
     created_at: datetime
+    last_accessed: datetime | None = Field(
+        default=None,
+        description="When the memory was last accessed (for auto-decay)",
+    )
+    access_count: int = Field(
+        default=0,
+        ge=0,
+        description="Number of times the memory has been accessed (for auto-decay)",
+    )
     metadata: dict[str, Any] = Field(default_factory=dict)
     vector: list[float] | None = Field(
         default=None,
@@ -582,6 +591,8 @@ class HybridMemoryMatch:
     vector_score: float | None = None
     fts_score: float | None = None
     combined_score: float = 0.0
+    last_accessed: datetime | None = None  # For auto-decay
+    access_count: int = 0  # For auto-decay


 @dataclass
@@ -626,3 +637,23 @@ class ExportImportConfig:
     csv_include_vectors: bool = False
     max_export_records: int = 0  # 0 = unlimited
     max_import_records: int = 100_000  # Maximum records per import
+
+
+@dataclass
+class AutoDecayConfig:
+    """Configuration for automatic decay during recall operations.
+
+    Auto-decay calculates effective importance in real-time during searches,
+    re-ranking results based on time-decayed importance. Updates can optionally
+    be persisted to the database in the background.
+    """
+
+    enabled: bool = True
+    persist_enabled: bool = True
+    persist_batch_size: int = 100
+    persist_flush_interval_seconds: float = 5.0
+    min_change_threshold: float = 0.01  # Only persist changes > 1%
+    max_queue_size: int = 10000
+    half_life_days: float = 30.0
+    min_importance_floor: float = 0.1
+    access_weight: float = 0.3  # Weight of access count in slowing decay
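
The DecayManager that consumes this config (spatial_memory/services/decay_manager.py) is not included in this diff, so the exact decay formula is not visible here. A hypothetical sketch of how these fields could combine, assuming exponential half-life decay slowed by access frequency and clamped at the floor:

from datetime import datetime, timezone

def effective_importance(
    importance: float,
    created_at: datetime,  # assumed to be timezone-aware UTC
    access_count: int,
    cfg: AutoDecayConfig,
) -> float:
    # Assumed formula, not taken from this diff: frequent access stretches
    # the half-life, and the result never drops below the floor.
    age_days = (datetime.now(timezone.utc) - created_at).total_seconds() / 86400.0
    half_life = cfg.half_life_days * (1.0 + cfg.access_weight * access_count)
    decayed = importance * 0.5 ** (age_days / half_life)
    return max(cfg.min_importance_floor, decayed)

Under this reading, min_change_threshold would decide whether the recomputed value differs enough from the stored importance to be queued for the batched background persistence configured in config.py.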
@@ -13,6 +13,7 @@ from __future__ import annotations

 from typing import Any, TypedDict

+from typing_extensions import NotRequired

 # =============================================================================
 # Nested TypedDicts (shared across multiple responses)
@@ -30,6 +31,7 @@ class MemoryResultDict(TypedDict):
     importance: float
     created_at: str  # ISO 8601 format
     metadata: dict[str, Any]
+    effective_importance: NotRequired[float]  # Time-decayed importance (auto-decay)


 class MemoryReferenceDict(TypedDict):
@@ -231,6 +233,7 @@ class HybridMemoryDict(TypedDict):
     metadata: dict[str, Any]
     vector_score: float | None
     fts_score: float | None
+    effective_importance: NotRequired[float]  # Time-decayed importance (auto-decay)


 # =============================================================================
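
NotRequired (PEP 655, re-exported by typing_extensions) marks effective_importance as an optional key, so responses produced with auto-decay disabled still satisfy these TypedDicts. A small consuming sketch; rank_key is a hypothetical helper, not part of the package:

from spatial_memory.core.response_types import MemoryResultDict

def rank_key(m: MemoryResultDict) -> float:
    # Prefer the decayed score when present; fall back to stored importance.
    return m.get("effective_importance", m["importance"])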
spatial_memory/factory.py CHANGED
@@ -22,7 +22,9 @@ from spatial_memory.config import Settings
 from spatial_memory.core.cache import ResponseCache
 from spatial_memory.core.database import Database
 from spatial_memory.core.embeddings import EmbeddingService
+from spatial_memory.core.models import AutoDecayConfig
 from spatial_memory.core.rate_limiter import AgentAwareRateLimiter, RateLimiter
+from spatial_memory.services.decay_manager import DecayManager
 from spatial_memory.services.export_import import ExportImportConfig, ExportImportService
 from spatial_memory.services.lifecycle import LifecycleConfig, LifecycleService
 from spatial_memory.services.memory import MemoryService
@@ -55,6 +57,7 @@ class ServiceContainer:
         lifecycle: Lifecycle service for decay/reinforce/consolidate.
         utility: Utility service for stats/namespaces/hybrid search.
         export_import: Export/import service for data portability.
+        decay_manager: Automatic decay manager for real-time importance decay.
         rate_limiter: Simple rate limiter (if per-agent disabled).
         agent_rate_limiter: Per-agent rate limiter (if enabled).
         cache: Response cache for read operations.
@@ -71,6 +74,7 @@ class ServiceContainer:
     lifecycle: LifecycleService
     utility: UtilityService
     export_import: ExportImportService
+    decay_manager: DecayManager | None
     rate_limiter: RateLimiter | None
     agent_rate_limiter: AgentAwareRateLimiter | None
     cache: ResponseCache | None
@@ -351,6 +355,35 @@ class ServiceFactory:
             self._settings.response_cache_regions_ttl,
         )

+    def create_decay_manager(
+        self,
+        repository: MemoryRepositoryProtocol,
+    ) -> DecayManager | None:
+        """Create the decay manager based on settings.
+
+        Args:
+            repository: Repository for persisting decay updates.
+
+        Returns:
+            DecayManager if auto-decay is enabled, None otherwise.
+        """
+        if not self._settings.auto_decay_enabled:
+            return None
+
+        config = AutoDecayConfig(
+            enabled=self._settings.auto_decay_enabled,
+            persist_enabled=self._settings.auto_decay_persist_enabled,
+            persist_batch_size=self._settings.auto_decay_persist_batch_size,
+            persist_flush_interval_seconds=self._settings.auto_decay_persist_flush_interval_seconds,
+            min_change_threshold=self._settings.auto_decay_min_change_threshold,
+            max_queue_size=self._settings.auto_decay_max_queue_size,
+            half_life_days=self._settings.decay_default_half_life_days,
+            min_importance_floor=self._settings.decay_min_importance_floor,
+            access_weight=0.3,  # Default access weight
+        )
+
+        return DecayManager(repository=repository, config=config)
+
     def create_all(self) -> ServiceContainer:
         """Create all services with proper dependency wiring.

@@ -383,6 +416,9 @@ class ServiceFactory:
         utility = self.create_utility_service(repository, embeddings)
         export_import = self.create_export_import_service(repository, embeddings)

+        # Create decay manager
+        decay_manager = self.create_decay_manager(repository)
+
         # Create rate limiter
         rate_limiter, agent_rate_limiter, per_agent_enabled = self.create_rate_limiter()

@@ -398,6 +434,7 @@ class ServiceFactory:
             lifecycle=lifecycle,
             utility=utility,
             export_import=export_import,
+            decay_manager=decay_manager,
             rate_limiter=rate_limiter,
             agent_rate_limiter=agent_rate_limiter,
             cache=cache,
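
A minimal wiring sketch for the new container field, assuming a ServiceFactory(settings) constructor (its signature is not shown in this diff); SpatialMemoryServer makes the equivalent start()/stop() calls itself, as the server.py hunks below show:

from spatial_memory.config import get_settings
from spatial_memory.factory import ServiceFactory

services = ServiceFactory(get_settings()).create_all()

# decay_manager is None when auto_decay_enabled is false in Settings.
if services.decay_manager is not None:
    services.decay_manager.start()
try:
    ...  # serve requests via services.memory, services.utility, etc.
finally:
    if services.decay_manager is not None:
        services.decay_manager.stop()  # flushes pending decay updates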
@@ -12,7 +12,6 @@ from typing import Any, Protocol

 import numpy as np

-from spatial_memory.core.errors import NamespaceNotFoundError
 from spatial_memory.core.models import Memory, MemoryResult


spatial_memory/server.py CHANGED
@@ -25,7 +25,6 @@ from mcp.types import TextContent, Tool

 from spatial_memory import __version__
 from spatial_memory.config import ConfigurationError, get_settings, validate_startup
-from spatial_memory.factory import ServiceFactory
 from spatial_memory.core.database import (
     clear_connection_cache,
     set_connection_pool_max_size,
@@ -46,6 +45,10 @@ from spatial_memory.core.errors import (
     SpatialMemoryError,
     ValidationError,
 )
+from spatial_memory.core.health import HealthChecker
+from spatial_memory.core.logging import configure_logging
+from spatial_memory.core.metrics import is_available as metrics_available
+from spatial_memory.core.metrics import record_request
 from spatial_memory.core.response_types import (
     ConsolidateResponse,
     DecayResponse,
@@ -71,15 +74,12 @@ from spatial_memory.core.response_types import (
     VisualizeResponse,
     WanderResponse,
 )
-from spatial_memory.core.health import HealthChecker
-from spatial_memory.core.logging import configure_logging
-from spatial_memory.core.metrics import is_available as metrics_available
-from spatial_memory.core.metrics import record_request
 from spatial_memory.core.tracing import (
     RequestContext,
     TimingContext,
     request_context,
 )
+from spatial_memory.factory import ServiceFactory
 from spatial_memory.tools import TOOLS

 if TYPE_CHECKING:
@@ -198,6 +198,12 @@ class SpatialMemoryServer:
         self._cache = services.cache
         self._regions_cache_ttl = services.regions_cache_ttl

+        # Auto-decay manager
+        self._decay_manager = services.decay_manager
+        if self._decay_manager is not None:
+            self._decay_manager.start()
+            logger.info("Auto-decay manager started")
+
         # ThreadPoolExecutor for non-blocking embedding operations
         self._executor = ThreadPoolExecutor(
             max_workers=2,
@@ -430,21 +436,50 @@ class SpatialMemoryServer:
             namespace=arguments.get("namespace"),
             min_similarity=arguments.get("min_similarity", 0.0),
         )
+
+        # Convert to dict list for potential decay processing
+        memories_list = [
+            {
+                "id": m.id,
+                "content": m.content,
+                "similarity": m.similarity,
+                "namespace": m.namespace,
+                "tags": m.tags,
+                "importance": m.importance,
+                "created_at": m.created_at.isoformat(),
+                "metadata": m.metadata,
+                "last_accessed": m.last_accessed,
+                "access_count": m.access_count,
+            }
+            for m in recall_result.memories
+        ]
+
+        # Apply auto-decay if enabled (adds effective_importance, re-ranks)
+        if self._decay_manager is not None and self._decay_manager.enabled:
+            memories_list = self._decay_manager.apply_decay_to_results(
+                memories_list, rerank=True
+            )
+
+        # Build response - include effective_importance if present
+        response_memories = []
+        for m in memories_list:
+            mem_dict: dict[str, Any] = {
+                "id": m["id"],
+                "content": m["content"],
+                "similarity": m["similarity"],
+                "namespace": m["namespace"],
+                "tags": m["tags"],
+                "importance": m["importance"],
+                "created_at": m["created_at"],
+                "metadata": m["metadata"],
+            }
+            if "effective_importance" in m:
+                mem_dict["effective_importance"] = m["effective_importance"]
+            response_memories.append(mem_dict)
+
         return {
-            "memories": [
-                {
-                    "id": m.id,
-                    "content": m.content,
-                    "similarity": m.similarity,
-                    "namespace": m.namespace,
-                    "tags": m.tags,
-                    "importance": m.importance,
-                    "created_at": m.created_at.isoformat(),
-                    "metadata": m.metadata,
-                }
-                for m in recall_result.memories
-            ],
-            "total": recall_result.total,
+            "memories": response_memories,  # type: ignore[typeddict-item]
+            "total": len(response_memories),
         }

     def _handle_nearby(self, arguments: dict[str, Any]) -> NearbyResponse:
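
When the decay manager is active, each recall memory gains the optional effective_importance key and the list is re-ranked by it. An illustrative response payload (all values invented):

{
    "memories": [
        {
            "id": "a1b2c3",
            "content": "Decided on PostgreSQL for relational integrity...",
            "similarity": 0.91,
            "namespace": "decisions",
            "tags": ["postgresql", "architecture"],
            "importance": 0.9,
            "created_at": "2024-11-02T14:31:00+00:00",
            "metadata": {},
            "effective_importance": 0.62
        }
    ],
    "total": 1
}

Note that "total" now reflects the length of the returned list rather than echoing recall_result.total.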
@@ -926,27 +961,56 @@ class SpatialMemoryServer:
             namespace=arguments.get("namespace"),
             min_similarity=arguments.get("min_similarity", 0.0),
         )
+
+        # Convert to dict list for potential decay processing
+        memories_list = [
+            {
+                "id": m.id,
+                "content": m.content,
+                "similarity": m.similarity,
+                "namespace": m.namespace,
+                "tags": m.tags,
+                "importance": m.importance,
+                "created_at": m.created_at.isoformat() if m.created_at else None,
+                "metadata": m.metadata,
+                "vector_score": m.vector_score,
+                "fts_score": m.fts_score,
+                "last_accessed": m.last_accessed,
+                "access_count": m.access_count,
+            }
+            for m in hybrid_result.memories
+        ]
+
+        # Apply auto-decay if enabled (adds effective_importance, re-ranks)
+        if self._decay_manager is not None and self._decay_manager.enabled:
+            memories_list = self._decay_manager.apply_decay_to_results(
+                memories_list, rerank=True
+            )
+
+        # Build response - include effective_importance if present
+        response_memories = []
+        for m in memories_list:
+            mem_dict: dict[str, Any] = {
+                "id": m["id"],
+                "content": m["content"],
+                "similarity": m["similarity"],
+                "namespace": m["namespace"],
+                "tags": m["tags"],
+                "importance": m["importance"],
+                "created_at": m["created_at"],
+                "metadata": m["metadata"],
+                "vector_score": m.get("vector_score"),
+                "fts_score": m.get("fts_score"),
+            }
+            if "effective_importance" in m:
+                mem_dict["effective_importance"] = m["effective_importance"]
+            response_memories.append(mem_dict)
+
         return {
             "query": hybrid_result.query,
             "alpha": hybrid_result.alpha,
-            "memories": [
-                {
-                    "id": m.id,
-                    "content": m.content,
-                    "similarity": m.similarity,
-                    "namespace": m.namespace,
-                    "tags": m.tags,
-                    "importance": m.importance,
-                    "created_at": (
-                        m.created_at.isoformat() if m.created_at else None
-                    ),
-                    "metadata": m.metadata,
-                    "vector_score": m.vector_score,
-                    "fts_score": m.fts_score,
-                }
-                for m in hybrid_result.memories
-            ],
-            "total": hybrid_result.total,
+            "memories": response_memories,  # type: ignore[typeddict-item]
+            "total": len(response_memories),
             "search_type": hybrid_result.search_type,
         }

@@ -999,10 +1063,12 @@ class SpatialMemoryServer:
         """
         return '''## Spatial Memory System

-You have access to a persistent semantic memory system. Use it proactively to build cumulative knowledge across sessions.
+You have access to a persistent semantic memory system. Use it proactively to
+build cumulative knowledge across sessions.

 ### Session Start
-At conversation start, call `recall` with the user's apparent task/context to load relevant memories. Present insights naturally:
+At conversation start, call `recall` with the user's apparent task/context to
+load relevant memories. Present insights naturally:
 - Good: "Based on previous work, you decided to use PostgreSQL because..."
 - Bad: "The database returned: [{id: '...', content: '...'}]"
@@ -1013,11 +1079,13 @@ After these events, ask briefly "Save this? y/n" (minimal friction):
 - **Patterns**: "This pattern works...", "The trick is...", "Always do X when..."
 - **Discoveries**: "I found that...", "Important:...", "TIL..."

-Do NOT ask for trivial information. Only prompt for insights that would help future sessions.
+Do NOT ask for trivial information. Only prompt for insights that would help
+future sessions.

 ### Saving Memories
 When user confirms, save with:
-- **Detailed content**: Include full context, reasoning, and specifics. Future agents need complete information.
+- **Detailed content**: Include full context, reasoning, and specifics. Future
+  agents need complete information.
 - **Contextual namespace**: Use project name, or categories like "decisions", "errors", "patterns"
 - **Descriptive tags**: Technologies, concepts, error types involved
 - **High importance (0.8-1.0)**: For decisions and critical fixes
@@ -1054,6 +1122,10 @@ Then use `extract` to automatically capture important information.

     def close(self) -> None:
         """Clean up resources."""
+        # Stop the decay manager (flushes pending updates)
+        if self._decay_manager is not None:
+            self._decay_manager.stop()
+
         # Shutdown the thread pool executor
         if hasattr(self, "_executor"):
             self._executor.shutdown(wait=False)