spatial-memory-mcp 1.6.2__py3-none-any.whl → 1.7.0__py3-none-any.whl
This diff shows the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registries.
Potentially problematic release. This version of spatial-memory-mcp might be problematic.
- spatial_memory/__init__.py +1 -1
- spatial_memory/adapters/lancedb_repository.py +2 -0
- spatial_memory/config.py +37 -0
- spatial_memory/core/models.py +31 -0
- spatial_memory/core/response_types.py +4 -0
- spatial_memory/factory.py +37 -0
- spatial_memory/server.py +100 -32
- spatial_memory/services/decay_manager.py +406 -0
- spatial_memory/services/utility.py +12 -0
- {spatial_memory_mcp-1.6.2.dist-info → spatial_memory_mcp-1.7.0.dist-info}/METADATA +1 -1
- {spatial_memory_mcp-1.6.2.dist-info → spatial_memory_mcp-1.7.0.dist-info}/RECORD +14 -13
- {spatial_memory_mcp-1.6.2.dist-info → spatial_memory_mcp-1.7.0.dist-info}/WHEEL +0 -0
- {spatial_memory_mcp-1.6.2.dist-info → spatial_memory_mcp-1.7.0.dist-info}/entry_points.txt +0 -0
- {spatial_memory_mcp-1.6.2.dist-info → spatial_memory_mcp-1.7.0.dist-info}/licenses/LICENSE +0 -0
spatial_memory/adapters/lancedb_repository.py
CHANGED
@@ -606,6 +606,8 @@ class LanceDBMemoryRepository:
             tags=record.get("tags", []),
             importance=record["importance"],
             created_at=record["created_at"],
+            last_accessed=record.get("last_accessed"),
+            access_count=record.get("access_count", 0),
             metadata=record.get("metadata", {}),
             vector=vector,
         )
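Both new fields are read with .get() and a default, so rows written by pre-1.7.0 releases, which lack these columns, still deserialize cleanly. A minimal illustration of that fallback (hypothetical record, not real stored data):

# Rows created before 1.7.0 have no last_accessed/access_count columns;
# .get() supplies safe defaults instead of raising KeyError.
record = {"importance": 0.7, "created_at": "2025-01-01T00:00:00+00:00"}
print(record.get("last_accessed"))      # None -> treated as "just accessed" downstream
print(record.get("access_count", 0))    # 0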
spatial_memory/config.py
CHANGED
@@ -593,6 +593,43 @@ class Settings(BaseSettings):
         description="Maximum queue depth when backpressure is enabled",
     )
 
+    # =========================================================================
+    # v1.6.3: Auto-Decay Settings
+    # =========================================================================
+
+    auto_decay_enabled: bool = Field(
+        default=True,
+        description="Enable automatic decay calculation during recall operations",
+    )
+    auto_decay_persist_enabled: bool = Field(
+        default=True,
+        description="Persist decay updates to database (disable for read-only scenarios)",
+    )
+    auto_decay_persist_batch_size: int = Field(
+        default=100,
+        ge=10,
+        le=1000,
+        description="Batch size for persisting decay updates to database",
+    )
+    auto_decay_persist_flush_interval_seconds: float = Field(
+        default=5.0,
+        ge=1.0,
+        le=60.0,
+        description="Interval between background flush operations for decay updates",
+    )
+    auto_decay_min_change_threshold: float = Field(
+        default=0.01,
+        ge=0.001,
+        le=0.1,
+        description="Minimum importance change to trigger database persistence (1% default)",
+    )
+    auto_decay_max_queue_size: int = Field(
+        default=10000,
+        ge=1000,
+        le=100000,
+        description="Maximum queue size for pending decay updates (backpressure control)",
+    )
+
     model_config = {
         "env_prefix": "SPATIAL_MEMORY_",
         "env_file": ".env",
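Because model_config sets env_prefix to SPATIAL_MEMORY_, each new field should also be settable from the environment under a derived name such as SPATIAL_MEMORY_AUTO_DECAY_ENABLED. A small sketch assuming standard pydantic-settings behavior and that Settings is constructible with defaults (neither is confirmed by this diff):

import os

# Derived env var names: env_prefix + upper-cased field name (assumption).
os.environ["SPATIAL_MEMORY_AUTO_DECAY_PERSIST_BATCH_SIZE"] = "250"

from spatial_memory.config import Settings

settings = Settings()
assert settings.auto_decay_persist_batch_size == 250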
spatial_memory/core/models.py
CHANGED
@@ -52,6 +52,15 @@ class MemoryResult(BaseModel):
     tags: list[str] = Field(default_factory=list)
     importance: float
     created_at: datetime
+    last_accessed: datetime | None = Field(
+        default=None,
+        description="When the memory was last accessed (for auto-decay)",
+    )
+    access_count: int = Field(
+        default=0,
+        ge=0,
+        description="Number of times the memory has been accessed (for auto-decay)",
+    )
     metadata: dict[str, Any] = Field(default_factory=dict)
     vector: list[float] | None = Field(
         default=None,
@@ -582,6 +591,8 @@ class HybridMemoryMatch:
     vector_score: float | None = None
     fts_score: float | None = None
     combined_score: float = 0.0
+    last_accessed: datetime | None = None  # For auto-decay
+    access_count: int = 0  # For auto-decay
 
 
 @dataclass
@@ -626,3 +637,23 @@ class ExportImportConfig:
     csv_include_vectors: bool = False
     max_export_records: int = 0  # 0 = unlimited
     max_import_records: int = 100_000  # Maximum records per import
+
+
+@dataclass
+class AutoDecayConfig:
+    """Configuration for automatic decay during recall operations.
+
+    Auto-decay calculates effective importance in real-time during searches,
+    re-ranking results based on time-decayed importance. Updates can optionally
+    be persisted to the database in the background.
+    """
+
+    enabled: bool = True
+    persist_enabled: bool = True
+    persist_batch_size: int = 100
+    persist_flush_interval_seconds: float = 5.0
+    min_change_threshold: float = 0.01  # Only persist changes > 1%
+    max_queue_size: int = 10000
+    half_life_days: float = 30.0
+    min_importance_floor: float = 0.1
+    access_weight: float = 0.3  # Weight of access count in slowing decay
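The AutoDecayConfig defaults above plug into the formula that DecayManager.calculate_effective_importance implements later in this diff: importance * 2^(-days / half_life), with access_count stretching the half-life. A self-contained sketch using the default values:

import math

def effective_importance(
    stored: float,
    days_since_access: float,
    access_count: int,
    half_life_days: float = 30.0,
    access_weight: float = 0.3,
    floor: float = 0.1,
) -> float:
    # Each prior access stretches the half-life by 30%, slowing decay.
    effective_half_life = half_life_days * (1.0 + access_weight * access_count)
    decayed = stored * math.pow(2, -days_since_access / effective_half_life)
    return max(decayed, floor)  # never decay below the configured floor

print(effective_importance(0.8, 30, 0))            # 0.4 (one full half-life)
print(round(effective_importance(0.8, 30, 3), 2))  # ~0.56 (half-life stretched to 57 days)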
spatial_memory/core/response_types.py
CHANGED

@@ -13,6 +13,8 @@ from __future__ import annotations
 
 from typing import Any, TypedDict
 
+from typing_extensions import NotRequired
+
 # =============================================================================
 # Nested TypedDicts (shared across multiple responses)
 # =============================================================================
@@ -29,6 +31,7 @@ class MemoryResultDict(TypedDict):
     importance: float
     created_at: str  # ISO 8601 format
     metadata: dict[str, Any]
+    effective_importance: NotRequired[float]  # Time-decayed importance (auto-decay)
 
 
 class MemoryReferenceDict(TypedDict):
@@ -230,6 +233,7 @@ class HybridMemoryDict(TypedDict):
     metadata: dict[str, Any]
     vector_score: float | None
     fts_score: float | None
+    effective_importance: NotRequired[float]  # Time-decayed importance (auto-decay)
 
 
 # =============================================================================
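NotRequired (rather than float | None) is used because effective_importance is present only when auto-decay actually ran; the key may be absent, not merely null. A minimal sketch:

from typing import TypedDict

from typing_extensions import NotRequired

class ExampleDict(TypedDict):
    importance: float
    effective_importance: NotRequired[float]

plain: ExampleDict = {"importance": 0.8}                                  # valid: key omitted
decayed: ExampleDict = {"importance": 0.8, "effective_importance": 0.4}  # valid: key present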
spatial_memory/factory.py
CHANGED
@@ -22,7 +22,9 @@ from spatial_memory.config import Settings
 from spatial_memory.core.cache import ResponseCache
 from spatial_memory.core.database import Database
 from spatial_memory.core.embeddings import EmbeddingService
+from spatial_memory.core.models import AutoDecayConfig
 from spatial_memory.core.rate_limiter import AgentAwareRateLimiter, RateLimiter
+from spatial_memory.services.decay_manager import DecayManager
 from spatial_memory.services.export_import import ExportImportConfig, ExportImportService
 from spatial_memory.services.lifecycle import LifecycleConfig, LifecycleService
 from spatial_memory.services.memory import MemoryService
@@ -55,6 +57,7 @@ class ServiceContainer:
         lifecycle: Lifecycle service for decay/reinforce/consolidate.
         utility: Utility service for stats/namespaces/hybrid search.
         export_import: Export/import service for data portability.
+        decay_manager: Automatic decay manager for real-time importance decay.
         rate_limiter: Simple rate limiter (if per-agent disabled).
         agent_rate_limiter: Per-agent rate limiter (if enabled).
         cache: Response cache for read operations.
@@ -71,6 +74,7 @@ class ServiceContainer:
     lifecycle: LifecycleService
     utility: UtilityService
     export_import: ExportImportService
+    decay_manager: DecayManager | None
     rate_limiter: RateLimiter | None
    agent_rate_limiter: AgentAwareRateLimiter | None
     cache: ResponseCache | None
@@ -351,6 +355,35 @@ class ServiceFactory:
             self._settings.response_cache_regions_ttl,
         )
 
+    def create_decay_manager(
+        self,
+        repository: MemoryRepositoryProtocol,
+    ) -> DecayManager | None:
+        """Create the decay manager based on settings.
+
+        Args:
+            repository: Repository for persisting decay updates.
+
+        Returns:
+            DecayManager if auto-decay is enabled, None otherwise.
+        """
+        if not self._settings.auto_decay_enabled:
+            return None
+
+        config = AutoDecayConfig(
+            enabled=self._settings.auto_decay_enabled,
+            persist_enabled=self._settings.auto_decay_persist_enabled,
+            persist_batch_size=self._settings.auto_decay_persist_batch_size,
+            persist_flush_interval_seconds=self._settings.auto_decay_persist_flush_interval_seconds,
+            min_change_threshold=self._settings.auto_decay_min_change_threshold,
+            max_queue_size=self._settings.auto_decay_max_queue_size,
+            half_life_days=self._settings.decay_default_half_life_days,
+            min_importance_floor=self._settings.decay_min_importance_floor,
+            access_weight=0.3,  # Default access weight
+        )
+
+        return DecayManager(repository=repository, config=config)
+
     def create_all(self) -> ServiceContainer:
         """Create all services with proper dependency wiring.
 
@@ -383,6 +416,9 @@ class ServiceFactory:
         utility = self.create_utility_service(repository, embeddings)
         export_import = self.create_export_import_service(repository, embeddings)
 
+        # Create decay manager
+        decay_manager = self.create_decay_manager(repository)
+
         # Create rate limiter
         rate_limiter, agent_rate_limiter, per_agent_enabled = self.create_rate_limiter()
 
@@ -398,6 +434,7 @@ class ServiceFactory:
             lifecycle=lifecycle,
             utility=utility,
             export_import=export_import,
+            decay_manager=decay_manager,
             rate_limiter=rate_limiter,
             agent_rate_limiter=agent_rate_limiter,
             cache=cache,
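create_decay_manager is the one factory method here that can return None: when auto_decay_enabled is false it short-circuits before touching the repository, and ServiceContainer.decay_manager is simply None. A sketch of that path (ServiceFactory's constructor signature is assumed, not shown in this diff):

from spatial_memory.config import Settings
from spatial_memory.factory import ServiceFactory

settings = Settings(auto_decay_enabled=False)
factory = ServiceFactory(settings)  # constructor signature assumed

# Returns None before the repository is used, so a placeholder is safe here.
assert factory.create_decay_manager(repository=None) is None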
spatial_memory/server.py
CHANGED
@@ -198,6 +198,12 @@ class SpatialMemoryServer:
         self._cache = services.cache
         self._regions_cache_ttl = services.regions_cache_ttl
 
+        # Auto-decay manager
+        self._decay_manager = services.decay_manager
+        if self._decay_manager is not None:
+            self._decay_manager.start()
+            logger.info("Auto-decay manager started")
+
         # ThreadPoolExecutor for non-blocking embedding operations
         self._executor = ThreadPoolExecutor(
             max_workers=2,
@@ -430,21 +436,50 @@ class SpatialMemoryServer:
             namespace=arguments.get("namespace"),
             min_similarity=arguments.get("min_similarity", 0.0),
         )
+
+        # Convert to dict list for potential decay processing
+        memories_list = [
+            {
+                "id": m.id,
+                "content": m.content,
+                "similarity": m.similarity,
+                "namespace": m.namespace,
+                "tags": m.tags,
+                "importance": m.importance,
+                "created_at": m.created_at.isoformat(),
+                "metadata": m.metadata,
+                "last_accessed": m.last_accessed,
+                "access_count": m.access_count,
+            }
+            for m in recall_result.memories
+        ]
+
+        # Apply auto-decay if enabled (adds effective_importance, re-ranks)
+        if self._decay_manager is not None and self._decay_manager.enabled:
+            memories_list = self._decay_manager.apply_decay_to_results(
+                memories_list, rerank=True
+            )
+
+        # Build response - include effective_importance if present
+        response_memories = []
+        for m in memories_list:
+            mem_dict: dict[str, Any] = {
+                "id": m["id"],
+                "content": m["content"],
+                "similarity": m["similarity"],
+                "namespace": m["namespace"],
+                "tags": m["tags"],
+                "importance": m["importance"],
+                "created_at": m["created_at"],
+                "metadata": m["metadata"],
+            }
+            if "effective_importance" in m:
+                mem_dict["effective_importance"] = m["effective_importance"]
+            response_memories.append(mem_dict)
+
         return {
-            "memories": [
-                {
-                    "id": m.id,
-                    "content": m.content,
-                    "similarity": m.similarity,
-                    "namespace": m.namespace,
-                    "tags": m.tags,
-                    "importance": m.importance,
-                    "created_at": m.created_at.isoformat(),
-                    "metadata": m.metadata,
-                }
-                for m in recall_result.memories
-            ],
-            "total": recall_result.total,
+            "memories": response_memories,  # type: ignore[typeddict-item]
+            "total": len(response_memories),
         }
 
     def _handle_nearby(self, arguments: dict[str, Any]) -> NearbyResponse:
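The re-rank inserted above multiplies similarity by effective_importance, so a fresher memory can outrank a semantically closer but stale one. A small sketch of that rule in isolation:

# Two hypothetical results: "a" matches better but has decayed further.
results = [
    {"id": "a", "similarity": 0.90, "effective_importance": 0.40},
    {"id": "b", "similarity": 0.80, "effective_importance": 0.75},
]
results.sort(
    key=lambda r: r["similarity"] * r["effective_importance"], reverse=True
)
print([r["id"] for r in results])  # ['b', 'a']: 0.60 beats 0.36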
@@ -926,27 +961,56 @@ class SpatialMemoryServer:
             namespace=arguments.get("namespace"),
             min_similarity=arguments.get("min_similarity", 0.0),
         )
+
+        # Convert to dict list for potential decay processing
+        memories_list = [
+            {
+                "id": m.id,
+                "content": m.content,
+                "similarity": m.similarity,
+                "namespace": m.namespace,
+                "tags": m.tags,
+                "importance": m.importance,
+                "created_at": m.created_at.isoformat() if m.created_at else None,
+                "metadata": m.metadata,
+                "vector_score": m.vector_score,
+                "fts_score": m.fts_score,
+                "last_accessed": m.last_accessed,
+                "access_count": m.access_count,
+            }
+            for m in hybrid_result.memories
+        ]
+
+        # Apply auto-decay if enabled (adds effective_importance, re-ranks)
+        if self._decay_manager is not None and self._decay_manager.enabled:
+            memories_list = self._decay_manager.apply_decay_to_results(
+                memories_list, rerank=True
+            )
+
+        # Build response - include effective_importance if present
+        response_memories = []
+        for m in memories_list:
+            mem_dict: dict[str, Any] = {
+                "id": m["id"],
+                "content": m["content"],
+                "similarity": m["similarity"],
+                "namespace": m["namespace"],
+                "tags": m["tags"],
+                "importance": m["importance"],
+                "created_at": m["created_at"],
+                "metadata": m["metadata"],
+                "vector_score": m.get("vector_score"),
+                "fts_score": m.get("fts_score"),
+            }
+            if "effective_importance" in m:
+                mem_dict["effective_importance"] = m["effective_importance"]
+            response_memories.append(mem_dict)
+
         return {
             "query": hybrid_result.query,
             "alpha": hybrid_result.alpha,
-            "memories": [
-                {
-                    "id": m.id,
-                    "content": m.content,
-                    "similarity": m.similarity,
-                    "namespace": m.namespace,
-                    "tags": m.tags,
-                    "importance": m.importance,
-                    "created_at": (
-                        m.created_at.isoformat() if m.created_at else None
-                    ),
-                    "metadata": m.metadata,
-                    "vector_score": m.vector_score,
-                    "fts_score": m.fts_score,
-                }
-                for m in hybrid_result.memories
-            ],
-            "total": hybrid_result.total,
+            "memories": response_memories,  # type: ignore[typeddict-item]
+            "total": len(response_memories),
             "search_type": hybrid_result.search_type,
         }
 
@@ -1058,6 +1122,10 @@ Then use `extract` to automatically capture important information.
 
     def close(self) -> None:
         """Clean up resources."""
+        # Stop the decay manager (flushes pending updates)
+        if self._decay_manager is not None:
+            self._decay_manager.stop()
+
         # Shutdown the thread pool executor
         if hasattr(self, "_executor"):
             self._executor.shutdown(wait=False)
spatial_memory/services/decay_manager.py
ADDED

@@ -0,0 +1,406 @@
+"""Automatic decay manager for real-time importance decay.
+
+This service provides automatic decay calculation during recall operations,
+re-ranking search results based on time-decayed importance. Updates are
+optionally persisted to the database in the background.
+
+Architecture:
+    recall() / hybrid_recall()
+            │
+            ▼
+    DecayManager.apply_decay_to_results()  ← Real-time (~20-50μs)
+            │
+       ┌────┴────┐
+       ▼         ▼
+  [Re-ranked   [Background Queue]
+   Results]          │
+                     ▼
+            [Batch Persist Thread]
+                     │
+                     ▼
+             [LanceDB Update]
+"""
+
+from __future__ import annotations
+
+import logging
+import math
+import threading
+import time
+from collections import deque
+from dataclasses import dataclass
+from datetime import datetime
+from typing import TYPE_CHECKING, Any
+
+from spatial_memory.core.models import AutoDecayConfig
+from spatial_memory.core.utils import to_aware_utc, utc_now
+
+if TYPE_CHECKING:
+    from spatial_memory.ports.repositories import MemoryRepositoryProtocol
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class DecayUpdate:
+    """A pending decay update for a memory."""
+
+    memory_id: str
+    old_importance: float
+    new_importance: float
+    timestamp: float  # time.monotonic() for deduplication
+
+
+class DecayManager:
+    """Manages automatic decay calculation and persistence.
+
+    This service calculates effective importance during search operations
+    using exponential decay based on time since last access. Results are
+    re-ranked by multiplying similarity with effective importance.
+
+    Background persistence is optional and uses a daemon thread with
+    batched updates to minimize database overhead.
+    """
+
+    def __init__(
+        self,
+        repository: MemoryRepositoryProtocol,
+        config: AutoDecayConfig | None = None,
+    ) -> None:
+        """Initialize the decay manager.
+
+        Args:
+            repository: Repository for persisting decay updates.
+            config: Configuration for decay behavior.
+        """
+        self._repo = repository
+        self._config = config or AutoDecayConfig()
+
+        # Threading primitives
+        self._lock = threading.Lock()
+        self._shutdown_event = threading.Event()
+        self._worker_thread: threading.Thread | None = None
+
+        # Update queue with backpressure (deque with maxlen)
+        # Using maxlen for automatic backpressure - oldest items dropped
+        self._update_queue: deque[DecayUpdate] = deque(
+            maxlen=self._config.max_queue_size
+        )
+
+        # Track pending updates by memory_id for deduplication
+        self._pending_updates: dict[str, DecayUpdate] = {}
+
+        # Statistics
+        self._stats_lock = threading.Lock()
+        self._updates_queued = 0
+        self._updates_persisted = 0
+        self._updates_deduplicated = 0
+
+    @property
+    def enabled(self) -> bool:
+        """Whether auto-decay is enabled."""
+        return self._config.enabled
+
+    @property
+    def persist_enabled(self) -> bool:
+        """Whether persistence is enabled."""
+        return self._config.persist_enabled
+
+    def start(self) -> None:
+        """Start the background persistence worker.
+
+        Safe to call multiple times - will only start if not already running.
+        """
+        if not self._config.enabled or not self._config.persist_enabled:
+            logger.debug("Auto-decay persistence disabled, skipping worker start")
+            return
+
+        if self._worker_thread is not None and self._worker_thread.is_alive():
+            logger.debug("Decay worker already running")
+            return
+
+        self._shutdown_event.clear()
+        self._worker_thread = threading.Thread(
+            target=self._background_worker,
+            name="decay-persist-worker",
+            daemon=True,
+        )
+        self._worker_thread.start()
+        logger.info("Auto-decay background worker started")
+
+    def stop(self, timeout: float = 5.0) -> None:
+        """Stop the background worker gracefully.
+
+        Flushes any pending updates before stopping.
+
+        Args:
+            timeout: Maximum time to wait for worker shutdown.
+        """
+        if self._worker_thread is None or not self._worker_thread.is_alive():
+            return
+
+        logger.info("Stopping auto-decay background worker...")
+        self._shutdown_event.set()
+
+        # Wait for worker to finish
+        self._worker_thread.join(timeout=timeout)
+
+        if self._worker_thread.is_alive():
+            logger.warning("Decay worker did not stop within timeout")
+        else:
+            logger.info(
+                f"Auto-decay worker stopped. "
+                f"Queued: {self._updates_queued}, "
+                f"Persisted: {self._updates_persisted}, "
+                f"Deduplicated: {self._updates_deduplicated}"
+            )
+
+    def calculate_effective_importance(
+        self,
+        stored_importance: float,
+        last_accessed: datetime,
+        access_count: int,
+    ) -> float:
+        """Calculate time-decayed effective importance.
+
+        Uses exponential decay: importance * 2^(-days/half_life)
+        Access count slows decay via effective_half_life adjustment.
+
+        Args:
+            stored_importance: The stored importance value (0-1).
+            last_accessed: When the memory was last accessed.
+            access_count: Number of times the memory has been accessed.
+
+        Returns:
+            Effective importance after decay (clamped to min_importance_floor).
+        """
+        if not self._config.enabled:
+            return stored_importance
+
+        # Calculate days since last access
+        # Normalize last_accessed to timezone-aware UTC (database may return naive)
+        now = utc_now()
+        last_accessed_aware = to_aware_utc(last_accessed)
+        delta = now - last_accessed_aware
+        days_since_access = delta.total_seconds() / 86400.0  # seconds in a day
+
+        if days_since_access <= 0:
+            return stored_importance
+
+        # Calculate effective half-life (access count slows decay)
+        # More accesses = longer effective half-life = slower decay
+        access_factor = 1.0 + self._config.access_weight * access_count
+        effective_half_life = self._config.half_life_days * access_factor
+
+        # Exponential decay: importance * 2^(-days/half_life)
+        decay_factor = math.pow(2, -days_since_access / effective_half_life)
+        effective_importance = stored_importance * decay_factor
+
+        # Clamp to minimum floor
+        return max(effective_importance, self._config.min_importance_floor)
+
+    def apply_decay_to_results(
+        self,
+        results: list[dict[str, Any]],
+        rerank: bool = True,
+    ) -> list[dict[str, Any]]:
+        """Apply decay to search results and optionally re-rank.
+
+        Calculates effective_importance for each result and optionally
+        re-ranks results by multiplying similarity with effective_importance.
+
+        Args:
+            results: List of memory result dictionaries.
+            rerank: Whether to re-rank by adjusted score (similarity × effective_importance).
+
+        Returns:
+            Results with effective_importance added, optionally re-ranked.
+        """
+        if not self._config.enabled or not results:
+            return results
+
+        updates_to_queue: list[DecayUpdate] = []
+
+        for result in results:
+            # Extract required fields
+            stored_importance = result.get("importance", 0.5)
+            last_accessed = result.get("last_accessed")
+            access_count = result.get("access_count", 0)
+            memory_id = result.get("id", "")
+
+            # Handle datetime parsing if needed
+            if isinstance(last_accessed, str):
+                try:
+                    last_accessed = datetime.fromisoformat(last_accessed.replace("Z", "+00:00"))
+                except (ValueError, AttributeError):
+                    last_accessed = utc_now()
+            elif last_accessed is None:
+                last_accessed = utc_now()
+
+            # Calculate effective importance
+            effective_importance = self.calculate_effective_importance(
+                stored_importance=stored_importance,
+                last_accessed=last_accessed,
+                access_count=access_count,
+            )
+
+            # Add to result
+            result["effective_importance"] = effective_importance
+
+            # Check if we should queue an update
+            if self._config.persist_enabled and memory_id:
+                change = abs(stored_importance - effective_importance)
+                if change >= self._config.min_change_threshold:
+                    updates_to_queue.append(
+                        DecayUpdate(
+                            memory_id=memory_id,
+                            old_importance=stored_importance,
+                            new_importance=effective_importance,
+                            timestamp=time.monotonic(),
+                        )
+                    )
+
+        # Queue updates in bulk
+        if updates_to_queue:
+            self._queue_updates(updates_to_queue)
+
+        # Re-rank by adjusted score if requested
+        if rerank:
+            # Calculate adjusted score: similarity × effective_importance
+            for result in results:
+                similarity = result.get("similarity", 0.0)
+                effective = result.get("effective_importance", result.get("importance", 0.5))
+                result["_adjusted_score"] = similarity * effective
+
+            # Sort by adjusted score (descending)
+            results.sort(key=lambda r: r.get("_adjusted_score", 0.0), reverse=True)
+
+            # Remove temporary score field
+            for result in results:
+                result.pop("_adjusted_score", None)
+
+        return results
+
+    def _queue_updates(self, updates: list[DecayUpdate]) -> None:
+        """Queue updates for background persistence with deduplication.
+
+        Latest update per memory_id wins - prevents duplicate writes.
+
+        Args:
+            updates: List of decay updates to queue.
+        """
+        with self._lock:
+            for update in updates:
+                # Deduplicate: keep latest update per memory_id
+                existing = self._pending_updates.get(update.memory_id)
+                if existing is not None:
+                    with self._stats_lock:
+                        self._updates_deduplicated += 1
+
+                self._pending_updates[update.memory_id] = update
+                self._update_queue.append(update)
+
+                with self._stats_lock:
+                    self._updates_queued += 1
+
+    def _background_worker(self) -> None:
+        """Background worker that batches and persists decay updates."""
+        logger.debug("Decay background worker started")
+
+        while not self._shutdown_event.is_set():
+            try:
+                # Wait for flush interval or shutdown
+                self._shutdown_event.wait(timeout=self._config.persist_flush_interval_seconds)
+
+                # Collect batch of updates
+                batch = self._collect_batch()
+
+                if batch:
+                    self._persist_batch(batch)
+
+            except Exception as e:
+                logger.error(f"Error in decay background worker: {e}", exc_info=True)
+                # Don't crash the worker on transient errors
+                time.sleep(1.0)
+
+        # Final flush on shutdown
+        try:
+            batch = self._collect_batch()
+            if batch:
+                logger.debug(f"Final flush: {len(batch)} updates")
+                self._persist_batch(batch)
+        except Exception as e:
+            logger.error(f"Error in final decay flush: {e}", exc_info=True)
+
+        logger.debug("Decay background worker stopped")
+
+    def _collect_batch(self) -> list[DecayUpdate]:
+        """Collect a batch of updates for persistence.
+
+        Returns:
+            List of unique updates (latest per memory_id).
+        """
+        with self._lock:
+            if not self._pending_updates:
+                return []
+
+            # Get unique updates (already deduplicated in _pending_updates)
+            batch = list(self._pending_updates.values())[:self._config.persist_batch_size]
+
+            # Clear processed updates from pending dict
+            for update in batch:
+                self._pending_updates.pop(update.memory_id, None)
+
+            return batch
+
+    def _persist_batch(self, batch: list[DecayUpdate]) -> None:
+        """Persist a batch of decay updates to the database.
+
+        Args:
+            batch: List of decay updates to persist.
+        """
+        if not batch:
+            return
+
+        # Build update tuples for batch update
+        updates = [
+            (update.memory_id, {"importance": update.new_importance})
+            for update in batch
+        ]
+
+        try:
+            success_count, failed_ids = self._repo.update_batch(updates)
+
+            with self._stats_lock:
+                self._updates_persisted += success_count
+
+            if failed_ids:
+                logger.warning(f"Failed to persist decay for {len(failed_ids)} memories")
+
+            logger.debug(f"Persisted decay updates for {success_count} memories")
+
+        except Exception as e:
+            logger.error(f"Failed to persist decay batch: {e}")
+            # Re-queue failed updates? For now, just log and continue
+            # In a production system, you might want retry logic here
+
+    def get_stats(self) -> dict[str, Any]:
+        """Get decay manager statistics.
+
+        Returns:
+            Dictionary with queue and persistence stats.
+        """
+        with self._stats_lock:
+            return {
+                "enabled": self._config.enabled,
+                "persist_enabled": self._config.persist_enabled,
+                "updates_queued": self._updates_queued,
+                "updates_persisted": self._updates_persisted,
+                "updates_deduplicated": self._updates_deduplicated,
+                "pending_updates": len(self._pending_updates),
+                "queue_size": len(self._update_queue),
+                "queue_max_size": self._config.max_queue_size,
+                "worker_alive": (
+                    self._worker_thread is not None and self._worker_thread.is_alive()
+                ),
+            }
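A hypothetical end-to-end sketch of the manager above, using a stub repository in place of the LanceDB adapter (update_batch is the only repository method the manager calls; the stub implements just that, not the full protocol):

from datetime import timedelta

from spatial_memory.core.models import AutoDecayConfig
from spatial_memory.core.utils import utc_now
from spatial_memory.services.decay_manager import DecayManager

class StubRepo:
    """Records batched importance writes instead of hitting LanceDB."""

    def __init__(self) -> None:
        self.writes: list[tuple[str, dict]] = []

    def update_batch(self, updates):
        self.writes.extend(updates)
        return len(updates), []  # (success_count, failed_ids)

repo = StubRepo()
manager = DecayManager(repository=repo, config=AutoDecayConfig())
manager.start()

results = [{
    "id": "m1",
    "similarity": 0.9,
    "importance": 0.8,
    "last_accessed": utc_now() - timedelta(days=60),
    "access_count": 0,
}]
ranked = manager.apply_decay_to_results(results, rerank=True)
print(ranked[0]["effective_importance"])  # ~0.2: two 30-day half-lives

manager.stop()  # shutdown triggers the final flush via update_batch
print(manager.get_stats()["updates_persisted"])  # 1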
spatial_memory/services/utility.py
CHANGED

@@ -399,9 +399,21 @@ class UtilityService:
                     vector_score=getattr(result, "vector_score", None),
                     fts_score=getattr(result, "fts_score", None),
                     combined_score=result.similarity,
+                    # For auto-decay support
+                    last_accessed=result.last_accessed,
+                    access_count=result.access_count,
                 )
             )
 
+        # Update access stats for returned memories (batch for efficiency)
+        if memories:
+            memory_ids = [m.id for m in memories]
+            try:
+                self._repo.update_access_batch(memory_ids)
+            except Exception as e:
+                # Log but don't fail the search if access update fails
+                logger.warning(f"Failed to update access stats: {e}")
+
         return HybridRecallResult(
             query=query,
             alpha=alpha,
{spatial_memory_mcp-1.6.2.dist-info → spatial_memory_mcp-1.7.0.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: spatial-memory-mcp
-Version: 1.6.2
+Version: 1.7.0
 Summary: Spatial bidirectional persistent memory MCP server for LLMs - vector-based semantic memory as a navigable landscape
 Project-URL: Homepage, https://github.com/arman-tech/spatial-memory-mcp
 Project-URL: Repository, https://github.com/arman-tech/spatial-memory-mcp
{spatial_memory_mcp-1.6.2.dist-info → spatial_memory_mcp-1.7.0.dist-info}/RECORD
CHANGED

@@ -1,12 +1,12 @@
-spatial_memory/__init__.py,sha256=
+spatial_memory/__init__.py,sha256=0CCA6unsjAbTtFOyOOwbmKs5MfQO4EUHA5RuLKx3p1w,2154
 spatial_memory/__main__.py,sha256=1WK2evonr4Sv1wZNbBuQHQ_z14Aa9kJr2w3GwDlz04I,8279
-spatial_memory/config.py,sha256=
-spatial_memory/factory.py,sha256=
+spatial_memory/config.py,sha256=cKxCVDUDryQNJmEiN9XT3h6k_CNVXsv4B6XDqhOEsfQ,23848
+spatial_memory/factory.py,sha256=fTQqk3O1dGUig8mGle9to1bnsnzwG12_Vpb4VFGm-M8,17374
 spatial_memory/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-spatial_memory/server.py,sha256=
+spatial_memory/server.py,sha256=6rP9kUhbRWY-A2BtlU2yjLvf-p3Kil_ocsZXKgPPnKk,48591
 spatial_memory/verify.py,sha256=4p4KQYRUBrKGax66n629hvb9Ul8sR2yXXnfyZLpNk-o,3939
 spatial_memory/adapters/__init__.py,sha256=xNPQCOYVsTP2ogMEcYGbxGJYH9pdksB66MZ7ctK6KbM,187
-spatial_memory/adapters/lancedb_repository.py,sha256=
+spatial_memory/adapters/lancedb_repository.py,sha256=klKNwuhUUXTOifkSCABHjJFCHFKZoG5MqZNAAj3_3oQ,31360
 spatial_memory/core/__init__.py,sha256=MWU1dWaE-8opHiNgTgn8Jd1MhCWfRTSbrHwlzR6lgkk,3033
 spatial_memory/core/cache.py,sha256=QqG0hhpaDH0mzBLIEinGP3WkKsb70QfCLMGpptguWIM,10038
 spatial_memory/core/circuit_breaker.py,sha256=zxqnOWiAnx_S7oX44UY57ihpk7V27fzW6rYgBn1BWp8,10734
@@ -28,9 +28,9 @@ spatial_memory/core/import_security.py,sha256=DiG0BsX65JGGf6B1T855SVz1TZB1ViI3JX
 spatial_memory/core/lifecycle_ops.py,sha256=P8jc4JGBF39ayNikgLu0I3kGcJ3ph0VsvAHSxjp35FI,35836
 spatial_memory/core/logging.py,sha256=JfFRzHmhZ2BPNSJiKIHGjfUeskFVo8Bj7nOKznvv0kU,6542
 spatial_memory/core/metrics.py,sha256=8B26sAd2y6xrpaJr8mNgOAMzAZDd1uXOvAGxz_1nhfY,5383
-spatial_memory/core/models.py,sha256=
+spatial_memory/core/models.py,sha256=8o2IAhJGP08Id0uCucprXb5YyDOSQAR8f3cC22_4NT4,18496
 spatial_memory/core/rate_limiter.py,sha256=5A3YI6C0_YrWZeSBBxF7Eu5HUD8rodS45301730doF0,10582
-spatial_memory/core/response_types.py,sha256=
+spatial_memory/core/response_types.py,sha256=mAElG4QYa4hd7xEk6w-LK94_bjyoj9EAL6NNNhub9Go,10815
 spatial_memory/core/security.py,sha256=fjIYqsyzK4qqm7sI8FNEE2xx9WjNJnrAslOBLVRVwgs,19759
 spatial_memory/core/spatial_ops.py,sha256=_xNrZzrbL7w2uAMJkQZEtTMjERQpo04cuSUoTNebiew,13360
 spatial_memory/core/tracing.py,sha256=9O3WdUJfCl2sohYWlaQETrCO7_P_N3qY_MqSAnpQPl0,8438
@@ -40,15 +40,16 @@ spatial_memory/migrations/__init__.py,sha256=wljoV_u2PPx-dH6JOPmjEP5xEbwoM9HQ1WY
 spatial_memory/ports/__init__.py,sha256=Lq9ht9AS4VwPbMojtd_FYkA7lUuCXmYHwv6sweJi9AQ,243
 spatial_memory/ports/repositories.py,sha256=6rUxGUfeAVudNU9ugaVTKedPGQONaIL0TrSQt7XR5HU,19857
 spatial_memory/services/__init__.py,sha256=9sYpYgQRTAq800I5ZS_GFcg5F_p3dUySe5fKN3S4vg4,1564
+spatial_memory/services/decay_manager.py,sha256=ha2F07pLMMcxdjATupDaU2k-cNf9h5znB8GKMyt3GGE,14448
 spatial_memory/services/export_import.py,sha256=Ui1RFujtEhrDYKGXsQragUtpVfxq0eLwjnabB1h-F7c,38211
 spatial_memory/services/lifecycle.py,sha256=y1XSjJuCBou7wPdbRGyZsP30URw6iNqxjhlwYMLv79o,43010
 spatial_memory/services/memory.py,sha256=MZ9NMNXBRYEA39vzT5gAB7PXJrGFwC-D98SLZe7Z50Q,12944
 spatial_memory/services/spatial.py,sha256=x-d8ucSpP1sNQR9ywylMwF884hYPkRDfNDhDGfUodwI,42609
-spatial_memory/services/utility.py,sha256=
+spatial_memory/services/utility.py,sha256=MaYK9RVbmjIR_CWGHD_P5FEJnl6cgfTdFavDH6lit5A,15534
 spatial_memory/tools/__init__.py,sha256=ZhFZp5j8HA4Qx5pVcRmgTcBbqY3X5TmgMNpSkyAMqJA,127
 spatial_memory/tools/definitions.py,sha256=Ueg_BrRGJcp_jnQD95DiYFPkxU419XPkbjzQFDG3jtY,25397
-spatial_memory_mcp-1.
-spatial_memory_mcp-1.
-spatial_memory_mcp-1.
-spatial_memory_mcp-1.
-spatial_memory_mcp-1.
+spatial_memory_mcp-1.7.0.dist-info/METADATA,sha256=aQ10Z4wDfGI91SiKPLV4cWDxtiuRN2PyDnYsC3KnhqE,16772
+spatial_memory_mcp-1.7.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+spatial_memory_mcp-1.7.0.dist-info/entry_points.txt,sha256=nJ4RJBB9SvhNktJdikcAS27fSwtKekpgPR4GTy2r1cE,64
+spatial_memory_mcp-1.7.0.dist-info/licenses/LICENSE,sha256=g65vrroU3yJxekbYV8xmDj7KFrXAg89eCM8vcWrpKmU,1095
+spatial_memory_mcp-1.7.0.dist-info/RECORD,,
{spatial_memory_mcp-1.6.2.dist-info → spatial_memory_mcp-1.7.0.dist-info}/WHEEL
File without changes

{spatial_memory_mcp-1.6.2.dist-info → spatial_memory_mcp-1.7.0.dist-info}/entry_points.txt
File without changes

{spatial_memory_mcp-1.6.2.dist-info → spatial_memory_mcp-1.7.0.dist-info}/licenses/LICENSE
File without changes