spatial_memory_mcp-1.9.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. spatial_memory/__init__.py +97 -0
  2. spatial_memory/__main__.py +271 -0
  3. spatial_memory/adapters/__init__.py +7 -0
  4. spatial_memory/adapters/lancedb_repository.py +880 -0
  5. spatial_memory/config.py +769 -0
  6. spatial_memory/core/__init__.py +118 -0
  7. spatial_memory/core/cache.py +317 -0
  8. spatial_memory/core/circuit_breaker.py +297 -0
  9. spatial_memory/core/connection_pool.py +220 -0
  10. spatial_memory/core/consolidation_strategies.py +401 -0
  11. spatial_memory/core/database.py +3072 -0
  12. spatial_memory/core/db_idempotency.py +242 -0
  13. spatial_memory/core/db_indexes.py +576 -0
  14. spatial_memory/core/db_migrations.py +588 -0
  15. spatial_memory/core/db_search.py +512 -0
  16. spatial_memory/core/db_versioning.py +178 -0
  17. spatial_memory/core/embeddings.py +558 -0
  18. spatial_memory/core/errors.py +317 -0
  19. spatial_memory/core/file_security.py +701 -0
  20. spatial_memory/core/filesystem.py +178 -0
  21. spatial_memory/core/health.py +289 -0
  22. spatial_memory/core/helpers.py +79 -0
  23. spatial_memory/core/import_security.py +433 -0
  24. spatial_memory/core/lifecycle_ops.py +1067 -0
  25. spatial_memory/core/logging.py +194 -0
  26. spatial_memory/core/metrics.py +192 -0
  27. spatial_memory/core/models.py +660 -0
  28. spatial_memory/core/rate_limiter.py +326 -0
  29. spatial_memory/core/response_types.py +500 -0
  30. spatial_memory/core/security.py +588 -0
  31. spatial_memory/core/spatial_ops.py +430 -0
  32. spatial_memory/core/tracing.py +300 -0
  33. spatial_memory/core/utils.py +110 -0
  34. spatial_memory/core/validation.py +406 -0
  35. spatial_memory/factory.py +444 -0
  36. spatial_memory/migrations/__init__.py +40 -0
  37. spatial_memory/ports/__init__.py +11 -0
  38. spatial_memory/ports/repositories.py +630 -0
  39. spatial_memory/py.typed +0 -0
  40. spatial_memory/server.py +1214 -0
  41. spatial_memory/services/__init__.py +70 -0
  42. spatial_memory/services/decay_manager.py +411 -0
  43. spatial_memory/services/export_import.py +1031 -0
  44. spatial_memory/services/lifecycle.py +1139 -0
  45. spatial_memory/services/memory.py +412 -0
  46. spatial_memory/services/spatial.py +1152 -0
  47. spatial_memory/services/utility.py +429 -0
  48. spatial_memory/tools/__init__.py +5 -0
  49. spatial_memory/tools/definitions.py +695 -0
  50. spatial_memory/verify.py +140 -0
  51. spatial_memory_mcp-1.9.1.dist-info/METADATA +509 -0
  52. spatial_memory_mcp-1.9.1.dist-info/RECORD +55 -0
  53. spatial_memory_mcp-1.9.1.dist-info/WHEEL +4 -0
  54. spatial_memory_mcp-1.9.1.dist-info/entry_points.txt +2 -0
  55. spatial_memory_mcp-1.9.1.dist-info/licenses/LICENSE +21 -0
spatial_memory/services/__init__.py
@@ -0,0 +1,70 @@
+"""Service layer for Spatial Memory MCP Server."""
+
+from spatial_memory.core.errors import (
+    ConsolidationError,
+    DecayError,
+    ExtractionError,
+    NamespaceOperationError,
+    ReinforcementError,
+)
+from spatial_memory.services.export_import import (
+    ExportImportService,
+)
+from spatial_memory.services.lifecycle import (
+    ConsolidateResult,
+    ConsolidationGroupResult,
+    DecayedMemory,
+    DecayResult,
+    ExtractedMemory,
+    ExtractResult,
+    LifecycleConfig,
+    LifecycleService,
+    ReinforcedMemory,
+    ReinforceResult,
+)
+from spatial_memory.services.memory import (
+    ForgetResult,
+    MemoryService,
+    NearbyResult,
+    RecallResult,
+    RememberResult,
+)
+from spatial_memory.services.spatial import (
+    SpatialConfig,
+    SpatialService,
+)
+from spatial_memory.services.utility import (
+    UtilityService,
+)
+
+__all__ = [
+    # Lifecycle
+    "ConsolidateResult",
+    "ConsolidationError",
+    "ConsolidationGroupResult",
+    "DecayedMemory",
+    "DecayError",
+    "DecayResult",
+    "ExtractedMemory",
+    "ExtractionError",
+    "ExtractResult",
+    "LifecycleConfig",
+    "LifecycleService",
+    "ReinforcedMemory",
+    "ReinforcementError",
+    "ReinforceResult",
+    # Memory
+    "ForgetResult",
+    "MemoryService",
+    "NearbyResult",
+    "RecallResult",
+    "RememberResult",
+    # Spatial
+    "SpatialConfig",
+    "SpatialService",
+    # Utility
+    "NamespaceOperationError",
+    "UtilityService",
+    # Export/Import
+    "ExportImportService",
+]
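
For orientation, a minimal consumer-side sketch (not part of the package): the names are exactly what the `__all__` above re-exports, while each service's constructor arguments live in the module that defines it and are not shown in this file.

# Hypothetical consumer code -- import surface taken from __all__ above.
from spatial_memory.services import (
    LifecycleService,
    MemoryService,
    SpatialService,
    UtilityService,
)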
spatial_memory/services/decay_manager.py
@@ -0,0 +1,411 @@
+"""Automatic decay manager for real-time importance decay.
+
+This service provides automatic decay calculation during recall operations,
+re-ranking search results based on time-decayed importance. Updates are
+optionally persisted to the database in the background.
+
+Architecture:
+    recall() / hybrid_recall()
+                │
+                ▼
+    DecayManager.apply_decay_to_results()   ← Real-time (~20-50μs)
+                │
+           ┌────┴────┐
+           ▼         ▼
+    [Re-ranked    [Background Queue]
+     Results]          │
+                       ▼
+              [Batch Persist Thread]
+                       │
+                       ▼
+               [LanceDB Update]
+"""
+
+from __future__ import annotations
+
+import logging
+import threading
+import time
+from collections import deque
+from dataclasses import dataclass
+from datetime import datetime
+from typing import TYPE_CHECKING, Any
+
+from spatial_memory.core.lifecycle_ops import apply_decay, calculate_decay_factor
+from spatial_memory.core.models import AutoDecayConfig
+from spatial_memory.core.utils import to_aware_utc, utc_now
+
+if TYPE_CHECKING:
+    from spatial_memory.ports.repositories import MemoryRepositoryProtocol
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class DecayUpdate:
+    """A pending decay update for a memory."""
+
+    memory_id: str
+    old_importance: float
+    new_importance: float
+    timestamp: float  # time.monotonic() for deduplication
+
+
+class DecayManager:
+    """Manages automatic decay calculation and persistence.
+
+    This service calculates effective importance during search operations
+    using exponential decay based on time since last access. Results are
+    re-ranked by multiplying similarity with effective importance.
+
+    Background persistence is optional and uses a daemon thread with
+    batched updates to minimize database overhead.
+    """
+
+    def __init__(
+        self,
+        repository: MemoryRepositoryProtocol,
+        config: AutoDecayConfig | None = None,
+    ) -> None:
+        """Initialize the decay manager.
+
+        Args:
+            repository: Repository for persisting decay updates.
+            config: Configuration for decay behavior.
+        """
+        self._repo = repository
+        self._config = config or AutoDecayConfig()
+
+        # Threading primitives
+        self._lock = threading.Lock()
+        self._shutdown_event = threading.Event()
+        self._worker_thread: threading.Thread | None = None
+
+        # Update queue with backpressure (deque with maxlen)
+        # Using maxlen for automatic backpressure - oldest items dropped
+        self._update_queue: deque[DecayUpdate] = deque(
+            maxlen=self._config.max_queue_size
+        )
+
+        # Track pending updates by memory_id for deduplication
+        self._pending_updates: dict[str, DecayUpdate] = {}
+
+        # Statistics
+        self._stats_lock = threading.Lock()
+        self._updates_queued = 0
+        self._updates_persisted = 0
+        self._updates_deduplicated = 0
+
+    @property
+    def enabled(self) -> bool:
+        """Whether auto-decay is enabled."""
+        return self._config.enabled
+
+    @property
+    def persist_enabled(self) -> bool:
+        """Whether persistence is enabled."""
+        return self._config.persist_enabled
+
+    def start(self) -> None:
+        """Start the background persistence worker.
+
+        Safe to call multiple times - will only start if not already running.
+        """
+        if not self._config.enabled or not self._config.persist_enabled:
+            logger.debug("Auto-decay persistence disabled, skipping worker start")
+            return
+
+        if self._worker_thread is not None and self._worker_thread.is_alive():
+            logger.debug("Decay worker already running")
+            return
+
+        self._shutdown_event.clear()
+        self._worker_thread = threading.Thread(
+            target=self._background_worker,
+            name="decay-persist-worker",
+            daemon=True,
+        )
+        self._worker_thread.start()
+        logger.info("Auto-decay background worker started")
+
+    def stop(self, timeout: float = 5.0) -> None:
+        """Stop the background worker gracefully.
+
+        Flushes any pending updates before stopping.
+
+        Args:
+            timeout: Maximum time to wait for worker shutdown.
+        """
+        if self._worker_thread is None or not self._worker_thread.is_alive():
+            return
+
+        logger.info("Stopping auto-decay background worker...")
+        self._shutdown_event.set()
+
+        # Wait for worker to finish
+        self._worker_thread.join(timeout=timeout)
+
+        if self._worker_thread.is_alive():
+            logger.warning("Decay worker did not stop within timeout")
+        else:
+            logger.info(
+                f"Auto-decay worker stopped. "
+                f"Queued: {self._updates_queued}, "
+                f"Persisted: {self._updates_persisted}, "
+                f"Deduplicated: {self._updates_deduplicated}"
+            )
+
+    def calculate_effective_importance(
+        self,
+        stored_importance: float,
+        last_accessed: datetime,
+        access_count: int,
+    ) -> float:
+        """Calculate time-decayed effective importance.
+
+        Uses the unified decay algorithm from lifecycle_ops, supporting
+        exponential, linear, and step decay functions with adaptive half-life
+        based on access count and importance.
+
+        Args:
+            stored_importance: The stored importance value (0-1).
+            last_accessed: When the memory was last accessed.
+            access_count: Number of times the memory has been accessed.
+
+        Returns:
+            Effective importance after decay (clamped to min_importance_floor).
+        """
+        if not self._config.enabled:
+            return stored_importance
+
+        # Calculate days since last access
+        # Normalize last_accessed to timezone-aware UTC (database may return naive)
+        now = utc_now()
+        last_accessed_aware = to_aware_utc(last_accessed)
+        delta = now - last_accessed_aware
+        days_since_access = delta.total_seconds() / 86400.0  # seconds in a day
+
+        if days_since_access <= 0:
+            return stored_importance
+
+        # Use the unified decay algorithm from lifecycle_ops
+        decay_factor = calculate_decay_factor(
+            days_since_access=days_since_access,
+            access_count=access_count,
+            base_importance=stored_importance,
+            decay_function=self._config.decay_function,
+            half_life_days=self._config.half_life_days,
+            access_weight=self._config.access_weight,
+        )
+
+        return apply_decay(
+            current_importance=stored_importance,
+            decay_factor=decay_factor,
+            min_importance=self._config.min_importance_floor,
+        )
+
+    def apply_decay_to_results(
+        self,
+        results: list[dict[str, Any]],
+        rerank: bool = True,
+    ) -> list[dict[str, Any]]:
+        """Apply decay to search results and optionally re-rank.
+
+        Calculates effective_importance for each result and optionally
+        re-ranks results by multiplying similarity with effective_importance.
+
+        Args:
+            results: List of memory result dictionaries.
+            rerank: Whether to re-rank by adjusted score (similarity × effective_importance).
+
+        Returns:
+            Results with effective_importance added, optionally re-ranked.
+        """
+        if not self._config.enabled or not results:
+            return results
+
+        updates_to_queue: list[DecayUpdate] = []
+
+        for result in results:
+            # Extract required fields
+            stored_importance = result.get("importance", 0.5)
+            last_accessed = result.get("last_accessed")
+            access_count = result.get("access_count", 0)
+            memory_id = result.get("id", "")
+
+            # Handle datetime parsing if needed
+            if isinstance(last_accessed, str):
+                try:
+                    last_accessed = datetime.fromisoformat(last_accessed.replace("Z", "+00:00"))
+                except (ValueError, AttributeError):
+                    last_accessed = utc_now()
+            elif last_accessed is None:
+                last_accessed = utc_now()
+
+            # Calculate effective importance
+            effective_importance = self.calculate_effective_importance(
+                stored_importance=stored_importance,
+                last_accessed=last_accessed,
+                access_count=access_count,
+            )
+
+            # Add to result
+            result["effective_importance"] = effective_importance
+
+            # Check if we should queue an update
+            if self._config.persist_enabled and memory_id:
+                change = abs(stored_importance - effective_importance)
+                if change >= self._config.min_change_threshold:
+                    updates_to_queue.append(
+                        DecayUpdate(
+                            memory_id=memory_id,
+                            old_importance=stored_importance,
+                            new_importance=effective_importance,
+                            timestamp=time.monotonic(),
+                        )
+                    )
+
+        # Queue updates in bulk
+        if updates_to_queue:
+            self._queue_updates(updates_to_queue)
+
+        # Re-rank by adjusted score if requested
+        if rerank:
+            # Calculate adjusted score: similarity × effective_importance
+            for result in results:
+                similarity = result.get("similarity", 0.0)
+                effective = result.get("effective_importance", result.get("importance", 0.5))
+                result["_adjusted_score"] = similarity * effective
+
+            # Sort by adjusted score (descending)
+            results.sort(key=lambda r: r.get("_adjusted_score", 0.0), reverse=True)
+
+            # Remove temporary score field
+            for result in results:
+                result.pop("_adjusted_score", None)
+
+        return results
+
+    def _queue_updates(self, updates: list[DecayUpdate]) -> None:
+        """Queue updates for background persistence with deduplication.
+
+        Latest update per memory_id wins - prevents duplicate writes.
+
+        Args:
+            updates: List of decay updates to queue.
+        """
+        with self._lock:
+            for update in updates:
+                # Deduplicate: keep latest update per memory_id
+                existing = self._pending_updates.get(update.memory_id)
+                if existing is not None:
+                    with self._stats_lock:
+                        self._updates_deduplicated += 1
+
+                self._pending_updates[update.memory_id] = update
+                self._update_queue.append(update)
+
+                with self._stats_lock:
+                    self._updates_queued += 1
+
+    def _background_worker(self) -> None:
+        """Background worker that batches and persists decay updates."""
+        logger.debug("Decay background worker started")
+
+        while not self._shutdown_event.is_set():
+            try:
+                # Wait for flush interval or shutdown
+                self._shutdown_event.wait(timeout=self._config.persist_flush_interval_seconds)
+
+                # Collect batch of updates
+                batch = self._collect_batch()
+
+                if batch:
+                    self._persist_batch(batch)
+
+            except Exception as e:
+                logger.error(f"Error in decay background worker: {e}", exc_info=True)
+                # Don't crash the worker on transient errors
+                time.sleep(1.0)
+
+        # Final flush on shutdown
+        try:
+            batch = self._collect_batch()
+            if batch:
+                logger.debug(f"Final flush: {len(batch)} updates")
+                self._persist_batch(batch)
+        except Exception as e:
+            logger.error(f"Error in final decay flush: {e}", exc_info=True)
+
+        logger.debug("Decay background worker stopped")
+
+    def _collect_batch(self) -> list[DecayUpdate]:
+        """Collect a batch of updates for persistence.
+
+        Returns:
+            List of unique updates (latest per memory_id).
+        """
+        with self._lock:
+            if not self._pending_updates:
+                return []
+
+            # Get unique updates (already deduplicated in _pending_updates)
+            batch = list(self._pending_updates.values())[:self._config.persist_batch_size]
+
+            # Clear processed updates from pending dict
+            for update in batch:
+                self._pending_updates.pop(update.memory_id, None)
+
+            return batch
+
+    def _persist_batch(self, batch: list[DecayUpdate]) -> None:
+        """Persist a batch of decay updates to the database.
+
+        Args:
+            batch: List of decay updates to persist.
+        """
+        if not batch:
+            return
+
+        # Build update tuples for batch update
+        updates = [
+            (update.memory_id, {"importance": update.new_importance})
+            for update in batch
+        ]
+
+        try:
+            success_count, failed_ids = self._repo.update_batch(updates)
+
+            with self._stats_lock:
+                self._updates_persisted += success_count
+
+            if failed_ids:
+                logger.warning(f"Failed to persist decay for {len(failed_ids)} memories")
+
+            logger.debug(f"Persisted decay updates for {success_count} memories")
+
+        except Exception as e:
+            logger.error(f"Failed to persist decay batch: {e}")
+            # Re-queue failed updates? For now, just log and continue
+            # In a production system, you might want retry logic here
+
+    def get_stats(self) -> dict[str, Any]:
+        """Get decay manager statistics.
+
+        Returns:
+            Dictionary with queue and persistence stats.
+        """
+        with self._stats_lock:
+            return {
+                "enabled": self._config.enabled,
+                "persist_enabled": self._config.persist_enabled,
+                "updates_queued": self._updates_queued,
+                "updates_persisted": self._updates_persisted,
+                "updates_deduplicated": self._updates_deduplicated,
+                "pending_updates": len(self._pending_updates),
+                "queue_size": len(self._update_queue),
+                "queue_max_size": self._config.max_queue_size,
+                "worker_alive": (
+                    self._worker_thread is not None and self._worker_thread.is_alive()
+                ),
+            }
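
To make the moving parts above concrete, here is a minimal, hypothetical usage sketch. DecayManager, AutoDecayConfig, and every method call are taken from the diff above; the InMemoryRepo stub and the sample result dictionary are invented for illustration and implement only the update_batch() call that _persist_batch() actually makes.

# Hypothetical usage sketch -- InMemoryRepo and the sample result are invented.
from datetime import datetime, timedelta, timezone

from spatial_memory.core.models import AutoDecayConfig
from spatial_memory.services.decay_manager import DecayManager


class InMemoryRepo:
    """Stub repository: satisfies the update_batch() call used by DecayManager."""

    def update_batch(self, updates):
        # updates is a list of (memory_id, {"importance": new_value}) tuples.
        return len(updates), []  # (success_count, failed_ids)


manager = DecayManager(repository=InMemoryRepo(), config=AutoDecayConfig())
manager.start()  # no-op unless enabled and persist_enabled are both true

results = [
    {
        "id": "mem-1",
        "similarity": 0.9,
        "importance": 0.8,
        "access_count": 2,
        "last_accessed": datetime.now(timezone.utc) - timedelta(days=30),
    },
]
reranked = manager.apply_decay_to_results(results, rerank=True)
print(reranked[0].get("effective_importance"))  # set when auto-decay is enabled
print(manager.get_stats()["pending_updates"])
manager.stop()  # flushes any queued updates before shutdown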