spatial-memory-mcp 1.6.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of spatial-memory-mcp might be problematic; see the advisory details below.

Files changed (54) hide show
  1. spatial_memory/__init__.py +97 -0
  2. spatial_memory/__main__.py +270 -0
  3. spatial_memory/adapters/__init__.py +7 -0
  4. spatial_memory/adapters/lancedb_repository.py +878 -0
  5. spatial_memory/config.py +728 -0
  6. spatial_memory/core/__init__.py +118 -0
  7. spatial_memory/core/cache.py +317 -0
  8. spatial_memory/core/circuit_breaker.py +297 -0
  9. spatial_memory/core/connection_pool.py +220 -0
  10. spatial_memory/core/consolidation_strategies.py +402 -0
  11. spatial_memory/core/database.py +3069 -0
  12. spatial_memory/core/db_idempotency.py +242 -0
  13. spatial_memory/core/db_indexes.py +575 -0
  14. spatial_memory/core/db_migrations.py +584 -0
  15. spatial_memory/core/db_search.py +509 -0
  16. spatial_memory/core/db_versioning.py +177 -0
  17. spatial_memory/core/embeddings.py +557 -0
  18. spatial_memory/core/errors.py +317 -0
  19. spatial_memory/core/file_security.py +702 -0
  20. spatial_memory/core/filesystem.py +178 -0
  21. spatial_memory/core/health.py +289 -0
  22. spatial_memory/core/helpers.py +79 -0
  23. spatial_memory/core/import_security.py +432 -0
  24. spatial_memory/core/lifecycle_ops.py +1067 -0
  25. spatial_memory/core/logging.py +194 -0
  26. spatial_memory/core/metrics.py +192 -0
  27. spatial_memory/core/models.py +628 -0
  28. spatial_memory/core/rate_limiter.py +326 -0
  29. spatial_memory/core/response_types.py +497 -0
  30. spatial_memory/core/security.py +588 -0
  31. spatial_memory/core/spatial_ops.py +426 -0
  32. spatial_memory/core/tracing.py +300 -0
  33. spatial_memory/core/utils.py +110 -0
  34. spatial_memory/core/validation.py +403 -0
  35. spatial_memory/factory.py +407 -0
  36. spatial_memory/migrations/__init__.py +40 -0
  37. spatial_memory/ports/__init__.py +11 -0
  38. spatial_memory/ports/repositories.py +631 -0
  39. spatial_memory/py.typed +0 -0
  40. spatial_memory/server.py +1141 -0
  41. spatial_memory/services/__init__.py +70 -0
  42. spatial_memory/services/export_import.py +1023 -0
  43. spatial_memory/services/lifecycle.py +1120 -0
  44. spatial_memory/services/memory.py +412 -0
  45. spatial_memory/services/spatial.py +1147 -0
  46. spatial_memory/services/utility.py +409 -0
  47. spatial_memory/tools/__init__.py +5 -0
  48. spatial_memory/tools/definitions.py +695 -0
  49. spatial_memory/verify.py +140 -0
  50. spatial_memory_mcp-1.6.1.dist-info/METADATA +499 -0
  51. spatial_memory_mcp-1.6.1.dist-info/RECORD +54 -0
  52. spatial_memory_mcp-1.6.1.dist-info/WHEEL +4 -0
  53. spatial_memory_mcp-1.6.1.dist-info/entry_points.txt +2 -0
  54. spatial_memory_mcp-1.6.1.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,1141 @@
1
+ """MCP Server for Spatial Memory.
2
+
3
+ This module provides the MCP (Model Context Protocol) server implementation
4
+ that exposes memory operations as tools for LLM assistants.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ import asyncio
10
+ import atexit
11
+ import json
12
+ import logging
13
+ import signal
14
+ import sys
15
+ import uuid
16
+ from collections.abc import Callable
17
+ from concurrent.futures import ThreadPoolExecutor
18
+ from dataclasses import asdict
19
+ from functools import partial
20
+ from typing import TYPE_CHECKING, Any
21
+
22
+ from mcp.server import Server
23
+ from mcp.server.stdio import stdio_server
24
+ from mcp.types import TextContent, Tool
25
+
26
+ from spatial_memory import __version__
27
+ from spatial_memory.config import ConfigurationError, get_settings, validate_startup
28
+ from spatial_memory.factory import ServiceFactory
29
+ from spatial_memory.core.database import (
30
+ clear_connection_cache,
31
+ set_connection_pool_max_size,
32
+ )
33
+ from spatial_memory.core.errors import (
34
+ ConsolidationError,
35
+ DecayError,
36
+ ExportError,
37
+ ExtractionError,
38
+ FileSizeLimitError,
39
+ ImportRecordLimitError,
40
+ MemoryImportError,
41
+ MemoryNotFoundError,
42
+ NamespaceNotFoundError,
43
+ NamespaceOperationError,
44
+ PathSecurityError,
45
+ ReinforcementError,
46
+ SpatialMemoryError,
47
+ ValidationError,
48
+ )
49
+ from spatial_memory.core.response_types import (
50
+ ConsolidateResponse,
51
+ DecayResponse,
52
+ DeleteNamespaceResponse,
53
+ ExportResponse,
54
+ ExtractResponse,
55
+ ForgetBatchResponse,
56
+ ForgetResponse,
57
+ HandlerResponse,
58
+ HealthResponse,
59
+ HybridRecallResponse,
60
+ ImportResponse,
61
+ JourneyResponse,
62
+ NamespacesResponse,
63
+ NearbyResponse,
64
+ RecallResponse,
65
+ RegionsResponse,
66
+ ReinforceResponse,
67
+ RememberBatchResponse,
68
+ RememberResponse,
69
+ RenameNamespaceResponse,
70
+ StatsResponse,
71
+ VisualizeResponse,
72
+ WanderResponse,
73
+ )
74
+ from spatial_memory.core.health import HealthChecker
75
+ from spatial_memory.core.logging import configure_logging
76
+ from spatial_memory.core.metrics import is_available as metrics_available
77
+ from spatial_memory.core.metrics import record_request
78
+ from spatial_memory.core.tracing import (
79
+ RequestContext,
80
+ TimingContext,
81
+ request_context,
82
+ )
83
+ from spatial_memory.tools import TOOLS
84
+
85
+ if TYPE_CHECKING:
86
+ from spatial_memory.ports.repositories import (
87
+ EmbeddingServiceProtocol,
88
+ MemoryRepositoryProtocol,
89
+ )
90
+
91
+ logger = logging.getLogger(__name__)
92
+
93
# Tools that can be cached (read-only operations)
CACHEABLE_TOOLS: frozenset[str] = frozenset({"recall", "nearby", "hybrid_recall", "regions"})

# Tools that invalidate cache by namespace
NAMESPACE_INVALIDATING_TOOLS: frozenset[str] = frozenset({"remember", "forget", "forget_batch"})

# Tools that invalidate entire cache
FULL_INVALIDATING_TOOLS: frozenset[str] = frozenset({"decay", "reinforce", "consolidate"})
101
+
102
+
103
+ def _generate_cache_key(tool_name: str, arguments: dict[str, Any]) -> str:
104
+ """Generate a cache key from tool name and arguments.
105
+
106
+ Args:
107
+ tool_name: Name of the tool.
108
+ arguments: Tool arguments (excluding _agent_id).
109
+
110
+ Returns:
111
+ A string cache key suitable for response caching.
112
+ """
113
+ # Remove _agent_id from cache key computation (same query from different agents = same result)
114
+ cache_args = {k: v for k, v in sorted(arguments.items()) if k != "_agent_id"}
115
+ # Create a stable string representation
116
+ args_str = json.dumps(cache_args, sort_keys=True, default=str)
117
+ return f"{tool_name}:{hash(args_str)}"
118
+
119
+
120
# Error type to response name mapping for standardized error responses.
# Lookup is by exact type (see _create_error_response: ERROR_MAPPINGS.get(type(error))),
# so every concrete subclass that should get its own name must be listed here.
ERROR_MAPPINGS: dict[type[Exception], str] = {
    MemoryNotFoundError: "MemoryNotFound",
    ValidationError: "ValidationError",
    DecayError: "DecayError",
    ReinforcementError: "ReinforcementError",
    ExtractionError: "ExtractionError",
    ConsolidationError: "ConsolidationError",
    ExportError: "ExportError",
    # The Python class is named MemoryImportError to avoid shadowing the
    # builtin ImportError, but the wire-format name stays "ImportError".
    MemoryImportError: "ImportError",
    PathSecurityError: "PathSecurityError",
    FileSizeLimitError: "FileSizeLimitError",
    ImportRecordLimitError: "ImportRecordLimitError",
    NamespaceNotFoundError: "NamespaceNotFound",
    NamespaceOperationError: "NamespaceOperationError",
    SpatialMemoryError: "SpatialMemoryError",
}
137
+
138
+
139
def _create_error_response(error: Exception, error_id: str | None = None) -> list[TextContent]:
    """Create standardized error response for tool handlers."""
    payload: dict[str, Any] = {
        "error": ERROR_MAPPINGS.get(type(error), "UnknownError"),
        "message": str(error),
        "isError": True,
    }
    # Only attach an error_id when one was supplied (unexpected errors).
    if error_id:
        payload["error_id"] = error_id
    return [TextContent(type="text", text=json.dumps(payload))]
150
+
151
+
152
+ class SpatialMemoryServer:
153
+ """MCP Server for Spatial Memory operations.
154
+
155
+ Uses dependency injection for testability.
156
+ """
157
+
158
    def __init__(
        self,
        repository: MemoryRepositoryProtocol | None = None,
        embeddings: EmbeddingServiceProtocol | None = None,
    ) -> None:
        """Initialize the server.

        Args:
            repository: Optional repository (uses LanceDB if not provided).
            embeddings: Optional embedding service (uses local model if not provided).
        """
        self._settings = get_settings()

        # Configure connection pool size from settings.
        # NOTE(review): this runs before ServiceFactory.create_all() —
        # presumably connections are created there, so ordering matters; confirm.
        set_connection_pool_max_size(self._settings.connection_pool_max_size)

        # Use ServiceFactory for dependency injection
        factory = ServiceFactory(
            settings=self._settings,
            repository=repository,
            embeddings=embeddings,
        )
        services = factory.create_all()

        # Store service references
        self._db = services.database
        self._embeddings = services.embeddings
        self._memory_service = services.memory
        self._spatial_service = services.spatial
        self._lifecycle_service = services.lifecycle
        self._utility_service = services.utility
        self._export_import_service = services.export_import

        # Rate limiting: when per-agent limiting is enabled, the agent limiter
        # is used; otherwise the global limiter (if any) applies.
        self._per_agent_rate_limiting = services.per_agent_rate_limiting
        self._rate_limiter = services.rate_limiter
        self._agent_rate_limiter = services.agent_rate_limiter

        # Response cache (read-only tools only; see CACHEABLE_TOOLS)
        self._cache_enabled = services.cache_enabled
        self._cache = services.cache
        self._regions_cache_ttl = services.regions_cache_ttl

        # ThreadPoolExecutor for non-blocking embedding operations
        self._executor = ThreadPoolExecutor(
            max_workers=2,
            thread_name_prefix="embed-",
        )

        # Tool handler registry for dispatch pattern
        self._tool_handlers: dict[str, Callable[[dict[str, Any]], HandlerResponse]] = {
            "remember": self._handle_remember,
            "remember_batch": self._handle_remember_batch,
            "recall": self._handle_recall,
            "nearby": self._handle_nearby,
            "forget": self._handle_forget,
            "forget_batch": self._handle_forget_batch,
            "health": self._handle_health,
            "journey": self._handle_journey,
            "wander": self._handle_wander,
            "regions": self._handle_regions,
            "visualize": self._handle_visualize,
            "decay": self._handle_decay,
            "reinforce": self._handle_reinforce,
            "extract": self._handle_extract,
            "consolidate": self._handle_consolidate,
            "stats": self._handle_stats,
            "namespaces": self._handle_namespaces,
            "delete_namespace": self._handle_delete_namespace,
            "rename_namespace": self._handle_rename_namespace,
            "export_memories": self._handle_export_memories,
            "import_memories": self._handle_import_memories,
            "hybrid_recall": self._handle_hybrid_recall,
        }

        # Log metrics availability
        if metrics_available():
            logger.info("Prometheus metrics enabled")
        else:
            logger.info("Prometheus metrics disabled (prometheus_client not installed)")

        # Create MCP server with behavioral instructions
        self._server = Server(
            name="spatial-memory",
            version=__version__,
            instructions=self._get_server_instructions(),
        )
        self._setup_handlers()
246
+
247
+ async def _run_in_executor(self, func: Callable[..., Any], *args: Any) -> Any:
248
+ """Run a synchronous function in the thread pool executor.
249
+
250
+ This allows CPU-bound or blocking operations (like embedding generation)
251
+ to run without blocking the asyncio event loop.
252
+
253
+ Args:
254
+ func: The synchronous function to run.
255
+ *args: Arguments to pass to the function.
256
+
257
+ Returns:
258
+ The result of the function call.
259
+ """
260
+ loop = asyncio.get_running_loop()
261
+ return await loop.run_in_executor(self._executor, partial(func, *args))
262
+
263
+ async def _handle_tool_async(
264
+ self, name: str, arguments: dict[str, Any]
265
+ ) -> HandlerResponse:
266
+ """Handle tool call asynchronously by running handler in executor.
267
+
268
+ This wraps synchronous handlers to run in a thread pool, preventing
269
+ blocking operations from stalling the event loop.
270
+
271
+ Args:
272
+ name: Tool name.
273
+ arguments: Tool arguments.
274
+
275
+ Returns:
276
+ Tool result as typed dictionary.
277
+
278
+ Raises:
279
+ ValidationError: If tool name is unknown.
280
+ """
281
+ return await self._run_in_executor(self._handle_tool, name, arguments)
282
+
283
+ def _setup_handlers(self) -> None:
284
+ """Set up MCP tool handlers."""
285
+
286
+ @self._server.list_tools()
287
+ async def list_tools() -> list[Tool]:
288
+ """Return the list of available tools."""
289
+ return TOOLS
290
+
291
+ @self._server.call_tool()
292
+ async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
293
+ """Handle tool calls with tracing, caching, and rate limiting."""
294
+ # Extract _agent_id for tracing and rate limiting (don't pass to handler)
295
+ agent_id = arguments.pop("_agent_id", None)
296
+
297
+ # Apply rate limiting
298
+ if self._per_agent_rate_limiting and self._agent_rate_limiter is not None:
299
+ if not self._agent_rate_limiter.wait(agent_id=agent_id, timeout=30.0):
300
+ return [TextContent(
301
+ type="text",
302
+ text=json.dumps({
303
+ "error": "RateLimitExceeded",
304
+ "message": "Too many requests. Please wait and try again.",
305
+ "isError": True,
306
+ })
307
+ )]
308
+ elif self._rate_limiter is not None:
309
+ if not self._rate_limiter.wait(timeout=30.0):
310
+ return [TextContent(
311
+ type="text",
312
+ text=json.dumps({
313
+ "error": "RateLimitExceeded",
314
+ "message": "Too many requests. Please wait and try again.",
315
+ "isError": True,
316
+ })
317
+ )]
318
+
319
+ # Use request context for tracing
320
+ namespace = arguments.get("namespace")
321
+ with request_context(tool_name=name, agent_id=agent_id, namespace=namespace) as ctx:
322
+ timing = TimingContext()
323
+ cache_hit = False
324
+
325
+ try:
326
+ # Check cache for cacheable tools
327
+ if self._cache_enabled and self._cache is not None and name in CACHEABLE_TOOLS:
328
+ cache_key = _generate_cache_key(name, arguments)
329
+ with timing.measure("cache_lookup"):
330
+ cached_result = self._cache.get(cache_key)
331
+ if cached_result is not None:
332
+ cache_hit = True
333
+ result = cached_result
334
+ else:
335
+ with timing.measure("handler"):
336
+ # Run handler in executor to avoid blocking event loop
337
+ result = await self._handle_tool_async(name, arguments)
338
+ # Cache the result with appropriate TTL
339
+ ttl = self._regions_cache_ttl if name == "regions" else None
340
+ self._cache.set(cache_key, result, ttl=ttl)
341
+ else:
342
+ with timing.measure("handler"):
343
+ # Run handler in executor to avoid blocking event loop
344
+ result = await self._handle_tool_async(name, arguments)
345
+
346
+ # Invalidate cache on mutations
347
+ if self._cache_enabled and self._cache is not None:
348
+ self._invalidate_cache_for_tool(name, arguments)
349
+
350
+ # Add _meta to response if enabled
351
+ if self._settings.include_request_meta:
352
+ result["_meta"] = self._build_meta(ctx, timing, cache_hit)
353
+
354
+ return [TextContent(type="text", text=json.dumps(result, default=str))]
355
+ except tuple(ERROR_MAPPINGS.keys()) as e:
356
+ return _create_error_response(e)
357
+ except Exception as e:
358
+ error_id = str(uuid.uuid4())[:8]
359
+ logger.error(f"Unexpected error [{error_id}] in {name}: {e}", exc_info=True)
360
+ return _create_error_response(e, error_id)
361
+
362
+ def _build_meta(
363
+ self,
364
+ ctx: RequestContext,
365
+ timing: TimingContext,
366
+ cache_hit: bool,
367
+ ) -> dict[str, Any]:
368
+ """Build the _meta object for response.
369
+
370
+ Args:
371
+ ctx: The request context.
372
+ timing: The timing context.
373
+ cache_hit: Whether this was a cache hit.
374
+
375
+ Returns:
376
+ Dictionary with request metadata.
377
+ """
378
+ meta: dict[str, Any] = {
379
+ "request_id": ctx.request_id,
380
+ "agent_id": ctx.agent_id,
381
+ "cache_hit": cache_hit,
382
+ }
383
+ if self._settings.include_timing_breakdown:
384
+ meta["timing_ms"] = timing.summary()
385
+ return meta
386
+
387
+ def _invalidate_cache_for_tool(self, name: str, arguments: dict[str, Any]) -> None:
388
+ """Invalidate cache entries based on the tool that was called.
389
+
390
+ Args:
391
+ name: Tool name.
392
+ arguments: Tool arguments.
393
+ """
394
+ if self._cache is None:
395
+ return
396
+
397
+ if name in FULL_INVALIDATING_TOOLS:
398
+ self._cache.invalidate_all()
399
+ elif name in NAMESPACE_INVALIDATING_TOOLS:
400
+ namespace = arguments.get("namespace", "default")
401
+ self._cache.invalidate_namespace(namespace)
402
+
403
+ # =========================================================================
404
+ # Tool Handler Methods
405
+ # =========================================================================
406
+
407
+ def _handle_remember(self, arguments: dict[str, Any]) -> RememberResponse:
408
+ """Handle remember tool call."""
409
+ remember_result = self._memory_service.remember(
410
+ content=arguments["content"],
411
+ namespace=arguments.get("namespace", "default"),
412
+ tags=arguments.get("tags"),
413
+ importance=arguments.get("importance", 0.5),
414
+ metadata=arguments.get("metadata"),
415
+ )
416
+ return asdict(remember_result) # type: ignore[return-value]
417
+
418
+ def _handle_remember_batch(self, arguments: dict[str, Any]) -> RememberBatchResponse:
419
+ """Handle remember_batch tool call."""
420
+ batch_result = self._memory_service.remember_batch(
421
+ memories=arguments["memories"],
422
+ )
423
+ return asdict(batch_result) # type: ignore[return-value]
424
+
425
+ def _handle_recall(self, arguments: dict[str, Any]) -> RecallResponse:
426
+ """Handle recall tool call."""
427
+ recall_result = self._memory_service.recall(
428
+ query=arguments["query"],
429
+ limit=arguments.get("limit", 5),
430
+ namespace=arguments.get("namespace"),
431
+ min_similarity=arguments.get("min_similarity", 0.0),
432
+ )
433
+ return {
434
+ "memories": [
435
+ {
436
+ "id": m.id,
437
+ "content": m.content,
438
+ "similarity": m.similarity,
439
+ "namespace": m.namespace,
440
+ "tags": m.tags,
441
+ "importance": m.importance,
442
+ "created_at": m.created_at.isoformat(),
443
+ "metadata": m.metadata,
444
+ }
445
+ for m in recall_result.memories
446
+ ],
447
+ "total": recall_result.total,
448
+ }
449
+
450
+ def _handle_nearby(self, arguments: dict[str, Any]) -> NearbyResponse:
451
+ """Handle nearby tool call."""
452
+ nearby_result = self._memory_service.nearby(
453
+ memory_id=arguments["memory_id"],
454
+ limit=arguments.get("limit", 5),
455
+ namespace=arguments.get("namespace"),
456
+ )
457
+ return {
458
+ "reference": {
459
+ "id": nearby_result.reference.id,
460
+ "content": nearby_result.reference.content,
461
+ "namespace": nearby_result.reference.namespace,
462
+ },
463
+ "neighbors": [
464
+ {
465
+ "id": n.id,
466
+ "content": n.content,
467
+ "similarity": n.similarity,
468
+ "namespace": n.namespace,
469
+ }
470
+ for n in nearby_result.neighbors
471
+ ],
472
+ }
473
+
474
+ def _handle_forget(self, arguments: dict[str, Any]) -> ForgetResponse:
475
+ """Handle forget tool call."""
476
+ forget_result = self._memory_service.forget(
477
+ memory_id=arguments["memory_id"],
478
+ )
479
+ return asdict(forget_result) # type: ignore[return-value]
480
+
481
+ def _handle_forget_batch(self, arguments: dict[str, Any]) -> ForgetBatchResponse:
482
+ """Handle forget_batch tool call."""
483
+ forget_batch_result = self._memory_service.forget_batch(
484
+ memory_ids=arguments["memory_ids"],
485
+ )
486
+ return asdict(forget_batch_result) # type: ignore[return-value]
487
+
488
+ def _handle_health(self, arguments: dict[str, Any]) -> HealthResponse:
489
+ """Handle health tool call."""
490
+ verbose = arguments.get("verbose", False)
491
+
492
+ health_checker = HealthChecker(
493
+ database=self._db,
494
+ embeddings=self._embeddings,
495
+ storage_path=self._settings.memory_path,
496
+ )
497
+
498
+ report = health_checker.get_health_report()
499
+
500
+ result: HealthResponse = {
501
+ "version": __version__,
502
+ "status": report.status.value,
503
+ "timestamp": report.timestamp.isoformat(),
504
+ "ready": health_checker.is_ready(),
505
+ "alive": health_checker.is_alive(),
506
+ }
507
+
508
+ if verbose:
509
+ result["checks"] = [
510
+ {
511
+ "name": check.name,
512
+ "status": check.status.value,
513
+ "message": check.message,
514
+ "latency_ms": check.latency_ms,
515
+ }
516
+ for check in report.checks
517
+ ]
518
+
519
+ return result
520
+
521
+ def _handle_journey(self, arguments: dict[str, Any]) -> JourneyResponse:
522
+ """Handle journey tool call."""
523
+ journey_result = self._spatial_service.journey(
524
+ start_id=arguments["start_id"],
525
+ end_id=arguments["end_id"],
526
+ steps=arguments.get("steps", 10),
527
+ namespace=arguments.get("namespace"),
528
+ )
529
+ return {
530
+ "start_id": journey_result.start_id,
531
+ "end_id": journey_result.end_id,
532
+ "steps": [
533
+ {
534
+ "step": s.step,
535
+ "t": s.t,
536
+ "nearby_memories": [
537
+ {
538
+ "id": m.id,
539
+ "content": m.content,
540
+ "similarity": m.similarity,
541
+ }
542
+ for m in s.nearby_memories
543
+ ],
544
+ "distance_to_path": s.distance_to_path,
545
+ }
546
+ for s in journey_result.steps
547
+ ],
548
+ "path_coverage": journey_result.path_coverage,
549
+ }
550
+
551
+ def _handle_wander(self, arguments: dict[str, Any]) -> WanderResponse:
552
+ """Handle wander tool call."""
553
+ start_id = arguments.get("start_id")
554
+ if start_id is None:
555
+ all_memories = self._memory_service.recall(
556
+ query="any topic",
557
+ limit=1,
558
+ namespace=arguments.get("namespace"),
559
+ )
560
+ if not all_memories.memories:
561
+ raise ValidationError("No memories available for wander")
562
+ start_id = all_memories.memories[0].id
563
+
564
+ wander_result = self._spatial_service.wander(
565
+ start_id=start_id,
566
+ steps=arguments.get("steps", 10),
567
+ temperature=arguments.get("temperature", 0.5),
568
+ namespace=arguments.get("namespace"),
569
+ )
570
+ return {
571
+ "start_id": wander_result.start_id,
572
+ "steps": [
573
+ {
574
+ "step": s.step,
575
+ "memory": {
576
+ "id": s.memory.id,
577
+ "content": s.memory.content,
578
+ "namespace": s.memory.namespace,
579
+ "tags": s.memory.tags,
580
+ "similarity": s.memory.similarity,
581
+ },
582
+ "similarity_to_previous": s.similarity_to_previous,
583
+ "selection_probability": s.selection_probability,
584
+ }
585
+ for s in wander_result.steps
586
+ ],
587
+ "total_distance": wander_result.total_distance,
588
+ }
589
+
590
+ def _handle_regions(self, arguments: dict[str, Any]) -> RegionsResponse:
591
+ """Handle regions tool call."""
592
+ regions_result = self._spatial_service.regions(
593
+ namespace=arguments.get("namespace"),
594
+ min_cluster_size=arguments.get("min_cluster_size", 3),
595
+ max_clusters=arguments.get("max_clusters"),
596
+ )
597
+ return {
598
+ "clusters": [
599
+ {
600
+ "cluster_id": c.cluster_id,
601
+ "size": c.size,
602
+ "keywords": c.keywords,
603
+ "representative_memory": {
604
+ "id": c.representative_memory.id,
605
+ "content": c.representative_memory.content,
606
+ },
607
+ "sample_memories": [
608
+ {
609
+ "id": m.id,
610
+ "content": m.content,
611
+ "similarity": m.similarity,
612
+ }
613
+ for m in c.sample_memories
614
+ ],
615
+ "coherence": c.coherence,
616
+ }
617
+ for c in regions_result.clusters
618
+ ],
619
+ "total_memories": regions_result.total_memories,
620
+ "noise_count": regions_result.noise_count,
621
+ "clustering_quality": regions_result.clustering_quality,
622
+ }
623
+
624
+ def _handle_visualize(self, arguments: dict[str, Any]) -> VisualizeResponse:
625
+ """Handle visualize tool call."""
626
+ visualize_result = self._spatial_service.visualize(
627
+ memory_ids=arguments.get("memory_ids"),
628
+ namespace=arguments.get("namespace"),
629
+ format=arguments.get("format", "json"),
630
+ dimensions=arguments.get("dimensions", 2),
631
+ include_edges=arguments.get("include_edges", True),
632
+ )
633
+ output_format = arguments.get("format", "json")
634
+ if output_format in ("mermaid", "svg"):
635
+ return {
636
+ "format": output_format,
637
+ "output": visualize_result.output,
638
+ "node_count": len(visualize_result.nodes),
639
+ }
640
+ return {
641
+ "nodes": [
642
+ {
643
+ "id": n.id,
644
+ "x": n.x,
645
+ "y": n.y,
646
+ "label": n.label,
647
+ "cluster": n.cluster,
648
+ "importance": n.importance,
649
+ }
650
+ for n in visualize_result.nodes
651
+ ],
652
+ "edges": [
653
+ {
654
+ "from_id": e.from_id,
655
+ "to_id": e.to_id,
656
+ "weight": e.weight,
657
+ }
658
+ for e in visualize_result.edges
659
+ ] if visualize_result.edges else [],
660
+ "bounds": visualize_result.bounds,
661
+ "format": visualize_result.format,
662
+ }
663
+
664
+ def _handle_decay(self, arguments: dict[str, Any]) -> DecayResponse:
665
+ """Handle decay tool call."""
666
+ decay_result = self._lifecycle_service.decay(
667
+ namespace=arguments.get("namespace"),
668
+ decay_function=arguments.get("decay_function", "exponential"),
669
+ half_life_days=arguments.get("half_life_days", 30.0),
670
+ min_importance=arguments.get("min_importance", 0.1),
671
+ access_weight=arguments.get("access_weight", 0.3),
672
+ dry_run=arguments.get("dry_run", True),
673
+ )
674
+ return {
675
+ "memories_analyzed": decay_result.memories_analyzed,
676
+ "memories_decayed": decay_result.memories_decayed,
677
+ "avg_decay_factor": decay_result.avg_decay_factor,
678
+ "decayed_memories": [
679
+ {
680
+ "id": m.id,
681
+ "content_preview": m.content_preview,
682
+ "old_importance": m.old_importance,
683
+ "new_importance": m.new_importance,
684
+ "decay_factor": m.decay_factor,
685
+ "days_since_access": m.days_since_access,
686
+ "access_count": m.access_count,
687
+ }
688
+ for m in decay_result.decayed_memories
689
+ ],
690
+ "dry_run": decay_result.dry_run,
691
+ }
692
+
693
+ def _handle_reinforce(self, arguments: dict[str, Any]) -> ReinforceResponse:
694
+ """Handle reinforce tool call."""
695
+ reinforce_result = self._lifecycle_service.reinforce(
696
+ memory_ids=arguments["memory_ids"],
697
+ boost_type=arguments.get("boost_type", "additive"),
698
+ boost_amount=arguments.get("boost_amount", 0.1),
699
+ update_access=arguments.get("update_access", True),
700
+ )
701
+ return {
702
+ "memories_reinforced": reinforce_result.memories_reinforced,
703
+ "avg_boost": reinforce_result.avg_boost,
704
+ "reinforced": [
705
+ {
706
+ "id": m.id,
707
+ "content_preview": m.content_preview,
708
+ "old_importance": m.old_importance,
709
+ "new_importance": m.new_importance,
710
+ "boost_applied": m.boost_applied,
711
+ }
712
+ for m in reinforce_result.reinforced
713
+ ],
714
+ "not_found": reinforce_result.not_found,
715
+ }
716
+
717
+ def _handle_extract(self, arguments: dict[str, Any]) -> ExtractResponse:
718
+ """Handle extract tool call."""
719
+ extract_result = self._lifecycle_service.extract(
720
+ text=arguments["text"],
721
+ namespace=arguments.get("namespace", "extracted"),
722
+ min_confidence=arguments.get("min_confidence", 0.5),
723
+ deduplicate=arguments.get("deduplicate", True),
724
+ dedup_threshold=arguments.get("dedup_threshold", 0.9),
725
+ )
726
+ return {
727
+ "candidates_found": extract_result.candidates_found,
728
+ "memories_created": extract_result.memories_created,
729
+ "deduplicated_count": extract_result.deduplicated_count,
730
+ "extractions": [
731
+ {
732
+ "content": e.content,
733
+ "confidence": e.confidence,
734
+ "pattern_matched": e.pattern_matched,
735
+ "start_pos": e.start_pos,
736
+ "end_pos": e.end_pos,
737
+ "stored": e.stored,
738
+ "memory_id": e.memory_id,
739
+ }
740
+ for e in extract_result.extractions
741
+ ],
742
+ }
743
+
744
+ def _handle_consolidate(self, arguments: dict[str, Any]) -> ConsolidateResponse:
745
+ """Handle consolidate tool call."""
746
+ consolidate_result = self._lifecycle_service.consolidate(
747
+ namespace=arguments["namespace"],
748
+ similarity_threshold=arguments.get("similarity_threshold", 0.85),
749
+ strategy=arguments.get("strategy", "keep_highest_importance"),
750
+ dry_run=arguments.get("dry_run", True),
751
+ max_groups=arguments.get("max_groups", 50),
752
+ )
753
+ return {
754
+ "groups_found": consolidate_result.groups_found,
755
+ "memories_merged": consolidate_result.memories_merged,
756
+ "memories_deleted": consolidate_result.memories_deleted,
757
+ "groups": [
758
+ {
759
+ "representative_id": g.representative_id,
760
+ "member_ids": g.member_ids,
761
+ "avg_similarity": g.avg_similarity,
762
+ "action_taken": g.action_taken,
763
+ }
764
+ for g in consolidate_result.groups
765
+ ],
766
+ "dry_run": consolidate_result.dry_run,
767
+ }
768
+
769
+ def _handle_stats(self, arguments: dict[str, Any]) -> StatsResponse:
770
+ """Handle stats tool call."""
771
+ stats_result = self._utility_service.stats(
772
+ namespace=arguments.get("namespace"),
773
+ include_index_details=arguments.get("include_index_details", True),
774
+ )
775
+ return {
776
+ "total_memories": stats_result.total_memories,
777
+ "memories_by_namespace": stats_result.memories_by_namespace,
778
+ "storage_bytes": stats_result.storage_bytes,
779
+ "storage_mb": stats_result.storage_mb,
780
+ "estimated_vector_bytes": stats_result.estimated_vector_bytes,
781
+ "has_vector_index": stats_result.has_vector_index,
782
+ "has_fts_index": stats_result.has_fts_index,
783
+ "indices": [
784
+ {
785
+ "name": idx.name,
786
+ "index_type": idx.index_type,
787
+ "column": idx.column,
788
+ "num_indexed_rows": idx.num_indexed_rows,
789
+ "status": idx.status,
790
+ }
791
+ for idx in stats_result.indices
792
+ ] if stats_result.indices else [],
793
+ "num_fragments": stats_result.num_fragments,
794
+ "needs_compaction": stats_result.needs_compaction,
795
+ "table_version": stats_result.table_version,
796
+ "oldest_memory_date": (
797
+ stats_result.oldest_memory_date.isoformat()
798
+ if stats_result.oldest_memory_date else None
799
+ ),
800
+ "newest_memory_date": (
801
+ stats_result.newest_memory_date.isoformat()
802
+ if stats_result.newest_memory_date else None
803
+ ),
804
+ "avg_content_length": stats_result.avg_content_length,
805
+ }
806
+
807
+ def _handle_namespaces(self, arguments: dict[str, Any]) -> NamespacesResponse:
808
+ """Handle namespaces tool call."""
809
+ namespaces_result = self._utility_service.namespaces(
810
+ include_stats=arguments.get("include_stats", True),
811
+ )
812
+ return {
813
+ "namespaces": [
814
+ {
815
+ "name": ns.name,
816
+ "memory_count": ns.memory_count,
817
+ "oldest_memory": (
818
+ ns.oldest_memory.isoformat() if ns.oldest_memory else None
819
+ ),
820
+ "newest_memory": (
821
+ ns.newest_memory.isoformat() if ns.newest_memory else None
822
+ ),
823
+ }
824
+ for ns in namespaces_result.namespaces
825
+ ],
826
+ "total_namespaces": namespaces_result.total_namespaces,
827
+ "total_memories": namespaces_result.total_memories,
828
+ }
829
+
830
+ def _handle_delete_namespace(self, arguments: dict[str, Any]) -> DeleteNamespaceResponse:
831
+ """Handle delete_namespace tool call."""
832
+ delete_result = self._utility_service.delete_namespace(
833
+ namespace=arguments["namespace"],
834
+ confirm=arguments.get("confirm", False),
835
+ dry_run=arguments.get("dry_run", True),
836
+ )
837
+ return {
838
+ "namespace": delete_result.namespace,
839
+ "memories_deleted": delete_result.memories_deleted,
840
+ "success": delete_result.success,
841
+ "message": delete_result.message,
842
+ "dry_run": delete_result.dry_run,
843
+ }
844
+
845
+ def _handle_rename_namespace(self, arguments: dict[str, Any]) -> RenameNamespaceResponse:
846
+ """Handle rename_namespace tool call."""
847
+ rename_result = self._utility_service.rename_namespace(
848
+ old_namespace=arguments["old_namespace"],
849
+ new_namespace=arguments["new_namespace"],
850
+ )
851
+ return {
852
+ "old_namespace": rename_result.old_namespace,
853
+ "new_namespace": rename_result.new_namespace,
854
+ "memories_renamed": rename_result.memories_renamed,
855
+ "success": rename_result.success,
856
+ "message": rename_result.message,
857
+ }
858
+
859
+ def _handle_export_memories(self, arguments: dict[str, Any]) -> ExportResponse:
860
+ """Handle export_memories tool call."""
861
+ export_result = self._export_import_service.export_memories(
862
+ output_path=arguments["output_path"],
863
+ format=arguments.get("format"),
864
+ namespace=arguments.get("namespace"),
865
+ include_vectors=arguments.get("include_vectors", True),
866
+ )
867
+ return {
868
+ "format": export_result.format,
869
+ "output_path": export_result.output_path,
870
+ "memories_exported": export_result.memories_exported,
871
+ "file_size_bytes": export_result.file_size_bytes,
872
+ "file_size_mb": export_result.file_size_mb,
873
+ "namespaces_included": export_result.namespaces_included,
874
+ "duration_seconds": export_result.duration_seconds,
875
+ "compression": export_result.compression,
876
+ }
877
+
878
+ def _handle_import_memories(self, arguments: dict[str, Any]) -> ImportResponse:
879
+ """Handle import_memories tool call."""
880
+ dry_run = arguments.get("dry_run", True)
881
+ import_result = self._export_import_service.import_memories(
882
+ source_path=arguments["source_path"],
883
+ format=arguments.get("format"),
884
+ namespace_override=arguments.get("namespace_override"),
885
+ deduplicate=arguments.get("deduplicate", False),
886
+ dedup_threshold=arguments.get("dedup_threshold", 0.95),
887
+ validate=arguments.get("validate", True),
888
+ regenerate_embeddings=arguments.get("regenerate_embeddings", False),
889
+ dry_run=dry_run,
890
+ )
891
+ return {
892
+ "source_path": import_result.source_path,
893
+ "format": import_result.format,
894
+ "total_records_in_file": import_result.total_records_in_file,
895
+ "memories_imported": import_result.memories_imported,
896
+ "memories_skipped": import_result.memories_skipped,
897
+ "memories_failed": import_result.memories_failed,
898
+ "validation_errors": [
899
+ {
900
+ "row_number": err.row_number,
901
+ "field": err.field,
902
+ "error": err.error,
903
+ "value": str(err.value) if err.value is not None else None,
904
+ }
905
+ for err in import_result.validation_errors
906
+ ] if import_result.validation_errors else [],
907
+ "namespace_override": import_result.namespace_override,
908
+ "duration_seconds": import_result.duration_seconds,
909
+ "dry_run": dry_run,
910
+ "imported_memories": [
911
+ {
912
+ "id": m.id,
913
+ "content_preview": m.content_preview,
914
+ "namespace": m.namespace,
915
+ }
916
+ for m in import_result.imported_memories[:10]
917
+ ] if import_result.imported_memories else [],
918
+ }
919
+
920
+ def _handle_hybrid_recall(self, arguments: dict[str, Any]) -> HybridRecallResponse:
921
+ """Handle hybrid_recall tool call."""
922
+ hybrid_result = self._utility_service.hybrid_recall(
923
+ query=arguments["query"],
924
+ alpha=arguments.get("alpha", 0.5),
925
+ limit=arguments.get("limit", 5),
926
+ namespace=arguments.get("namespace"),
927
+ min_similarity=arguments.get("min_similarity", 0.0),
928
+ )
929
+ return {
930
+ "query": hybrid_result.query,
931
+ "alpha": hybrid_result.alpha,
932
+ "memories": [
933
+ {
934
+ "id": m.id,
935
+ "content": m.content,
936
+ "similarity": m.similarity,
937
+ "namespace": m.namespace,
938
+ "tags": m.tags,
939
+ "importance": m.importance,
940
+ "created_at": (
941
+ m.created_at.isoformat() if m.created_at else None
942
+ ),
943
+ "metadata": m.metadata,
944
+ "vector_score": m.vector_score,
945
+ "fts_score": m.fts_score,
946
+ }
947
+ for m in hybrid_result.memories
948
+ ],
949
+ "total": hybrid_result.total,
950
+ "search_type": hybrid_result.search_type,
951
+ }
952
+
953
+ # =========================================================================
954
+ # Tool Routing
955
+ # =========================================================================
956
+
957
+ def _handle_tool(self, name: str, arguments: dict[str, Any]) -> HandlerResponse:
958
+ """Route tool call to appropriate handler.
959
+
960
+ Args:
961
+ name: Tool name.
962
+ arguments: Tool arguments.
963
+
964
+ Returns:
965
+ Tool result as typed dictionary.
966
+
967
+ Raises:
968
+ ValidationError: If tool name is unknown.
969
+ """
970
+ # Record metrics for this tool call
971
+ with record_request(name, "success"):
972
+ return self._handle_tool_impl(name, arguments)
973
+
974
+ def _handle_tool_impl(self, name: str, arguments: dict[str, Any]) -> HandlerResponse:
975
+ """Implementation of tool routing using dispatch pattern.
976
+
977
+ Args:
978
+ name: Tool name.
979
+ arguments: Tool arguments.
980
+
981
+ Returns:
982
+ Tool result as dictionary.
983
+
984
+ Raises:
985
+ ValidationError: If tool name is unknown.
986
+ """
987
+ handler = self._tool_handlers.get(name)
988
+ if handler is None:
989
+ raise ValidationError(f"Unknown tool: {name}")
990
+ return handler(arguments)
991
+
992
    @staticmethod
    def _get_server_instructions() -> str:
        """Return behavioral instructions for Claude when using spatial-memory.

        These instructions are automatically injected into Claude's system prompt
        when the MCP server connects, enabling proactive memory management without
        requiring user configuration.

        Returns:
            A markdown-formatted instruction block describing when and how the
            client should call the memory tools.
        """
        # NOTE: this literal is client-facing prompt text, delivered verbatim;
        # edits here change assistant behavior, not server behavior.
        return '''## Spatial Memory System

You have access to a persistent semantic memory system. Use it proactively to build cumulative knowledge across sessions.

### Session Start
At conversation start, call `recall` with the user's apparent task/context to load relevant memories. Present insights naturally:
- Good: "Based on previous work, you decided to use PostgreSQL because..."
- Bad: "The database returned: [{id: '...', content: '...'}]"

### Recognizing Memory-Worthy Moments
After these events, ask briefly "Save this? y/n" (minimal friction):
- **Decisions**: "Let's use X...", "We decided...", "The approach is..."
- **Solutions**: "The fix was...", "It failed because...", "The error was..."
- **Patterns**: "This pattern works...", "The trick is...", "Always do X when..."
- **Discoveries**: "I found that...", "Important:...", "TIL..."

Do NOT ask for trivial information. Only prompt for insights that would help future sessions.

### Saving Memories
When user confirms, save with:
- **Detailed content**: Include full context, reasoning, and specifics. Future agents need complete information.
- **Contextual namespace**: Use project name, or categories like "decisions", "errors", "patterns"
- **Descriptive tags**: Technologies, concepts, error types involved
- **High importance (0.8-1.0)**: For decisions and critical fixes
- **Medium importance (0.5-0.7)**: For patterns and learnings

### Synthesizing Answers
When using `recall` or `hybrid_recall`, present results as natural knowledge:
- Integrate memories into your response conversationally
- Reference prior decisions: "You previously decided X because Y"
- Don't expose raw JSON or tool mechanics to the user

### Auto-Extract for Long Sessions
For significant problem-solving conversations (debugging sessions, architecture discussions), offer:
"This session had good learnings. Extract key memories? y/n"
Then use `extract` to automatically capture important information.

### Tool Selection Guide
- `remember`: Store a single memory with full context
- `recall`: Semantic search for relevant memories
- `hybrid_recall`: Combined keyword + semantic search (better for specific terms)
- `extract`: Auto-extract memories from conversation text
- `nearby`: Find memories similar to a known memory
- `regions`: Discover topic clusters in memory space
- `journey`: Navigate conceptual path between two memories'''
1045
+
1046
+ async def run(self) -> None:
1047
+ """Run the MCP server using stdio transport."""
1048
+ async with stdio_server() as (read_stream, write_stream):
1049
+ await self._server.run(
1050
+ read_stream,
1051
+ write_stream,
1052
+ self._server.create_initialization_options(),
1053
+ )
1054
+
1055
+ def close(self) -> None:
1056
+ """Clean up resources."""
1057
+ # Shutdown the thread pool executor
1058
+ if hasattr(self, "_executor"):
1059
+ self._executor.shutdown(wait=False)
1060
+
1061
+ if self._db is not None:
1062
+ self._db.close()
1063
+
1064
+
1065
def create_server(
    repository: MemoryRepositoryProtocol | None = None,
    embeddings: EmbeddingServiceProtocol | None = None,
) -> SpatialMemoryServer:
    """Build a SpatialMemoryServer, optionally with injected dependencies.

    This factory exists so tests can swap in fake repository/embedding
    implementations without patching module internals.

    Args:
        repository: Repository implementation to use instead of the default.
        embeddings: Embedding service implementation to use instead of the default.

    Returns:
        A configured SpatialMemoryServer instance.
    """
    server = SpatialMemoryServer(repository=repository, embeddings=embeddings)
    return server
1081
+
1082
+
1083
async def main() -> None:
    """Main entry point for the MCP server.

    Startup sequence: load settings, validate them (exiting with status 1
    on a hard configuration error), configure structured logging, build the
    server, wire up signal/atexit cleanup, then serve until cancelled.
    """
    # Get settings
    settings = get_settings()

    # Validate configuration
    try:
        warnings = validate_startup(settings)
        # Use basic logging temporarily for startup validation
        # (configure_logging has not run yet at this point).
        logging.basicConfig(level=settings.log_level)
        logger = logging.getLogger(__name__)
        for warning in warnings:
            logger.warning(f"Configuration warning: {warning}")
    except ConfigurationError as e:
        # Use basic logging for error
        logging.basicConfig(level=logging.ERROR)
        logger = logging.getLogger(__name__)
        logger.error(f"Configuration error: {e}")
        # Hard configuration errors are fatal: exit before any server setup.
        sys.exit(1)

    # Configure logging properly
    configure_logging(
        level=settings.log_level,
        json_format=settings.log_format == "json",
    )

    server = create_server()
    # Guard flag so cleanup() is idempotent (atexit + finally may both fire).
    cleanup_done = False

    def cleanup() -> None:
        """Cleanup function for server resources."""
        nonlocal cleanup_done
        if cleanup_done:
            return
        cleanup_done = True
        logger.info("Cleaning up server resources...")
        server.close()
        clear_connection_cache()
        logger.info("Server shutdown complete")

    def handle_shutdown(signum: int, frame: Any) -> None:
        """Handle shutdown signals gracefully."""
        # NOTE(review): this handler only logs; it does not stop the server or
        # re-raise KeyboardInterrupt. Installing it replaces the default SIGINT
        # behavior — presumably shutdown relies on the stdio transport closing.
        # TODO confirm this is intentional.
        sig_name = signal.Signals(signum).name
        logger.info(f"Received {sig_name}, initiating graceful shutdown...")

    # Register signal handlers for logging (both platforms use same code)
    signal.signal(signal.SIGINT, handle_shutdown)
    signal.signal(signal.SIGTERM, handle_shutdown)

    # Register atexit as a safety net for cleanup
    atexit.register(cleanup)

    try:
        await server.run()
    except asyncio.CancelledError:
        logger.info("Server task cancelled")
    finally:
        cleanup()
        atexit.unregister(cleanup)  # Prevent double cleanup