spatial-memory-mcp 1.9.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- spatial_memory/__init__.py +97 -0
- spatial_memory/__main__.py +271 -0
- spatial_memory/adapters/__init__.py +7 -0
- spatial_memory/adapters/lancedb_repository.py +880 -0
- spatial_memory/config.py +769 -0
- spatial_memory/core/__init__.py +118 -0
- spatial_memory/core/cache.py +317 -0
- spatial_memory/core/circuit_breaker.py +297 -0
- spatial_memory/core/connection_pool.py +220 -0
- spatial_memory/core/consolidation_strategies.py +401 -0
- spatial_memory/core/database.py +3072 -0
- spatial_memory/core/db_idempotency.py +242 -0
- spatial_memory/core/db_indexes.py +576 -0
- spatial_memory/core/db_migrations.py +588 -0
- spatial_memory/core/db_search.py +512 -0
- spatial_memory/core/db_versioning.py +178 -0
- spatial_memory/core/embeddings.py +558 -0
- spatial_memory/core/errors.py +317 -0
- spatial_memory/core/file_security.py +701 -0
- spatial_memory/core/filesystem.py +178 -0
- spatial_memory/core/health.py +289 -0
- spatial_memory/core/helpers.py +79 -0
- spatial_memory/core/import_security.py +433 -0
- spatial_memory/core/lifecycle_ops.py +1067 -0
- spatial_memory/core/logging.py +194 -0
- spatial_memory/core/metrics.py +192 -0
- spatial_memory/core/models.py +660 -0
- spatial_memory/core/rate_limiter.py +326 -0
- spatial_memory/core/response_types.py +500 -0
- spatial_memory/core/security.py +588 -0
- spatial_memory/core/spatial_ops.py +430 -0
- spatial_memory/core/tracing.py +300 -0
- spatial_memory/core/utils.py +110 -0
- spatial_memory/core/validation.py +406 -0
- spatial_memory/factory.py +444 -0
- spatial_memory/migrations/__init__.py +40 -0
- spatial_memory/ports/__init__.py +11 -0
- spatial_memory/ports/repositories.py +630 -0
- spatial_memory/py.typed +0 -0
- spatial_memory/server.py +1214 -0
- spatial_memory/services/__init__.py +70 -0
- spatial_memory/services/decay_manager.py +411 -0
- spatial_memory/services/export_import.py +1031 -0
- spatial_memory/services/lifecycle.py +1139 -0
- spatial_memory/services/memory.py +412 -0
- spatial_memory/services/spatial.py +1152 -0
- spatial_memory/services/utility.py +429 -0
- spatial_memory/tools/__init__.py +5 -0
- spatial_memory/tools/definitions.py +695 -0
- spatial_memory/verify.py +140 -0
- spatial_memory_mcp-1.9.1.dist-info/METADATA +509 -0
- spatial_memory_mcp-1.9.1.dist-info/RECORD +55 -0
- spatial_memory_mcp-1.9.1.dist-info/WHEEL +4 -0
- spatial_memory_mcp-1.9.1.dist-info/entry_points.txt +2 -0
- spatial_memory_mcp-1.9.1.dist-info/licenses/LICENSE +21 -0
spatial_memory/server.py
ADDED
|
@@ -0,0 +1,1214 @@
|
|
|
1
|
+
"""MCP Server for Spatial Memory.
|
|
2
|
+
|
|
3
|
+
This module provides the MCP (Model Context Protocol) server implementation
|
|
4
|
+
that exposes memory operations as tools for LLM assistants.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
import asyncio
|
|
10
|
+
import atexit
|
|
11
|
+
import json
|
|
12
|
+
import logging
|
|
13
|
+
import signal
|
|
14
|
+
import sys
|
|
15
|
+
import uuid
|
|
16
|
+
from collections.abc import Callable
|
|
17
|
+
from concurrent.futures import ThreadPoolExecutor
|
|
18
|
+
from dataclasses import asdict
|
|
19
|
+
from functools import partial
|
|
20
|
+
from typing import TYPE_CHECKING, Any
|
|
21
|
+
|
|
22
|
+
from mcp.server import Server
|
|
23
|
+
from mcp.server.stdio import stdio_server
|
|
24
|
+
from mcp.types import TextContent, Tool
|
|
25
|
+
|
|
26
|
+
from spatial_memory import __version__
|
|
27
|
+
from spatial_memory.config import ConfigurationError, get_settings, validate_startup
|
|
28
|
+
from spatial_memory.core.database import (
|
|
29
|
+
clear_connection_cache,
|
|
30
|
+
set_connection_pool_max_size,
|
|
31
|
+
)
|
|
32
|
+
from spatial_memory.core.errors import (
|
|
33
|
+
ConsolidationError,
|
|
34
|
+
DecayError,
|
|
35
|
+
ExportError,
|
|
36
|
+
ExtractionError,
|
|
37
|
+
FileSizeLimitError,
|
|
38
|
+
ImportRecordLimitError,
|
|
39
|
+
MemoryImportError,
|
|
40
|
+
MemoryNotFoundError,
|
|
41
|
+
NamespaceNotFoundError,
|
|
42
|
+
NamespaceOperationError,
|
|
43
|
+
PathSecurityError,
|
|
44
|
+
ReinforcementError,
|
|
45
|
+
SpatialMemoryError,
|
|
46
|
+
ValidationError,
|
|
47
|
+
)
|
|
48
|
+
from spatial_memory.core.health import HealthChecker
|
|
49
|
+
from spatial_memory.core.logging import configure_logging
|
|
50
|
+
from spatial_memory.core.metrics import is_available as metrics_available
|
|
51
|
+
from spatial_memory.core.metrics import record_request
|
|
52
|
+
from spatial_memory.core.response_types import (
|
|
53
|
+
ConsolidateResponse,
|
|
54
|
+
DecayResponse,
|
|
55
|
+
DeleteNamespaceResponse,
|
|
56
|
+
ExportResponse,
|
|
57
|
+
ExtractResponse,
|
|
58
|
+
ForgetBatchResponse,
|
|
59
|
+
ForgetResponse,
|
|
60
|
+
HandlerResponse,
|
|
61
|
+
HealthResponse,
|
|
62
|
+
HybridRecallResponse,
|
|
63
|
+
ImportResponse,
|
|
64
|
+
JourneyResponse,
|
|
65
|
+
NamespacesResponse,
|
|
66
|
+
NearbyResponse,
|
|
67
|
+
RecallResponse,
|
|
68
|
+
RegionsResponse,
|
|
69
|
+
ReinforceResponse,
|
|
70
|
+
RememberBatchResponse,
|
|
71
|
+
RememberResponse,
|
|
72
|
+
RenameNamespaceResponse,
|
|
73
|
+
StatsResponse,
|
|
74
|
+
VisualizeResponse,
|
|
75
|
+
WanderResponse,
|
|
76
|
+
)
|
|
77
|
+
from spatial_memory.core.tracing import (
|
|
78
|
+
RequestContext,
|
|
79
|
+
TimingContext,
|
|
80
|
+
request_context,
|
|
81
|
+
)
|
|
82
|
+
from spatial_memory.factory import ServiceFactory
|
|
83
|
+
from spatial_memory.tools import TOOLS
|
|
84
|
+
|
|
85
|
+
if TYPE_CHECKING:
|
|
86
|
+
from spatial_memory.ports.repositories import (
|
|
87
|
+
EmbeddingServiceProtocol,
|
|
88
|
+
MemoryRepositoryProtocol,
|
|
89
|
+
)
|
|
90
|
+
|
|
91
|
+
logger = logging.getLogger(__name__)
|
|
92
|
+
|
|
93
|
+
# Tools that can be cached (read-only operations): their responses depend
# only on stored data, so identical calls may be served from the cache.
CACHEABLE_TOOLS = frozenset({"recall", "nearby", "hybrid_recall", "regions"})

# Tools that invalidate cache by namespace: they mutate memories within a
# single namespace, so only that namespace's cached responses become stale.
NAMESPACE_INVALIDATING_TOOLS = frozenset({"remember", "forget", "forget_batch"})

# Tools that invalidate entire cache: they may rescore or merge memories
# across namespaces, so every cached response becomes stale.
FULL_INVALIDATING_TOOLS = frozenset({"decay", "reinforce", "consolidate"})
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
def _generate_cache_key(tool_name: str, arguments: dict[str, Any]) -> str:
|
|
104
|
+
"""Generate a cache key from tool name and arguments.
|
|
105
|
+
|
|
106
|
+
Args:
|
|
107
|
+
tool_name: Name of the tool.
|
|
108
|
+
arguments: Tool arguments (excluding _agent_id).
|
|
109
|
+
|
|
110
|
+
Returns:
|
|
111
|
+
A string cache key suitable for response caching.
|
|
112
|
+
"""
|
|
113
|
+
# Remove _agent_id from cache key computation (same query from different agents = same result)
|
|
114
|
+
cache_args = {k: v for k, v in sorted(arguments.items()) if k != "_agent_id"}
|
|
115
|
+
# Create a stable string representation
|
|
116
|
+
args_str = json.dumps(cache_args, sort_keys=True, default=str)
|
|
117
|
+
return f"{tool_name}:{hash(args_str)}"
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
# Error type to response name mapping for standardized error responses.
# Lookup is by exact type (ERROR_MAPPINGS.get(type(error), ...)), so an
# exception subclass not listed here falls through to "UnknownError" in
# _create_error_response even if its parent class is listed.
ERROR_MAPPINGS: dict[type[Exception], str] = {
    MemoryNotFoundError: "MemoryNotFound",
    ValidationError: "ValidationError",
    DecayError: "DecayError",
    ReinforcementError: "ReinforcementError",
    ExtractionError: "ExtractionError",
    ConsolidationError: "ConsolidationError",
    ExportError: "ExportError",
    MemoryImportError: "ImportError",
    PathSecurityError: "PathSecurityError",
    FileSizeLimitError: "FileSizeLimitError",
    ImportRecordLimitError: "ImportRecordLimitError",
    NamespaceNotFoundError: "NamespaceNotFound",
    NamespaceOperationError: "NamespaceOperationError",
    SpatialMemoryError: "SpatialMemoryError",
}
|
|
137
|
+
|
|
138
|
+
|
|
139
|
+
def _create_error_response(error: Exception, error_id: str | None = None) -> list[TextContent]:
    """Create standardized error response for tool handlers."""
    # Exact-type lookup; unmapped exception types are reported generically.
    base: dict[str, Any] = {
        "error": ERROR_MAPPINGS.get(type(error), "UnknownError"),
        "message": str(error),
        "isError": True,
    }
    # Only unexpected errors carry a correlation id (see the dispatcher).
    extra = {"error_id": error_id} if error_id else {}
    return [TextContent(type="text", text=json.dumps(base | extra))]
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
class SpatialMemoryServer:
    """MCP Server for Spatial Memory operations.

    Uses dependency injection for testability: the repository and embedding
    service may be supplied by the caller (e.g. fakes in tests); otherwise
    they are constructed by the service factory from settings.
    """
|
|
157
|
+
|
|
158
|
+
    def __init__(
        self,
        repository: MemoryRepositoryProtocol | None = None,
        embeddings: EmbeddingServiceProtocol | None = None,
    ) -> None:
        """Initialize the server.

        Builds all services via ServiceFactory, starts the optional
        auto-decay manager, and wires up the MCP server handlers.

        Args:
            repository: Optional repository (uses LanceDB if not provided).
            embeddings: Optional embedding service (uses local model if not provided).
        """
        self._settings = get_settings()

        # Configure connection pool size from settings; done up-front,
        # before the factory builds any services.
        set_connection_pool_max_size(self._settings.connection_pool_max_size)

        # Use ServiceFactory for dependency injection
        factory = ServiceFactory(
            settings=self._settings,
            repository=repository,
            embeddings=embeddings,
        )
        services = factory.create_all()

        # Store service references
        self._db = services.database
        self._embeddings = services.embeddings
        self._memory_service = services.memory
        self._spatial_service = services.spatial
        self._lifecycle_service = services.lifecycle
        self._utility_service = services.utility
        self._export_import_service = services.export_import

        # Rate limiting: either one limiter per agent or a single global one,
        # selected at dispatch time (see _setup_handlers).
        self._per_agent_rate_limiting = services.per_agent_rate_limiting
        self._rate_limiter = services.rate_limiter
        self._agent_rate_limiter = services.agent_rate_limiter

        # Response cache (read-only tools only; see CACHEABLE_TOOLS)
        self._cache_enabled = services.cache_enabled
        self._cache = services.cache
        self._regions_cache_ttl = services.regions_cache_ttl

        # Auto-decay manager (optional background importance decay)
        self._decay_manager = services.decay_manager
        if self._decay_manager is not None:
            self._decay_manager.start()
            logger.info("Auto-decay manager started")

        # ThreadPoolExecutor for non-blocking embedding operations
        self._executor = ThreadPoolExecutor(
            max_workers=2,
            thread_name_prefix="embed-",
        )

        # Tool handler registry for dispatch pattern: tool name -> bound
        # synchronous handler method.
        self._tool_handlers: dict[str, Callable[[dict[str, Any]], HandlerResponse]] = {
            "remember": self._handle_remember,
            "remember_batch": self._handle_remember_batch,
            "recall": self._handle_recall,
            "nearby": self._handle_nearby,
            "forget": self._handle_forget,
            "forget_batch": self._handle_forget_batch,
            "health": self._handle_health,
            "journey": self._handle_journey,
            "wander": self._handle_wander,
            "regions": self._handle_regions,
            "visualize": self._handle_visualize,
            "decay": self._handle_decay,
            "reinforce": self._handle_reinforce,
            "extract": self._handle_extract,
            "consolidate": self._handle_consolidate,
            "stats": self._handle_stats,
            "namespaces": self._handle_namespaces,
            "delete_namespace": self._handle_delete_namespace,
            "rename_namespace": self._handle_rename_namespace,
            "export_memories": self._handle_export_memories,
            "import_memories": self._handle_import_memories,
            "hybrid_recall": self._handle_hybrid_recall,
        }

        # Log metrics availability
        if metrics_available():
            logger.info("Prometheus metrics enabled")
        else:
            logger.info("Prometheus metrics disabled (prometheus_client not installed)")

        # Create MCP server with behavioral instructions
        self._server = Server(
            name="spatial-memory",
            version=__version__,
            instructions=self._get_server_instructions(),
        )
        self._setup_handlers()
|
|
252
|
+
|
|
253
|
+
async def _run_in_executor(self, func: Callable[..., Any], *args: Any) -> Any:
|
|
254
|
+
"""Run a synchronous function in the thread pool executor.
|
|
255
|
+
|
|
256
|
+
This allows CPU-bound or blocking operations (like embedding generation)
|
|
257
|
+
to run without blocking the asyncio event loop.
|
|
258
|
+
|
|
259
|
+
Args:
|
|
260
|
+
func: The synchronous function to run.
|
|
261
|
+
*args: Arguments to pass to the function.
|
|
262
|
+
|
|
263
|
+
Returns:
|
|
264
|
+
The result of the function call.
|
|
265
|
+
"""
|
|
266
|
+
loop = asyncio.get_running_loop()
|
|
267
|
+
return await loop.run_in_executor(self._executor, partial(func, *args))
|
|
268
|
+
|
|
269
|
+
async def _handle_tool_async(
|
|
270
|
+
self, name: str, arguments: dict[str, Any]
|
|
271
|
+
) -> HandlerResponse:
|
|
272
|
+
"""Handle tool call asynchronously by running handler in executor.
|
|
273
|
+
|
|
274
|
+
This wraps synchronous handlers to run in a thread pool, preventing
|
|
275
|
+
blocking operations from stalling the event loop.
|
|
276
|
+
|
|
277
|
+
Args:
|
|
278
|
+
name: Tool name.
|
|
279
|
+
arguments: Tool arguments.
|
|
280
|
+
|
|
281
|
+
Returns:
|
|
282
|
+
Tool result as typed dictionary.
|
|
283
|
+
|
|
284
|
+
Raises:
|
|
285
|
+
ValidationError: If tool name is unknown.
|
|
286
|
+
"""
|
|
287
|
+
result: HandlerResponse = await self._run_in_executor(self._handle_tool, name, arguments)
|
|
288
|
+
return result
|
|
289
|
+
|
|
290
|
+
    def _setup_handlers(self) -> None:
        """Set up MCP tool handlers.

        Registers two closures on the MCP server: ``list_tools`` (static
        tool catalog) and ``call_tool`` (the full dispatch pipeline:
        rate limiting -> tracing -> cache lookup -> handler -> cache
        invalidation -> metadata -> error mapping).
        """

        @self._server.list_tools()  # type: ignore[no-untyped-call, untyped-decorator]
        async def list_tools() -> list[Tool]:
            """Return the list of available tools."""
            return TOOLS

        @self._server.call_tool()  # type: ignore[untyped-decorator]
        async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
            """Handle tool calls with tracing, caching, and rate limiting."""
            # Extract _agent_id for tracing and rate limiting (don't pass to handler)
            agent_id = arguments.pop("_agent_id", None)

            # Apply rate limiting before any work; per-agent limiter takes
            # precedence over the global one when both are configured.
            if self._per_agent_rate_limiting and self._agent_rate_limiter is not None:
                if not self._agent_rate_limiter.wait(agent_id=agent_id, timeout=30.0):
                    return [TextContent(
                        type="text",
                        text=json.dumps({
                            "error": "RateLimitExceeded",
                            "message": "Too many requests. Please wait and try again.",
                            "isError": True,
                        })
                    )]
            elif self._rate_limiter is not None:
                if not self._rate_limiter.wait(timeout=30.0):
                    return [TextContent(
                        type="text",
                        text=json.dumps({
                            "error": "RateLimitExceeded",
                            "message": "Too many requests. Please wait and try again.",
                            "isError": True,
                        })
                    )]

            # Use request context for tracing
            namespace = arguments.get("namespace")
            with request_context(tool_name=name, agent_id=agent_id, namespace=namespace) as ctx:
                timing = TimingContext()
                cache_hit = False

                try:
                    # Check cache for cacheable (read-only) tools
                    if self._cache_enabled and self._cache is not None and name in CACHEABLE_TOOLS:
                        cache_key = _generate_cache_key(name, arguments)
                        with timing.measure("cache_lookup"):
                            cached_result = self._cache.get(cache_key)
                        if cached_result is not None:
                            cache_hit = True
                            result = cached_result
                        else:
                            with timing.measure("handler"):
                                # Run handler in executor to avoid blocking event loop
                                result = await self._handle_tool_async(name, arguments)
                            # Cache the result with appropriate TTL ("regions"
                            # gets its own, typically longer, TTL).
                            ttl = self._regions_cache_ttl if name == "regions" else None
                            self._cache.set(cache_key, result, ttl=ttl)
                    else:
                        with timing.measure("handler"):
                            # Run handler in executor to avoid blocking event loop
                            result = await self._handle_tool_async(name, arguments)

                        # Invalidate cache on mutations (only non-cacheable
                        # tools can mutate, hence this lives in the else arm)
                        if self._cache_enabled and self._cache is not None:
                            self._invalidate_cache_for_tool(name, arguments)

                    # Add _meta to response if enabled.
                    # NOTE(review): on a cache miss this mutates the same dict
                    # object just stored in the cache, so cached entries carry
                    # a stale _meta that is overwritten on each hit — benign,
                    # but worth confirming it is intentional.
                    if self._settings.include_request_meta:
                        result["_meta"] = self._build_meta(ctx, timing, cache_hit)

                    return [TextContent(type="text", text=json.dumps(result, default=str))]
                except tuple(ERROR_MAPPINGS.keys()) as e:
                    # Known domain errors map to standardized payloads.
                    return _create_error_response(e)
                except Exception as e:
                    # Unexpected failure: log with a short correlation id that
                    # is also returned to the caller.
                    error_id = str(uuid.uuid4())[:8]
                    logger.error(f"Unexpected error [{error_id}] in {name}: {e}", exc_info=True)
                    return _create_error_response(e, error_id)
|
|
368
|
+
|
|
369
|
+
def _build_meta(
|
|
370
|
+
self,
|
|
371
|
+
ctx: RequestContext,
|
|
372
|
+
timing: TimingContext,
|
|
373
|
+
cache_hit: bool,
|
|
374
|
+
) -> dict[str, Any]:
|
|
375
|
+
"""Build the _meta object for response.
|
|
376
|
+
|
|
377
|
+
Args:
|
|
378
|
+
ctx: The request context.
|
|
379
|
+
timing: The timing context.
|
|
380
|
+
cache_hit: Whether this was a cache hit.
|
|
381
|
+
|
|
382
|
+
Returns:
|
|
383
|
+
Dictionary with request metadata.
|
|
384
|
+
"""
|
|
385
|
+
meta: dict[str, Any] = {
|
|
386
|
+
"request_id": ctx.request_id,
|
|
387
|
+
"agent_id": ctx.agent_id,
|
|
388
|
+
"cache_hit": cache_hit,
|
|
389
|
+
}
|
|
390
|
+
if self._settings.include_timing_breakdown:
|
|
391
|
+
meta["timing_ms"] = timing.summary()
|
|
392
|
+
return meta
|
|
393
|
+
|
|
394
|
+
def _invalidate_cache_for_tool(self, name: str, arguments: dict[str, Any]) -> None:
|
|
395
|
+
"""Invalidate cache entries based on the tool that was called.
|
|
396
|
+
|
|
397
|
+
Args:
|
|
398
|
+
name: Tool name.
|
|
399
|
+
arguments: Tool arguments.
|
|
400
|
+
"""
|
|
401
|
+
if self._cache is None:
|
|
402
|
+
return
|
|
403
|
+
|
|
404
|
+
if name in FULL_INVALIDATING_TOOLS:
|
|
405
|
+
self._cache.invalidate_all()
|
|
406
|
+
elif name in NAMESPACE_INVALIDATING_TOOLS:
|
|
407
|
+
namespace = arguments.get("namespace", "default")
|
|
408
|
+
self._cache.invalidate_namespace(namespace)
|
|
409
|
+
|
|
410
|
+
# =========================================================================
|
|
411
|
+
# Tool Handler Methods
|
|
412
|
+
# =========================================================================
|
|
413
|
+
|
|
414
|
+
def _handle_remember(self, arguments: dict[str, Any]) -> RememberResponse:
|
|
415
|
+
"""Handle remember tool call."""
|
|
416
|
+
remember_result = self._memory_service.remember(
|
|
417
|
+
content=arguments["content"],
|
|
418
|
+
namespace=arguments.get("namespace", "default"),
|
|
419
|
+
tags=arguments.get("tags"),
|
|
420
|
+
importance=arguments.get("importance", 0.5),
|
|
421
|
+
metadata=arguments.get("metadata"),
|
|
422
|
+
)
|
|
423
|
+
return asdict(remember_result) # type: ignore[return-value]
|
|
424
|
+
|
|
425
|
+
def _handle_remember_batch(self, arguments: dict[str, Any]) -> RememberBatchResponse:
|
|
426
|
+
"""Handle remember_batch tool call."""
|
|
427
|
+
batch_result = self._memory_service.remember_batch(
|
|
428
|
+
memories=arguments["memories"],
|
|
429
|
+
)
|
|
430
|
+
return asdict(batch_result) # type: ignore[return-value]
|
|
431
|
+
|
|
432
|
+
def _handle_recall(self, arguments: dict[str, Any]) -> RecallResponse:
|
|
433
|
+
"""Handle recall tool call."""
|
|
434
|
+
recall_result = self._memory_service.recall(
|
|
435
|
+
query=arguments["query"],
|
|
436
|
+
limit=arguments.get("limit", 5),
|
|
437
|
+
namespace=arguments.get("namespace"),
|
|
438
|
+
min_similarity=arguments.get("min_similarity", 0.0),
|
|
439
|
+
)
|
|
440
|
+
|
|
441
|
+
# Convert to dict list for potential decay processing
|
|
442
|
+
memories_list = [
|
|
443
|
+
{
|
|
444
|
+
"id": m.id,
|
|
445
|
+
"content": m.content,
|
|
446
|
+
"similarity": m.similarity,
|
|
447
|
+
"namespace": m.namespace,
|
|
448
|
+
"tags": m.tags,
|
|
449
|
+
"importance": m.importance,
|
|
450
|
+
"created_at": m.created_at.isoformat(),
|
|
451
|
+
"metadata": m.metadata,
|
|
452
|
+
"last_accessed": m.last_accessed,
|
|
453
|
+
"access_count": m.access_count,
|
|
454
|
+
}
|
|
455
|
+
for m in recall_result.memories
|
|
456
|
+
]
|
|
457
|
+
|
|
458
|
+
# Apply auto-decay if enabled (adds effective_importance, re-ranks)
|
|
459
|
+
if self._decay_manager is not None and self._decay_manager.enabled:
|
|
460
|
+
memories_list = self._decay_manager.apply_decay_to_results(
|
|
461
|
+
memories_list, rerank=True
|
|
462
|
+
)
|
|
463
|
+
|
|
464
|
+
# Build response - include effective_importance if present
|
|
465
|
+
response_memories = []
|
|
466
|
+
for m in memories_list:
|
|
467
|
+
mem_dict: dict[str, Any] = {
|
|
468
|
+
"id": m["id"],
|
|
469
|
+
"content": m["content"],
|
|
470
|
+
"similarity": m["similarity"],
|
|
471
|
+
"namespace": m["namespace"],
|
|
472
|
+
"tags": m["tags"],
|
|
473
|
+
"importance": m["importance"],
|
|
474
|
+
"created_at": m["created_at"],
|
|
475
|
+
"metadata": m["metadata"],
|
|
476
|
+
}
|
|
477
|
+
if "effective_importance" in m:
|
|
478
|
+
mem_dict["effective_importance"] = m["effective_importance"]
|
|
479
|
+
response_memories.append(mem_dict)
|
|
480
|
+
|
|
481
|
+
return {
|
|
482
|
+
"memories": response_memories, # type: ignore[typeddict-item]
|
|
483
|
+
"total": len(response_memories),
|
|
484
|
+
}
|
|
485
|
+
|
|
486
|
+
def _handle_nearby(self, arguments: dict[str, Any]) -> NearbyResponse:
|
|
487
|
+
"""Handle nearby tool call."""
|
|
488
|
+
nearby_result = self._memory_service.nearby(
|
|
489
|
+
memory_id=arguments["memory_id"],
|
|
490
|
+
limit=arguments.get("limit", 5),
|
|
491
|
+
namespace=arguments.get("namespace"),
|
|
492
|
+
)
|
|
493
|
+
return {
|
|
494
|
+
"reference": {
|
|
495
|
+
"id": nearby_result.reference.id,
|
|
496
|
+
"content": nearby_result.reference.content,
|
|
497
|
+
"namespace": nearby_result.reference.namespace,
|
|
498
|
+
},
|
|
499
|
+
"neighbors": [
|
|
500
|
+
{
|
|
501
|
+
"id": n.id,
|
|
502
|
+
"content": n.content,
|
|
503
|
+
"similarity": n.similarity,
|
|
504
|
+
"namespace": n.namespace,
|
|
505
|
+
}
|
|
506
|
+
for n in nearby_result.neighbors
|
|
507
|
+
],
|
|
508
|
+
}
|
|
509
|
+
|
|
510
|
+
def _handle_forget(self, arguments: dict[str, Any]) -> ForgetResponse:
|
|
511
|
+
"""Handle forget tool call."""
|
|
512
|
+
forget_result = self._memory_service.forget(
|
|
513
|
+
memory_id=arguments["memory_id"],
|
|
514
|
+
)
|
|
515
|
+
return asdict(forget_result) # type: ignore[return-value]
|
|
516
|
+
|
|
517
|
+
def _handle_forget_batch(self, arguments: dict[str, Any]) -> ForgetBatchResponse:
|
|
518
|
+
"""Handle forget_batch tool call."""
|
|
519
|
+
forget_batch_result = self._memory_service.forget_batch(
|
|
520
|
+
memory_ids=arguments["memory_ids"],
|
|
521
|
+
)
|
|
522
|
+
return asdict(forget_batch_result) # type: ignore[return-value]
|
|
523
|
+
|
|
524
|
+
def _handle_health(self, arguments: dict[str, Any]) -> HealthResponse:
|
|
525
|
+
"""Handle health tool call."""
|
|
526
|
+
verbose = arguments.get("verbose", False)
|
|
527
|
+
|
|
528
|
+
health_checker = HealthChecker(
|
|
529
|
+
database=self._db,
|
|
530
|
+
embeddings=self._embeddings,
|
|
531
|
+
storage_path=self._settings.memory_path,
|
|
532
|
+
)
|
|
533
|
+
|
|
534
|
+
report = health_checker.get_health_report()
|
|
535
|
+
|
|
536
|
+
result: HealthResponse = {
|
|
537
|
+
"version": __version__,
|
|
538
|
+
"status": report.status.value,
|
|
539
|
+
"timestamp": report.timestamp.isoformat(),
|
|
540
|
+
"ready": health_checker.is_ready(),
|
|
541
|
+
"alive": health_checker.is_alive(),
|
|
542
|
+
}
|
|
543
|
+
|
|
544
|
+
if verbose:
|
|
545
|
+
result["checks"] = [
|
|
546
|
+
{
|
|
547
|
+
"name": check.name,
|
|
548
|
+
"status": check.status.value,
|
|
549
|
+
"message": check.message,
|
|
550
|
+
"latency_ms": check.latency_ms,
|
|
551
|
+
}
|
|
552
|
+
for check in report.checks
|
|
553
|
+
]
|
|
554
|
+
|
|
555
|
+
return result
|
|
556
|
+
|
|
557
|
+
def _handle_journey(self, arguments: dict[str, Any]) -> JourneyResponse:
|
|
558
|
+
"""Handle journey tool call."""
|
|
559
|
+
journey_result = self._spatial_service.journey(
|
|
560
|
+
start_id=arguments["start_id"],
|
|
561
|
+
end_id=arguments["end_id"],
|
|
562
|
+
steps=arguments.get("steps", 10),
|
|
563
|
+
namespace=arguments.get("namespace"),
|
|
564
|
+
)
|
|
565
|
+
return {
|
|
566
|
+
"start_id": journey_result.start_id,
|
|
567
|
+
"end_id": journey_result.end_id,
|
|
568
|
+
"steps": [
|
|
569
|
+
{
|
|
570
|
+
"step": s.step,
|
|
571
|
+
"t": s.t,
|
|
572
|
+
"nearby_memories": [
|
|
573
|
+
{
|
|
574
|
+
"id": m.id,
|
|
575
|
+
"content": m.content,
|
|
576
|
+
"similarity": m.similarity,
|
|
577
|
+
}
|
|
578
|
+
for m in s.nearby_memories
|
|
579
|
+
],
|
|
580
|
+
"distance_to_path": s.distance_to_path,
|
|
581
|
+
}
|
|
582
|
+
for s in journey_result.steps
|
|
583
|
+
],
|
|
584
|
+
"path_coverage": journey_result.path_coverage,
|
|
585
|
+
}
|
|
586
|
+
|
|
587
|
+
def _handle_wander(self, arguments: dict[str, Any]) -> WanderResponse:
|
|
588
|
+
"""Handle wander tool call."""
|
|
589
|
+
start_id = arguments.get("start_id")
|
|
590
|
+
if start_id is None:
|
|
591
|
+
all_memories = self._memory_service.recall(
|
|
592
|
+
query="any topic",
|
|
593
|
+
limit=1,
|
|
594
|
+
namespace=arguments.get("namespace"),
|
|
595
|
+
)
|
|
596
|
+
if not all_memories.memories:
|
|
597
|
+
raise ValidationError("No memories available for wander")
|
|
598
|
+
start_id = all_memories.memories[0].id
|
|
599
|
+
|
|
600
|
+
wander_result = self._spatial_service.wander(
|
|
601
|
+
start_id=start_id,
|
|
602
|
+
steps=arguments.get("steps", 10),
|
|
603
|
+
temperature=arguments.get("temperature", 0.5),
|
|
604
|
+
namespace=arguments.get("namespace"),
|
|
605
|
+
)
|
|
606
|
+
return {
|
|
607
|
+
"start_id": wander_result.start_id,
|
|
608
|
+
"steps": [
|
|
609
|
+
{
|
|
610
|
+
"step": s.step,
|
|
611
|
+
"memory": {
|
|
612
|
+
"id": s.memory.id,
|
|
613
|
+
"content": s.memory.content,
|
|
614
|
+
"namespace": s.memory.namespace,
|
|
615
|
+
"tags": s.memory.tags,
|
|
616
|
+
"similarity": s.memory.similarity,
|
|
617
|
+
},
|
|
618
|
+
"similarity_to_previous": s.similarity_to_previous,
|
|
619
|
+
"selection_probability": s.selection_probability,
|
|
620
|
+
}
|
|
621
|
+
for s in wander_result.steps
|
|
622
|
+
],
|
|
623
|
+
"total_distance": wander_result.total_distance,
|
|
624
|
+
}
|
|
625
|
+
|
|
626
|
+
def _handle_regions(self, arguments: dict[str, Any]) -> RegionsResponse:
|
|
627
|
+
"""Handle regions tool call."""
|
|
628
|
+
regions_result = self._spatial_service.regions(
|
|
629
|
+
namespace=arguments.get("namespace"),
|
|
630
|
+
min_cluster_size=arguments.get("min_cluster_size", 3),
|
|
631
|
+
max_clusters=arguments.get("max_clusters"),
|
|
632
|
+
)
|
|
633
|
+
return {
|
|
634
|
+
"clusters": [
|
|
635
|
+
{
|
|
636
|
+
"cluster_id": c.cluster_id,
|
|
637
|
+
"size": c.size,
|
|
638
|
+
"keywords": c.keywords,
|
|
639
|
+
"representative_memory": {
|
|
640
|
+
"id": c.representative_memory.id,
|
|
641
|
+
"content": c.representative_memory.content,
|
|
642
|
+
},
|
|
643
|
+
"sample_memories": [
|
|
644
|
+
{
|
|
645
|
+
"id": m.id,
|
|
646
|
+
"content": m.content,
|
|
647
|
+
"similarity": m.similarity,
|
|
648
|
+
}
|
|
649
|
+
for m in c.sample_memories
|
|
650
|
+
],
|
|
651
|
+
"coherence": c.coherence,
|
|
652
|
+
}
|
|
653
|
+
for c in regions_result.clusters
|
|
654
|
+
],
|
|
655
|
+
"total_memories": regions_result.total_memories,
|
|
656
|
+
"noise_count": regions_result.noise_count,
|
|
657
|
+
"clustering_quality": regions_result.clustering_quality,
|
|
658
|
+
}
|
|
659
|
+
|
|
660
|
+
def _handle_visualize(self, arguments: dict[str, Any]) -> VisualizeResponse:
|
|
661
|
+
"""Handle visualize tool call."""
|
|
662
|
+
visualize_result = self._spatial_service.visualize(
|
|
663
|
+
memory_ids=arguments.get("memory_ids"),
|
|
664
|
+
namespace=arguments.get("namespace"),
|
|
665
|
+
format=arguments.get("format", "json"),
|
|
666
|
+
dimensions=arguments.get("dimensions", 2),
|
|
667
|
+
include_edges=arguments.get("include_edges", True),
|
|
668
|
+
)
|
|
669
|
+
output_format = arguments.get("format", "json")
|
|
670
|
+
if output_format in ("mermaid", "svg"):
|
|
671
|
+
return {
|
|
672
|
+
"format": output_format,
|
|
673
|
+
"output": visualize_result.output,
|
|
674
|
+
"node_count": len(visualize_result.nodes),
|
|
675
|
+
}
|
|
676
|
+
return {
|
|
677
|
+
"nodes": [
|
|
678
|
+
{
|
|
679
|
+
"id": n.id,
|
|
680
|
+
"x": n.x,
|
|
681
|
+
"y": n.y,
|
|
682
|
+
"label": n.label,
|
|
683
|
+
"cluster": n.cluster,
|
|
684
|
+
"importance": n.importance,
|
|
685
|
+
}
|
|
686
|
+
for n in visualize_result.nodes
|
|
687
|
+
],
|
|
688
|
+
"edges": [
|
|
689
|
+
{
|
|
690
|
+
"from_id": e.from_id,
|
|
691
|
+
"to_id": e.to_id,
|
|
692
|
+
"weight": e.weight,
|
|
693
|
+
}
|
|
694
|
+
for e in visualize_result.edges
|
|
695
|
+
] if visualize_result.edges else [],
|
|
696
|
+
"bounds": visualize_result.bounds,
|
|
697
|
+
"format": visualize_result.format,
|
|
698
|
+
}
|
|
699
|
+
|
|
700
|
+
def _handle_decay(self, arguments: dict[str, Any]) -> DecayResponse:
|
|
701
|
+
"""Handle decay tool call."""
|
|
702
|
+
decay_result = self._lifecycle_service.decay(
|
|
703
|
+
namespace=arguments.get("namespace"),
|
|
704
|
+
decay_function=arguments.get("decay_function", "exponential"),
|
|
705
|
+
half_life_days=arguments.get("half_life_days", 30.0),
|
|
706
|
+
min_importance=arguments.get("min_importance", 0.1),
|
|
707
|
+
access_weight=arguments.get("access_weight", 0.3),
|
|
708
|
+
dry_run=arguments.get("dry_run", True),
|
|
709
|
+
)
|
|
710
|
+
return {
|
|
711
|
+
"memories_analyzed": decay_result.memories_analyzed,
|
|
712
|
+
"memories_decayed": decay_result.memories_decayed,
|
|
713
|
+
"avg_decay_factor": decay_result.avg_decay_factor,
|
|
714
|
+
"decayed_memories": [
|
|
715
|
+
{
|
|
716
|
+
"id": m.id,
|
|
717
|
+
"content_preview": m.content_preview,
|
|
718
|
+
"old_importance": m.old_importance,
|
|
719
|
+
"new_importance": m.new_importance,
|
|
720
|
+
"decay_factor": m.decay_factor,
|
|
721
|
+
"days_since_access": m.days_since_access,
|
|
722
|
+
"access_count": m.access_count,
|
|
723
|
+
}
|
|
724
|
+
for m in decay_result.decayed_memories
|
|
725
|
+
],
|
|
726
|
+
"dry_run": decay_result.dry_run,
|
|
727
|
+
}
|
|
728
|
+
|
|
729
|
+
def _handle_reinforce(self, arguments: dict[str, Any]) -> ReinforceResponse:
|
|
730
|
+
"""Handle reinforce tool call."""
|
|
731
|
+
reinforce_result = self._lifecycle_service.reinforce(
|
|
732
|
+
memory_ids=arguments["memory_ids"],
|
|
733
|
+
boost_type=arguments.get("boost_type", "additive"),
|
|
734
|
+
boost_amount=arguments.get("boost_amount", 0.1),
|
|
735
|
+
update_access=arguments.get("update_access", True),
|
|
736
|
+
)
|
|
737
|
+
return {
|
|
738
|
+
"memories_reinforced": reinforce_result.memories_reinforced,
|
|
739
|
+
"avg_boost": reinforce_result.avg_boost,
|
|
740
|
+
"reinforced": [
|
|
741
|
+
{
|
|
742
|
+
"id": m.id,
|
|
743
|
+
"content_preview": m.content_preview,
|
|
744
|
+
"old_importance": m.old_importance,
|
|
745
|
+
"new_importance": m.new_importance,
|
|
746
|
+
"boost_applied": m.boost_applied,
|
|
747
|
+
}
|
|
748
|
+
for m in reinforce_result.reinforced
|
|
749
|
+
],
|
|
750
|
+
"not_found": reinforce_result.not_found,
|
|
751
|
+
}
|
|
752
|
+
|
|
753
|
+
def _handle_extract(self, arguments: dict[str, Any]) -> ExtractResponse:
|
|
754
|
+
"""Handle extract tool call."""
|
|
755
|
+
extract_result = self._lifecycle_service.extract(
|
|
756
|
+
text=arguments["text"],
|
|
757
|
+
namespace=arguments.get("namespace", "extracted"),
|
|
758
|
+
min_confidence=arguments.get("min_confidence", 0.5),
|
|
759
|
+
deduplicate=arguments.get("deduplicate", True),
|
|
760
|
+
dedup_threshold=arguments.get("dedup_threshold", 0.9),
|
|
761
|
+
)
|
|
762
|
+
return {
|
|
763
|
+
"candidates_found": extract_result.candidates_found,
|
|
764
|
+
"memories_created": extract_result.memories_created,
|
|
765
|
+
"deduplicated_count": extract_result.deduplicated_count,
|
|
766
|
+
"extractions": [
|
|
767
|
+
{
|
|
768
|
+
"content": e.content,
|
|
769
|
+
"confidence": e.confidence,
|
|
770
|
+
"pattern_matched": e.pattern_matched,
|
|
771
|
+
"start_pos": e.start_pos,
|
|
772
|
+
"end_pos": e.end_pos,
|
|
773
|
+
"stored": e.stored,
|
|
774
|
+
"memory_id": e.memory_id,
|
|
775
|
+
}
|
|
776
|
+
for e in extract_result.extractions
|
|
777
|
+
],
|
|
778
|
+
}
|
|
779
|
+
|
|
780
|
+
def _handle_consolidate(self, arguments: dict[str, Any]) -> ConsolidateResponse:
|
|
781
|
+
"""Handle consolidate tool call."""
|
|
782
|
+
consolidate_result = self._lifecycle_service.consolidate(
|
|
783
|
+
namespace=arguments["namespace"],
|
|
784
|
+
similarity_threshold=arguments.get("similarity_threshold", 0.85),
|
|
785
|
+
strategy=arguments.get("strategy", "keep_highest_importance"),
|
|
786
|
+
dry_run=arguments.get("dry_run", True),
|
|
787
|
+
max_groups=arguments.get("max_groups", 50),
|
|
788
|
+
)
|
|
789
|
+
return {
|
|
790
|
+
"groups_found": consolidate_result.groups_found,
|
|
791
|
+
"memories_merged": consolidate_result.memories_merged,
|
|
792
|
+
"memories_deleted": consolidate_result.memories_deleted,
|
|
793
|
+
"groups": [
|
|
794
|
+
{
|
|
795
|
+
"representative_id": g.representative_id,
|
|
796
|
+
"member_ids": g.member_ids,
|
|
797
|
+
"avg_similarity": g.avg_similarity,
|
|
798
|
+
"action_taken": g.action_taken,
|
|
799
|
+
}
|
|
800
|
+
for g in consolidate_result.groups
|
|
801
|
+
],
|
|
802
|
+
"dry_run": consolidate_result.dry_run,
|
|
803
|
+
}
|
|
804
|
+
|
|
805
|
+
def _handle_stats(self, arguments: dict[str, Any]) -> StatsResponse:
|
|
806
|
+
"""Handle stats tool call."""
|
|
807
|
+
stats_result = self._utility_service.stats(
|
|
808
|
+
namespace=arguments.get("namespace"),
|
|
809
|
+
include_index_details=arguments.get("include_index_details", True),
|
|
810
|
+
)
|
|
811
|
+
return {
|
|
812
|
+
"total_memories": stats_result.total_memories,
|
|
813
|
+
"memories_by_namespace": stats_result.memories_by_namespace,
|
|
814
|
+
"storage_bytes": stats_result.storage_bytes,
|
|
815
|
+
"storage_mb": stats_result.storage_mb,
|
|
816
|
+
"estimated_vector_bytes": stats_result.estimated_vector_bytes,
|
|
817
|
+
"has_vector_index": stats_result.has_vector_index,
|
|
818
|
+
"has_fts_index": stats_result.has_fts_index,
|
|
819
|
+
"indices": [
|
|
820
|
+
{
|
|
821
|
+
"name": idx.name,
|
|
822
|
+
"index_type": idx.index_type,
|
|
823
|
+
"column": idx.column,
|
|
824
|
+
"num_indexed_rows": idx.num_indexed_rows,
|
|
825
|
+
"status": idx.status,
|
|
826
|
+
}
|
|
827
|
+
for idx in stats_result.indices
|
|
828
|
+
] if stats_result.indices else [],
|
|
829
|
+
"num_fragments": stats_result.num_fragments,
|
|
830
|
+
"needs_compaction": stats_result.needs_compaction,
|
|
831
|
+
"table_version": stats_result.table_version,
|
|
832
|
+
"oldest_memory_date": (
|
|
833
|
+
stats_result.oldest_memory_date.isoformat()
|
|
834
|
+
if stats_result.oldest_memory_date else None
|
|
835
|
+
),
|
|
836
|
+
"newest_memory_date": (
|
|
837
|
+
stats_result.newest_memory_date.isoformat()
|
|
838
|
+
if stats_result.newest_memory_date else None
|
|
839
|
+
),
|
|
840
|
+
"avg_content_length": stats_result.avg_content_length,
|
|
841
|
+
}
|
|
842
|
+
|
|
843
|
+
def _handle_namespaces(self, arguments: dict[str, Any]) -> NamespacesResponse:
|
|
844
|
+
"""Handle namespaces tool call."""
|
|
845
|
+
namespaces_result = self._utility_service.namespaces(
|
|
846
|
+
include_stats=arguments.get("include_stats", True),
|
|
847
|
+
)
|
|
848
|
+
return {
|
|
849
|
+
"namespaces": [
|
|
850
|
+
{
|
|
851
|
+
"name": ns.name,
|
|
852
|
+
"memory_count": ns.memory_count,
|
|
853
|
+
"oldest_memory": (
|
|
854
|
+
ns.oldest_memory.isoformat() if ns.oldest_memory else None
|
|
855
|
+
),
|
|
856
|
+
"newest_memory": (
|
|
857
|
+
ns.newest_memory.isoformat() if ns.newest_memory else None
|
|
858
|
+
),
|
|
859
|
+
}
|
|
860
|
+
for ns in namespaces_result.namespaces
|
|
861
|
+
],
|
|
862
|
+
"total_namespaces": namespaces_result.total_namespaces,
|
|
863
|
+
"total_memories": namespaces_result.total_memories,
|
|
864
|
+
}
|
|
865
|
+
|
|
866
|
+
def _handle_delete_namespace(self, arguments: dict[str, Any]) -> DeleteNamespaceResponse:
|
|
867
|
+
"""Handle delete_namespace tool call."""
|
|
868
|
+
delete_result = self._utility_service.delete_namespace(
|
|
869
|
+
namespace=arguments["namespace"],
|
|
870
|
+
confirm=arguments.get("confirm", False),
|
|
871
|
+
dry_run=arguments.get("dry_run", True),
|
|
872
|
+
)
|
|
873
|
+
return {
|
|
874
|
+
"namespace": delete_result.namespace,
|
|
875
|
+
"memories_deleted": delete_result.memories_deleted,
|
|
876
|
+
"success": delete_result.success,
|
|
877
|
+
"message": delete_result.message,
|
|
878
|
+
"dry_run": delete_result.dry_run,
|
|
879
|
+
}
|
|
880
|
+
|
|
881
|
+
def _handle_rename_namespace(self, arguments: dict[str, Any]) -> RenameNamespaceResponse:
|
|
882
|
+
"""Handle rename_namespace tool call."""
|
|
883
|
+
rename_result = self._utility_service.rename_namespace(
|
|
884
|
+
old_namespace=arguments["old_namespace"],
|
|
885
|
+
new_namespace=arguments["new_namespace"],
|
|
886
|
+
)
|
|
887
|
+
return {
|
|
888
|
+
"old_namespace": rename_result.old_namespace,
|
|
889
|
+
"new_namespace": rename_result.new_namespace,
|
|
890
|
+
"memories_renamed": rename_result.memories_renamed,
|
|
891
|
+
"success": rename_result.success,
|
|
892
|
+
"message": rename_result.message,
|
|
893
|
+
}
|
|
894
|
+
|
|
895
|
+
def _handle_export_memories(self, arguments: dict[str, Any]) -> ExportResponse:
|
|
896
|
+
"""Handle export_memories tool call."""
|
|
897
|
+
export_result = self._export_import_service.export_memories(
|
|
898
|
+
output_path=arguments["output_path"],
|
|
899
|
+
format=arguments.get("format"),
|
|
900
|
+
namespace=arguments.get("namespace"),
|
|
901
|
+
include_vectors=arguments.get("include_vectors", True),
|
|
902
|
+
)
|
|
903
|
+
return {
|
|
904
|
+
"format": export_result.format,
|
|
905
|
+
"output_path": export_result.output_path,
|
|
906
|
+
"memories_exported": export_result.memories_exported,
|
|
907
|
+
"file_size_bytes": export_result.file_size_bytes,
|
|
908
|
+
"file_size_mb": export_result.file_size_mb,
|
|
909
|
+
"namespaces_included": export_result.namespaces_included,
|
|
910
|
+
"duration_seconds": export_result.duration_seconds,
|
|
911
|
+
"compression": export_result.compression,
|
|
912
|
+
}
|
|
913
|
+
|
|
914
|
+
def _handle_import_memories(self, arguments: dict[str, Any]) -> ImportResponse:
|
|
915
|
+
"""Handle import_memories tool call."""
|
|
916
|
+
dry_run = arguments.get("dry_run", True)
|
|
917
|
+
import_result = self._export_import_service.import_memories(
|
|
918
|
+
source_path=arguments["source_path"],
|
|
919
|
+
format=arguments.get("format"),
|
|
920
|
+
namespace_override=arguments.get("namespace_override"),
|
|
921
|
+
deduplicate=arguments.get("deduplicate", False),
|
|
922
|
+
dedup_threshold=arguments.get("dedup_threshold", 0.95),
|
|
923
|
+
validate=arguments.get("validate", True),
|
|
924
|
+
regenerate_embeddings=arguments.get("regenerate_embeddings", False),
|
|
925
|
+
dry_run=dry_run,
|
|
926
|
+
)
|
|
927
|
+
return {
|
|
928
|
+
"source_path": import_result.source_path,
|
|
929
|
+
"format": import_result.format,
|
|
930
|
+
"total_records_in_file": import_result.total_records_in_file,
|
|
931
|
+
"memories_imported": import_result.memories_imported,
|
|
932
|
+
"memories_skipped": import_result.memories_skipped,
|
|
933
|
+
"memories_failed": import_result.memories_failed,
|
|
934
|
+
"validation_errors": [
|
|
935
|
+
{
|
|
936
|
+
"row_number": err.row_number,
|
|
937
|
+
"field": err.field,
|
|
938
|
+
"error": err.error,
|
|
939
|
+
"value": str(err.value) if err.value is not None else None,
|
|
940
|
+
}
|
|
941
|
+
for err in import_result.validation_errors
|
|
942
|
+
] if import_result.validation_errors else [],
|
|
943
|
+
"namespace_override": import_result.namespace_override,
|
|
944
|
+
"duration_seconds": import_result.duration_seconds,
|
|
945
|
+
"dry_run": dry_run,
|
|
946
|
+
"imported_memories": [
|
|
947
|
+
{
|
|
948
|
+
"id": m.id,
|
|
949
|
+
"content_preview": m.content_preview,
|
|
950
|
+
"namespace": m.namespace,
|
|
951
|
+
}
|
|
952
|
+
for m in import_result.imported_memories[:10]
|
|
953
|
+
] if import_result.imported_memories else [],
|
|
954
|
+
}
|
|
955
|
+
|
|
956
|
+
def _handle_hybrid_recall(self, arguments: dict[str, Any]) -> HybridRecallResponse:
|
|
957
|
+
"""Handle hybrid_recall tool call."""
|
|
958
|
+
hybrid_result = self._utility_service.hybrid_recall(
|
|
959
|
+
query=arguments["query"],
|
|
960
|
+
alpha=arguments.get("alpha", 0.5),
|
|
961
|
+
limit=arguments.get("limit", 5),
|
|
962
|
+
namespace=arguments.get("namespace"),
|
|
963
|
+
min_similarity=arguments.get("min_similarity", 0.0),
|
|
964
|
+
)
|
|
965
|
+
|
|
966
|
+
# Convert to dict list for potential decay processing
|
|
967
|
+
memories_list = [
|
|
968
|
+
{
|
|
969
|
+
"id": m.id,
|
|
970
|
+
"content": m.content,
|
|
971
|
+
"similarity": m.similarity,
|
|
972
|
+
"namespace": m.namespace,
|
|
973
|
+
"tags": m.tags,
|
|
974
|
+
"importance": m.importance,
|
|
975
|
+
"created_at": m.created_at.isoformat() if m.created_at else None,
|
|
976
|
+
"metadata": m.metadata,
|
|
977
|
+
"vector_score": m.vector_score,
|
|
978
|
+
"fts_score": m.fts_score,
|
|
979
|
+
"last_accessed": m.last_accessed,
|
|
980
|
+
"access_count": m.access_count,
|
|
981
|
+
}
|
|
982
|
+
for m in hybrid_result.memories
|
|
983
|
+
]
|
|
984
|
+
|
|
985
|
+
# Apply auto-decay if enabled (adds effective_importance, re-ranks)
|
|
986
|
+
if self._decay_manager is not None and self._decay_manager.enabled:
|
|
987
|
+
memories_list = self._decay_manager.apply_decay_to_results(
|
|
988
|
+
memories_list, rerank=True
|
|
989
|
+
)
|
|
990
|
+
|
|
991
|
+
# Build response - include effective_importance if present
|
|
992
|
+
response_memories = []
|
|
993
|
+
for m in memories_list:
|
|
994
|
+
mem_dict: dict[str, Any] = {
|
|
995
|
+
"id": m["id"],
|
|
996
|
+
"content": m["content"],
|
|
997
|
+
"similarity": m["similarity"],
|
|
998
|
+
"namespace": m["namespace"],
|
|
999
|
+
"tags": m["tags"],
|
|
1000
|
+
"importance": m["importance"],
|
|
1001
|
+
"created_at": m["created_at"],
|
|
1002
|
+
"metadata": m["metadata"],
|
|
1003
|
+
"vector_score": m.get("vector_score"),
|
|
1004
|
+
"fts_score": m.get("fts_score"),
|
|
1005
|
+
}
|
|
1006
|
+
if "effective_importance" in m:
|
|
1007
|
+
mem_dict["effective_importance"] = m["effective_importance"]
|
|
1008
|
+
response_memories.append(mem_dict)
|
|
1009
|
+
|
|
1010
|
+
return {
|
|
1011
|
+
"query": hybrid_result.query,
|
|
1012
|
+
"alpha": hybrid_result.alpha,
|
|
1013
|
+
"memories": response_memories, # type: ignore[typeddict-item]
|
|
1014
|
+
"total": len(response_memories),
|
|
1015
|
+
"search_type": hybrid_result.search_type,
|
|
1016
|
+
}
|
|
1017
|
+
|
|
1018
|
+
# =========================================================================
|
|
1019
|
+
# Tool Routing
|
|
1020
|
+
# =========================================================================
|
|
1021
|
+
|
|
1022
|
+
def _handle_tool(self, name: str, arguments: dict[str, Any]) -> HandlerResponse:
    """Route tool call to appropriate handler, recording request metrics.

    Args:
        name: Tool name.
        arguments: Tool arguments.

    Returns:
        Tool result as typed dictionary.

    Raises:
        ValidationError: If tool name is unknown.
    """
    # Wrap dispatch so every call is counted/timed under the tool's name.
    with record_request(name, "success"):
        outcome = self._handle_tool_impl(name, arguments)
    return outcome
|
|
1038
|
+
|
|
1039
|
+
def _handle_tool_impl(self, name: str, arguments: dict[str, Any]) -> HandlerResponse:
|
|
1040
|
+
"""Implementation of tool routing using dispatch pattern.
|
|
1041
|
+
|
|
1042
|
+
Args:
|
|
1043
|
+
name: Tool name.
|
|
1044
|
+
arguments: Tool arguments.
|
|
1045
|
+
|
|
1046
|
+
Returns:
|
|
1047
|
+
Tool result as dictionary.
|
|
1048
|
+
|
|
1049
|
+
Raises:
|
|
1050
|
+
ValidationError: If tool name is unknown.
|
|
1051
|
+
"""
|
|
1052
|
+
handler = self._tool_handlers.get(name)
|
|
1053
|
+
if handler is None:
|
|
1054
|
+
raise ValidationError(f"Unknown tool: {name}")
|
|
1055
|
+
return handler(arguments)
|
|
1056
|
+
|
|
1057
|
+
@staticmethod
def _get_server_instructions() -> str:
    """Return behavioral instructions for Claude when using spatial-memory.

    These instructions are automatically injected into Claude's system prompt
    when the MCP server connects, enabling proactive memory management without
    requiring user configuration.

    Returns:
        A markdown document describing when to recall, save, extract, and
        synthesize memories, plus a per-tool selection guide.
    """
    # NOTE: the literal below is runtime behavior (sent to the client);
    # keep its wording stable unless deliberately changing the prompt.
    return '''## Spatial Memory System

You have access to a persistent semantic memory system. Use it proactively to
build cumulative knowledge across sessions.

### Session Start
At conversation start, call `recall` with the user's apparent task/context to
load relevant memories. Present insights naturally:
- Good: "Based on previous work, you decided to use PostgreSQL because..."
- Bad: "The database returned: [{id: '...', content: '...'}]"

### Recognizing Memory-Worthy Moments
After these events, ask briefly "Save this? y/n" (minimal friction):
- **Decisions**: "Let's use X...", "We decided...", "The approach is..."
- **Solutions**: "The fix was...", "It failed because...", "The error was..."
- **Patterns**: "This pattern works...", "The trick is...", "Always do X when..."
- **Discoveries**: "I found that...", "Important:...", "TIL..."

Do NOT ask for trivial information. Only prompt for insights that would help
future sessions.

### Saving Memories
When user confirms, save with:
- **Detailed content**: Include full context, reasoning, and specifics. Future
  agents need complete information.
- **Contextual namespace**: Use project name, or categories like "decisions", "errors", "patterns"
- **Descriptive tags**: Technologies, concepts, error types involved
- **High importance (0.8-1.0)**: For decisions and critical fixes
- **Medium importance (0.5-0.7)**: For patterns and learnings

### Synthesizing Answers
When using `recall` or `hybrid_recall`, present results as natural knowledge:
- Integrate memories into your response conversationally
- Reference prior decisions: "You previously decided X because Y"
- Don't expose raw JSON or tool mechanics to the user

### Auto-Extract for Long Sessions
For significant problem-solving conversations (debugging sessions, architecture discussions), offer:
"This session had good learnings. Extract key memories? y/n"
Then use `extract` to automatically capture important information.

### Tool Selection Guide
- `remember`: Store a single memory with full context
- `recall`: Semantic search for relevant memories
- `hybrid_recall`: Combined keyword + semantic search (better for specific terms)
- `extract`: Auto-extract memories from conversation text
- `nearby`: Find memories similar to a known memory
- `regions`: Discover topic clusters in memory space
- `journey`: Navigate conceptual path between two memories'''
|
|
1114
|
+
|
|
1115
|
+
async def run(self) -> None:
    """Serve MCP requests over stdio until the transport closes."""
    async with stdio_server() as (reader, writer):
        # Initialization options advertise server capabilities to clients.
        init_options = self._server.create_initialization_options()
        await self._server.run(reader, writer, init_options)
|
|
1123
|
+
|
|
1124
|
+
def close(self) -> None:
    """Release server resources: decay manager, executor, then database."""
    # Stop the decay manager first so pending importance updates flush.
    manager = self._decay_manager
    if manager is not None:
        manager.stop()

    # The executor may not exist if construction failed partway through.
    if hasattr(self, "_executor"):
        self._executor.shutdown(wait=False)

    database = self._db
    if database is not None:
        database.close()
|
|
1136
|
+
|
|
1137
|
+
|
|
1138
|
+
def create_server(
    repository: MemoryRepositoryProtocol | None = None,
    embeddings: EmbeddingServiceProtocol | None = None,
) -> SpatialMemoryServer:
    """Create a new SpatialMemoryServer instance.

    This factory function allows dependency injection for testing.

    Args:
        repository: Optional repository implementation.
        embeddings: Optional embedding service implementation.

    Returns:
        Configured SpatialMemoryServer instance.
    """
    server = SpatialMemoryServer(repository=repository, embeddings=embeddings)
    return server
|
|
1154
|
+
|
|
1155
|
+
|
|
1156
|
+
async def main() -> None:
    """Main entry point for the MCP server.

    Loads settings, validates configuration (exiting with status 1 on a
    ConfigurationError), configures logging, then runs the server over
    stdio with cleanup wired to signals, atexit, and the finally block.
    """
    # Get settings
    settings = get_settings()

    # Validate configuration
    try:
        warnings = validate_startup(settings)
        # Use basic logging temporarily for startup validation
        logging.basicConfig(level=settings.log_level)
        logger = logging.getLogger(__name__)
        for warning in warnings:
            logger.warning(f"Configuration warning: {warning}")
    except ConfigurationError as e:
        # Use basic logging for error
        logging.basicConfig(level=logging.ERROR)
        logger = logging.getLogger(__name__)
        logger.error(f"Configuration error: {e}")
        sys.exit(1)

    # Configure logging properly
    configure_logging(
        level=settings.log_level,
        json_format=settings.log_format == "json",
    )

    server = create_server()
    # Guard flag so cleanup runs at most once (signal + atexit + finally
    # can all reach it).
    cleanup_done = False

    def cleanup() -> None:
        """Cleanup function for server resources."""
        nonlocal cleanup_done
        if cleanup_done:
            return
        cleanup_done = True
        logger.info("Cleaning up server resources...")
        server.close()
        clear_connection_cache()
        logger.info("Server shutdown complete")

    def handle_shutdown(signum: int, frame: Any) -> None:
        """Handle shutdown signals gracefully."""
        sig_name = signal.Signals(signum).name
        logger.info(f"Received {sig_name}, initiating graceful shutdown...")

    # Register signal handlers for logging (both platforms use same code)
    # NOTE(review): these handlers only log; installing them replaces the
    # default SIGINT handler, so Ctrl+C no longer raises KeyboardInterrupt.
    # Shutdown presumably relies on the stdio transport closing — confirm.
    signal.signal(signal.SIGINT, handle_shutdown)
    signal.signal(signal.SIGTERM, handle_shutdown)

    # Register atexit as a safety net for cleanup
    atexit.register(cleanup)

    try:
        await server.run()
    except asyncio.CancelledError:
        logger.info("Server task cancelled")
    finally:
        cleanup()
        atexit.unregister(cleanup)  # Prevent double cleanup
|