spatial-memory-mcp 1.9.1 (spatial_memory_mcp-1.9.1-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- spatial_memory/__init__.py +97 -0
- spatial_memory/__main__.py +271 -0
- spatial_memory/adapters/__init__.py +7 -0
- spatial_memory/adapters/lancedb_repository.py +880 -0
- spatial_memory/config.py +769 -0
- spatial_memory/core/__init__.py +118 -0
- spatial_memory/core/cache.py +317 -0
- spatial_memory/core/circuit_breaker.py +297 -0
- spatial_memory/core/connection_pool.py +220 -0
- spatial_memory/core/consolidation_strategies.py +401 -0
- spatial_memory/core/database.py +3072 -0
- spatial_memory/core/db_idempotency.py +242 -0
- spatial_memory/core/db_indexes.py +576 -0
- spatial_memory/core/db_migrations.py +588 -0
- spatial_memory/core/db_search.py +512 -0
- spatial_memory/core/db_versioning.py +178 -0
- spatial_memory/core/embeddings.py +558 -0
- spatial_memory/core/errors.py +317 -0
- spatial_memory/core/file_security.py +701 -0
- spatial_memory/core/filesystem.py +178 -0
- spatial_memory/core/health.py +289 -0
- spatial_memory/core/helpers.py +79 -0
- spatial_memory/core/import_security.py +433 -0
- spatial_memory/core/lifecycle_ops.py +1067 -0
- spatial_memory/core/logging.py +194 -0
- spatial_memory/core/metrics.py +192 -0
- spatial_memory/core/models.py +660 -0
- spatial_memory/core/rate_limiter.py +326 -0
- spatial_memory/core/response_types.py +500 -0
- spatial_memory/core/security.py +588 -0
- spatial_memory/core/spatial_ops.py +430 -0
- spatial_memory/core/tracing.py +300 -0
- spatial_memory/core/utils.py +110 -0
- spatial_memory/core/validation.py +406 -0
- spatial_memory/factory.py +444 -0
- spatial_memory/migrations/__init__.py +40 -0
- spatial_memory/ports/__init__.py +11 -0
- spatial_memory/ports/repositories.py +630 -0
- spatial_memory/py.typed +0 -0
- spatial_memory/server.py +1214 -0
- spatial_memory/services/__init__.py +70 -0
- spatial_memory/services/decay_manager.py +411 -0
- spatial_memory/services/export_import.py +1031 -0
- spatial_memory/services/lifecycle.py +1139 -0
- spatial_memory/services/memory.py +412 -0
- spatial_memory/services/spatial.py +1152 -0
- spatial_memory/services/utility.py +429 -0
- spatial_memory/tools/__init__.py +5 -0
- spatial_memory/tools/definitions.py +695 -0
- spatial_memory/verify.py +140 -0
- spatial_memory_mcp-1.9.1.dist-info/METADATA +509 -0
- spatial_memory_mcp-1.9.1.dist-info/RECORD +55 -0
- spatial_memory_mcp-1.9.1.dist-info/WHEEL +4 -0
- spatial_memory_mcp-1.9.1.dist-info/entry_points.txt +2 -0
- spatial_memory_mcp-1.9.1.dist-info/licenses/LICENSE +21 -0
spatial_memory/core/logging.py
@@ -0,0 +1,194 @@
"""Secure structured logging for Spatial Memory MCP Server.

This module provides secure logging with sensitive data masking and
optional request context tracking for observability.

Features:
- Sensitive data masking (API keys, passwords)
- JSON structured logging format
- Request context integration ([req=xxx][agent=yyy] prefixes)
- Configurable log levels and formats
"""

from __future__ import annotations

import json
import logging
import re
from datetime import datetime, timezone
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    pass

# Patterns to mask in logs
SENSITIVE_PATTERNS = [
    (re.compile(r'api[_-]?key["\']?\s*[:=]\s*["\']?[\w-]+', re.I), 'api_key=***MASKED***'),
    (re.compile(r'sk-[a-zA-Z0-9]{20,}'), '***OPENAI_KEY***'),
    (re.compile(r'password["\']?\s*[:=]\s*["\']?[^\s"\']+', re.I), 'password=***MASKED***'),
]


def _get_trace_context() -> tuple[str | None, str | None]:
    """Get request context without importing at module level.

    Returns:
        Tuple of (request_id, agent_id) or (None, None) if no context.
    """
    try:
        # Import here to avoid circular imports
        from spatial_memory.core.tracing import get_current_context

        ctx = get_current_context()
        if ctx:
            return ctx.request_id, ctx.agent_id
    except ImportError:
        pass
    return None, None


class SecureFormatter(logging.Formatter):
    """Formatter that masks sensitive data and includes trace context."""

    def __init__(
        self,
        fmt: str | None = None,
        datefmt: str | None = None,
        include_trace_context: bool = True,
    ) -> None:
        """Initialize the secure formatter.

        Args:
            fmt: Format string for log messages.
            datefmt: Date format string.
            include_trace_context: Whether to include [req=xxx][agent=yyy] prefix.
        """
        super().__init__(fmt=fmt, datefmt=datefmt)
        self.include_trace_context = include_trace_context

    def format(self, record: logging.LogRecord) -> str:
        """Format log record and mask sensitive data.

        Args:
            record: The log record to format.

        Returns:
            Formatted log message with sensitive data masked and trace context.
        """
        message = super().format(record)

        # Add trace context prefix if available
        if self.include_trace_context:
            request_id, agent_id = _get_trace_context()
            if request_id:
                prefix_parts = [f"[req={request_id}]"]
                if agent_id:
                    prefix_parts.append(f"[agent={agent_id}]")
                prefix = "".join(prefix_parts) + " "
                # Insert after timestamp and logger name
                # Format: "2024-01-15 10:30:00 - logger - LEVEL - message"
                # We want: "2024-01-15 10:30:00 - logger - LEVEL - [req=xxx] message"
                parts = message.split(" - ", 3)
                if len(parts) == 4:
                    message = f"{parts[0]} - {parts[1]} - {parts[2]} - {prefix}{parts[3]}"
                else:
                    # Fallback: just prepend
                    message = prefix + message

        for pattern, replacement in SENSITIVE_PATTERNS:
            message = pattern.sub(replacement, message)
        return message


class JSONFormatter(logging.Formatter):
    """JSON formatter for structured logging with trace context."""

    def __init__(self, include_trace_context: bool = True) -> None:
        """Initialize the JSON formatter.

        Args:
            include_trace_context: Whether to include request_id and agent_id fields.
        """
        super().__init__()
        self.include_trace_context = include_trace_context

    def format(self, record: logging.LogRecord) -> str:
        """Format log record as JSON with sensitive data masked.

        Args:
            record: The log record to format.

        Returns:
            JSON-formatted log message with sensitive data masked.
        """
        log_data: dict[str, str | None] = {
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "level": record.levelname,
            "logger": record.name,
            "message": record.getMessage(),
        }

        # Add trace context if available
        if self.include_trace_context:
            request_id, agent_id = _get_trace_context()
            if request_id:
                log_data["request_id"] = request_id
            if agent_id:
                log_data["agent_id"] = agent_id

        if record.exc_info:
            log_data["exception"] = self.formatException(record.exc_info)

        # Mask sensitive data
        json_str = json.dumps(log_data)
        for pattern, replacement in SENSITIVE_PATTERNS:
            json_str = pattern.sub(replacement, json_str)

        return json_str


def configure_logging(
    level: str = "INFO",
    json_format: bool = False,
    mask_sensitive: bool = True,
    include_trace_context: bool = True,
) -> None:
    """Configure logging for the application.

    Args:
        level: Logging level (DEBUG, INFO, WARNING, ERROR).
        json_format: Use JSON format for structured logging.
        mask_sensitive: Mask sensitive data in logs.
        include_trace_context: Include [req=xxx][agent=yyy] in log messages.
    """
    # Get root logger
    root_logger = logging.getLogger()
    root_logger.setLevel(level)

    # Remove existing handlers
    for handler in root_logger.handlers[:]:
        root_logger.removeHandler(handler)

    # Create console handler
    console_handler = logging.StreamHandler()
    console_handler.setLevel(level)

    # Choose formatter
    if json_format:
        formatter: logging.Formatter = JSONFormatter(
            include_trace_context=include_trace_context
        )
    elif mask_sensitive:
        formatter = SecureFormatter(
            fmt="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
            datefmt="%Y-%m-%d %H:%M:%S",
            include_trace_context=include_trace_context,
        )
    else:
        formatter = logging.Formatter(
            fmt="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
            datefmt="%Y-%m-%d %H:%M:%S",
        )

    console_handler.setFormatter(formatter)
    root_logger.addHandler(console_handler)
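For orientation, here is a minimal usage sketch of the logging module above. It assumes the wheel is installed and importable as spatial_memory; the logger name and the example secrets are made up for the demo, chosen so the SENSITIVE_PATTERNS regexes match.

```python
# Illustrative only: assumes the wheel is installed as `spatial_memory`;
# the logger name and secret values below are invented for this demo.
import logging

from spatial_memory.core.logging import configure_logging

# Plain-text output with masking enabled (SecureFormatter path).
configure_logging(level="INFO", json_format=False, mask_sensitive=True)

log = logging.getLogger("spatial_memory.demo")

# SENSITIVE_PATTERNS rewrites the key/value pair to "api_key=***MASKED***"
# before the record reaches the console handler.
log.info("connecting with api_key=abc123-secret")

# Switching to JSON output routes records through JSONFormatter instead:
# one JSON object per line with timestamp/level/logger/message fields,
# plus request_id/agent_id when a tracing context is active.
configure_logging(level="INFO", json_format=True)
log.info("token sk-XXXXXXXXXXXXXXXXXXXXXXXX is masked as ***OPENAI_KEY***")
```

The [req=xxx][agent=yyy] prefix appears only when spatial_memory.core.tracing has an active context for the current request; otherwise both formatters fall back to plain messages.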
spatial_memory/core/metrics.py
@@ -0,0 +1,192 @@
"""Prometheus metrics for Spatial Memory MCP Server.

This module provides optional Prometheus metrics. If prometheus_client is not
installed, no-op stubs are provided so the code works without metrics.

Usage:
    from spatial_memory.core.metrics import (
        record_request,
        record_search_similarity,
        record_embedding_latency,
        update_memory_count,
    )

    with record_request("recall", "success"):
        # ... do work
        pass

    record_search_similarity(0.85)
    record_embedding_latency(0.234, model="openai")
"""

from __future__ import annotations

import time
from collections.abc import Generator
from contextlib import contextmanager
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from prometheus_client import Counter as CounterType
    from prometheus_client import Gauge as GaugeType
    from prometheus_client import Histogram as HistogramType

try:
    from prometheus_client import Counter, Gauge, Histogram

    PROMETHEUS_AVAILABLE = True
except ImportError:
    PROMETHEUS_AVAILABLE = False
    Counter = None  # type: ignore
    Histogram = None  # type: ignore
    Gauge = None  # type: ignore

# Metrics definitions (only created if prometheus_client available)
if PROMETHEUS_AVAILABLE:
    # Request metrics
    REQUESTS_TOTAL: CounterType = Counter(
        "spatial_memory_requests_total",
        "Total number of requests",
        ["tool", "status"],
    )
    REQUEST_DURATION: HistogramType = Histogram(
        "spatial_memory_request_duration_seconds",
        "Request duration in seconds",
        ["tool"],
        buckets=(0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0),
    )

    # Memory metrics
    MEMORIES_TOTAL: GaugeType = Gauge(
        "spatial_memory_memories_total",
        "Total number of memories",
        ["namespace"],
    )
    INDEX_STATUS: GaugeType = Gauge(
        "spatial_memory_index_status",
        "Index status (1=exists, 0=missing)",
        ["index_type"],
    )

    # Search metrics
    SEARCH_SIMILARITY: HistogramType = Histogram(
        "spatial_memory_search_similarity_score",
        "Search result similarity scores",
        buckets=(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 1.0),
    )

    # Embedding metrics
    EMBEDDING_LATENCY: HistogramType = Histogram(
        "spatial_memory_embedding_latency_seconds",
        "Embedding generation latency in seconds",
        ["model"],
        buckets=(0.01, 0.05, 0.1, 0.25, 0.5, 1.0, 2.0, 5.0),
    )
else:
    # No-op stubs when prometheus_client is not available
    REQUESTS_TOTAL = None  # type: ignore
    REQUEST_DURATION = None  # type: ignore
    MEMORIES_TOTAL = None  # type: ignore
    INDEX_STATUS = None  # type: ignore
    SEARCH_SIMILARITY = None  # type: ignore
    EMBEDDING_LATENCY = None  # type: ignore


@contextmanager
def record_request(tool: str, status: str = "success") -> Generator[None, None, None]:
    """Context manager to record request metrics.

    Args:
        tool: Name of the tool being called.
        status: Status of the request (success, error, etc.).

    Yields:
        None

    Example:
        with record_request("recall", "success"):
            # ... do work
            pass
    """
    if not PROMETHEUS_AVAILABLE:
        yield
        return

    start = time.monotonic()
    try:
        yield
    except Exception:
        status = "error"
        raise
    finally:
        duration = time.monotonic() - start
        REQUESTS_TOTAL.labels(tool=tool, status=status).inc()
        REQUEST_DURATION.labels(tool=tool).observe(duration)


def record_search_similarity(similarity: float) -> None:
    """Record a search result similarity score.

    Args:
        similarity: Similarity score between 0.0 and 1.0.

    Example:
        record_search_similarity(0.85)
    """
    if PROMETHEUS_AVAILABLE:
        SEARCH_SIMILARITY.observe(similarity)


def record_embedding_latency(duration: float, model: str = "local") -> None:
    """Record embedding generation latency.

    Args:
        duration: Time taken to generate embeddings in seconds.
        model: Model identifier (e.g., "local", "openai").

    Example:
        record_embedding_latency(0.234, model="openai")
    """
    if PROMETHEUS_AVAILABLE:
        EMBEDDING_LATENCY.labels(model=model).observe(duration)


def update_memory_count(namespace: str, count: int) -> None:
    """Update memory count for a namespace.

    Args:
        namespace: The namespace identifier.
        count: Total number of memories in the namespace.

    Example:
        update_memory_count("default", 1000)
    """
    if PROMETHEUS_AVAILABLE:
        MEMORIES_TOTAL.labels(namespace=namespace).set(count)


def update_index_status(index_type: str, exists: bool) -> None:
    """Update index status.

    Args:
        index_type: Type of index (e.g., "vector", "fts", "scalar").
        exists: Whether the index exists.

    Example:
        update_index_status("vector", True)
    """
    if PROMETHEUS_AVAILABLE:
        INDEX_STATUS.labels(index_type=index_type).set(1 if exists else 0)


def is_available() -> bool:
    """Check if Prometheus metrics are available.

    Returns:
        True if prometheus_client is installed, False otherwise.

    Example:
        if is_available():
            print("Metrics are available")
    """
    return PROMETHEUS_AVAILABLE
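The metrics helpers degrade to no-ops when prometheus_client is missing, so callers never need to guard them. A short usage sketch follows; the start_http_server call is the standard prometheus_client way to expose the default registry over HTTP and is an assumption here, not something metrics.py itself provides.

```python
# Illustrative only: assumes the wheel is installed as `spatial_memory`.
import time

from spatial_memory.core.metrics import (
    is_available,
    record_embedding_latency,
    record_request,
    record_search_similarity,
)

if is_available():
    # Standard prometheus_client exposition (assumption, not part of metrics.py):
    # serves the default registry at http://localhost:8000/metrics.
    from prometheus_client import start_http_server

    start_http_server(8000)

# Safe either way: each helper is a no-op without prometheus_client.
with record_request("recall"):
    # Wrapped work; on exception the status label is recorded as "error".
    time.sleep(0.05)
# -> spatial_memory_requests_total{tool="recall",status="success"} is incremented
#    and the elapsed time observed in spatial_memory_request_duration_seconds.

record_search_similarity(0.85)                   # similarity score histogram
record_embedding_latency(0.234, model="openai")  # per-model latency histogram
```

Because record_request re-raises inside its except branch and records in a finally block, failed tool calls are still counted, just under status="error", and their duration is still observed.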