spatial-memory-mcp 1.0.3__py3-none-any.whl → 1.6.0__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of spatial-memory-mcp might be problematic.
- spatial_memory/__init__.py +97 -97
- spatial_memory/__main__.py +241 -2
- spatial_memory/adapters/lancedb_repository.py +74 -5
- spatial_memory/config.py +115 -2
- spatial_memory/core/__init__.py +35 -0
- spatial_memory/core/cache.py +317 -0
- spatial_memory/core/circuit_breaker.py +297 -0
- spatial_memory/core/connection_pool.py +41 -3
- spatial_memory/core/consolidation_strategies.py +402 -0
- spatial_memory/core/database.py +791 -769
- spatial_memory/core/db_idempotency.py +242 -0
- spatial_memory/core/db_indexes.py +575 -0
- spatial_memory/core/db_migrations.py +584 -0
- spatial_memory/core/db_search.py +509 -0
- spatial_memory/core/db_versioning.py +177 -0
- spatial_memory/core/embeddings.py +156 -19
- spatial_memory/core/errors.py +75 -3
- spatial_memory/core/filesystem.py +178 -0
- spatial_memory/core/logging.py +194 -103
- spatial_memory/core/models.py +4 -0
- spatial_memory/core/rate_limiter.py +326 -105
- spatial_memory/core/response_types.py +497 -0
- spatial_memory/core/tracing.py +300 -0
- spatial_memory/core/validation.py +403 -319
- spatial_memory/factory.py +407 -0
- spatial_memory/migrations/__init__.py +40 -0
- spatial_memory/ports/repositories.py +52 -2
- spatial_memory/server.py +329 -188
- spatial_memory/services/export_import.py +61 -43
- spatial_memory/services/lifecycle.py +397 -122
- spatial_memory/services/memory.py +81 -4
- spatial_memory/services/spatial.py +129 -46
- spatial_memory/tools/definitions.py +695 -671
- {spatial_memory_mcp-1.0.3.dist-info → spatial_memory_mcp-1.6.0.dist-info}/METADATA +83 -3
- spatial_memory_mcp-1.6.0.dist-info/RECORD +54 -0
- spatial_memory_mcp-1.0.3.dist-info/RECORD +0 -41
- {spatial_memory_mcp-1.0.3.dist-info → spatial_memory_mcp-1.6.0.dist-info}/WHEEL +0 -0
- {spatial_memory_mcp-1.0.3.dist-info → spatial_memory_mcp-1.6.0.dist-info}/entry_points.txt +0 -0
- {spatial_memory_mcp-1.0.3.dist-info → spatial_memory_mcp-1.6.0.dist-info}/licenses/LICENSE +0 -0
spatial_memory/factory.py  (new file, +407 lines)

```python
"""Service factory for dependency injection and initialization.

This module provides a factory pattern for creating and wiring all services
used by the SpatialMemoryServer. It centralizes configuration and dependency
injection, making the server initialization cleaner and services more testable.

Usage:
    from spatial_memory.factory import ServiceFactory

    factory = ServiceFactory(settings)
    services = factory.create_all()
"""

from __future__ import annotations

import logging
from dataclasses import dataclass
from typing import TYPE_CHECKING

from spatial_memory.adapters.lancedb_repository import LanceDBMemoryRepository
from spatial_memory.config import Settings
from spatial_memory.core.cache import ResponseCache
from spatial_memory.core.database import Database
from spatial_memory.core.embeddings import EmbeddingService
from spatial_memory.core.rate_limiter import AgentAwareRateLimiter, RateLimiter
from spatial_memory.services.export_import import ExportImportConfig, ExportImportService
from spatial_memory.services.lifecycle import LifecycleConfig, LifecycleService
from spatial_memory.services.memory import MemoryService
from spatial_memory.services.spatial import SpatialConfig, SpatialService
from spatial_memory.services.utility import UtilityConfig, UtilityService

if TYPE_CHECKING:
    from spatial_memory.ports.repositories import (
        EmbeddingServiceProtocol,
        MemoryRepositoryProtocol,
    )

logger = logging.getLogger(__name__)


@dataclass
class ServiceContainer:
    """Container for all initialized services.

    Provides access to all service instances created by the factory.
    This allows the server to access services through a single container
    rather than managing individual references.

    Attributes:
        embeddings: Embedding service for vector generation.
        database: Database connection and operations.
        repository: Memory repository for CRUD operations.
        memory: Memory service for remember/recall operations.
        spatial: Spatial service for exploration operations.
        lifecycle: Lifecycle service for decay/reinforce/consolidate.
        utility: Utility service for stats/namespaces/hybrid search.
        export_import: Export/import service for data portability.
        rate_limiter: Simple rate limiter (if per-agent disabled).
        agent_rate_limiter: Per-agent rate limiter (if enabled).
        cache: Response cache for read operations.
        per_agent_rate_limiting: Whether per-agent rate limiting is enabled.
        cache_enabled: Whether response caching is enabled.
        regions_cache_ttl: TTL for regions cache entries.
    """

    embeddings: EmbeddingServiceProtocol
    database: Database | None
    repository: MemoryRepositoryProtocol
    memory: MemoryService
    spatial: SpatialService
    lifecycle: LifecycleService
    utility: UtilityService
    export_import: ExportImportService
    rate_limiter: RateLimiter | None
    agent_rate_limiter: AgentAwareRateLimiter | None
    cache: ResponseCache | None
    per_agent_rate_limiting: bool
    cache_enabled: bool
    regions_cache_ttl: float


class ServiceFactory:
    """Factory for creating and wiring all services.

    Centralizes service creation with proper dependency injection.
    This simplifies server initialization and improves testability.

    Example:
        factory = ServiceFactory(settings)
        services = factory.create_all()
        # Use services.memory, services.spatial, etc.
    """

    def __init__(
        self,
        settings: Settings,
        repository: MemoryRepositoryProtocol | None = None,
        embeddings: EmbeddingServiceProtocol | None = None,
    ) -> None:
        """Initialize the factory.

        Args:
            settings: Application settings.
            repository: Optional repository override for testing.
            embeddings: Optional embeddings override for testing.
        """
        self._settings = settings
        self._injected_repository = repository
        self._injected_embeddings = embeddings

    def create_embedding_service(self) -> EmbeddingService:
        """Create the embedding service.

        Returns:
            Configured EmbeddingService instance.
        """
        return EmbeddingService(
            model_name=self._settings.embedding_model,
            openai_api_key=self._settings.openai_api_key,
            backend=self._settings.embedding_backend,  # type: ignore[arg-type]
        )

    def create_database(self, embedding_dim: int) -> Database:
        """Create the database connection.

        Args:
            embedding_dim: Dimension of embedding vectors.

        Returns:
            Configured Database instance.
        """
        db = Database(
            storage_path=self._settings.memory_path,
            embedding_dim=embedding_dim,
            auto_create_indexes=self._settings.auto_create_indexes,
            vector_index_threshold=self._settings.vector_index_threshold,
            enable_fts=self._settings.enable_fts_index,
            index_nprobes=self._settings.index_nprobes,
            index_refine_factor=self._settings.index_refine_factor,
            max_retry_attempts=self._settings.max_retry_attempts,
            retry_backoff_seconds=self._settings.retry_backoff_seconds,
            read_consistency_interval_ms=self._settings.read_consistency_interval_ms,
            index_wait_timeout_seconds=self._settings.index_wait_timeout_seconds,
            fts_stem=self._settings.fts_stem,
            fts_remove_stop_words=self._settings.fts_remove_stop_words,
            fts_language=self._settings.fts_language,
            index_type=self._settings.index_type,
            hnsw_m=self._settings.hnsw_m,
            hnsw_ef_construction=self._settings.hnsw_ef_construction,
            enable_memory_expiration=self._settings.enable_memory_expiration,
            default_memory_ttl_days=self._settings.default_memory_ttl_days,
            acknowledge_network_filesystem_risk=self._settings.acknowledge_network_filesystem_risk,
        )
        db.connect()
        return db

    def create_repository(self, database: Database) -> LanceDBMemoryRepository:
        """Create the memory repository.

        Args:
            database: Database instance.

        Returns:
            LanceDBMemoryRepository instance.
        """
        return LanceDBMemoryRepository(database)

    def create_memory_service(
        self,
        repository: MemoryRepositoryProtocol,
        embeddings: EmbeddingServiceProtocol,
    ) -> MemoryService:
        """Create the memory service.

        Args:
            repository: Memory repository.
            embeddings: Embedding service.

        Returns:
            Configured MemoryService instance.
        """
        return MemoryService(
            repository=repository,
            embeddings=embeddings,
        )

    def create_spatial_service(
        self,
        repository: MemoryRepositoryProtocol,
        embeddings: EmbeddingServiceProtocol,
    ) -> SpatialService:
        """Create the spatial service.

        Args:
            repository: Memory repository.
            embeddings: Embedding service.

        Returns:
            Configured SpatialService instance.
        """
        return SpatialService(
            repository=repository,
            embeddings=embeddings,
            config=SpatialConfig(
                journey_max_steps=self._settings.max_journey_steps,
                wander_max_steps=self._settings.max_wander_steps,
                regions_max_memories=self._settings.regions_max_memories,
                visualize_max_memories=self._settings.max_visualize_memories,
                visualize_n_neighbors=self._settings.umap_n_neighbors,
                visualize_min_dist=self._settings.umap_min_dist,
                visualize_similarity_threshold=self._settings.visualize_similarity_threshold,
            ),
        )

    def create_lifecycle_service(
        self,
        repository: MemoryRepositoryProtocol,
        embeddings: EmbeddingServiceProtocol,
    ) -> LifecycleService:
        """Create the lifecycle service.

        Args:
            repository: Memory repository.
            embeddings: Embedding service.

        Returns:
            Configured LifecycleService instance.
        """
        return LifecycleService(
            repository=repository,
            embeddings=embeddings,
            config=LifecycleConfig(
                decay_default_half_life_days=self._settings.decay_default_half_life_days,
                decay_default_function=self._settings.decay_default_function,
                decay_min_importance_floor=self._settings.decay_min_importance_floor,
                decay_batch_size=self._settings.decay_batch_size,
                reinforce_default_boost=self._settings.reinforce_default_boost,
                reinforce_max_importance=self._settings.reinforce_max_importance,
                extract_max_text_length=self._settings.extract_max_text_length,
                extract_max_candidates=self._settings.extract_max_candidates,
                extract_default_importance=self._settings.extract_default_importance,
                extract_default_namespace=self._settings.extract_default_namespace,
                consolidate_min_threshold=self._settings.consolidate_min_threshold,
                consolidate_content_weight=self._settings.consolidate_content_weight,
                consolidate_max_batch=self._settings.consolidate_max_batch,
            ),
        )

    def create_utility_service(
        self,
        repository: MemoryRepositoryProtocol,
        embeddings: EmbeddingServiceProtocol,
    ) -> UtilityService:
        """Create the utility service.

        Args:
            repository: Memory repository.
            embeddings: Embedding service.

        Returns:
            Configured UtilityService instance.
        """
        return UtilityService(
            repository=repository,
            embeddings=embeddings,
            config=UtilityConfig(
                hybrid_default_alpha=self._settings.hybrid_default_alpha,
                hybrid_min_alpha=self._settings.hybrid_min_alpha,
                hybrid_max_alpha=self._settings.hybrid_max_alpha,
                stats_include_index_details=True,
                namespace_batch_size=self._settings.namespace_batch_size,
                delete_namespace_require_confirmation=self._settings.destructive_require_namespace_confirmation,
            ),
        )

    def create_export_import_service(
        self,
        repository: MemoryRepositoryProtocol,
        embeddings: EmbeddingServiceProtocol,
    ) -> ExportImportService:
        """Create the export/import service.

        Args:
            repository: Memory repository.
            embeddings: Embedding service.

        Returns:
            Configured ExportImportService instance.
        """
        return ExportImportService(
            repository=repository,
            embeddings=embeddings,
            config=ExportImportConfig(
                default_export_format=self._settings.export_default_format,
                export_batch_size=self._settings.export_batch_size,
                import_batch_size=self._settings.import_batch_size,
                import_deduplicate=self._settings.import_deduplicate_default,
                import_dedup_threshold=self._settings.import_dedup_threshold,
                validate_on_import=self._settings.import_validate_vectors,
                parquet_compression="zstd",
                max_import_records=self._settings.import_max_records,
                csv_include_vectors=self._settings.csv_include_vectors,
                max_export_records=self._settings.max_export_records,
            ),
            allowed_export_paths=self._settings.export_allowed_paths,
            allowed_import_paths=self._settings.import_allowed_paths,
            allow_symlinks=self._settings.export_allow_symlinks,
            max_import_size_bytes=int(self._settings.import_max_file_size_mb * 1024 * 1024),
        )

    def create_rate_limiter(self) -> tuple[RateLimiter | None, AgentAwareRateLimiter | None, bool]:
        """Create rate limiter based on settings.

        Returns:
            Tuple of (simple_limiter, agent_limiter, per_agent_enabled).
        """
        per_agent = self._settings.rate_limit_per_agent_enabled
        if per_agent:
            return (
                None,
                AgentAwareRateLimiter(
                    global_rate=self._settings.embedding_rate_limit,
                    per_agent_rate=self._settings.rate_limit_per_agent_rate,
                    max_agents=self._settings.rate_limit_max_tracked_agents,
                ),
                True,
            )
        return (
            RateLimiter(
                rate=self._settings.embedding_rate_limit,
                capacity=int(self._settings.embedding_rate_limit * 2),
            ),
            None,
            False,
        )

    def create_cache(self) -> tuple[ResponseCache | None, bool, float]:
        """Create response cache based on settings.

        Returns:
            Tuple of (cache, enabled, regions_ttl).
        """
        if not self._settings.response_cache_enabled:
            return None, False, 0.0
        return (
            ResponseCache(
                max_size=self._settings.response_cache_max_size,
                default_ttl=self._settings.response_cache_default_ttl,
            ),
            True,
            self._settings.response_cache_regions_ttl,
        )

    def create_all(self) -> ServiceContainer:
        """Create all services with proper dependency wiring.

        Returns:
            ServiceContainer with all services initialized.
        """
        # Use injected dependencies or create new ones
        if self._injected_embeddings is None:
            embeddings = self.create_embedding_service()
        else:
            embeddings = self._injected_embeddings

        # Auto-detect embedding dimensions
        embedding_dim = embeddings.dimensions
        logger.info(f"Auto-detected embedding dimensions: {embedding_dim}")
        logger.info(f"Embedding backend: {embeddings.backend}")

        # Create database and repository
        database: Database | None = None
        if self._injected_repository is None:
            database = self.create_database(embedding_dim)
            repository = self.create_repository(database)
        else:
            repository = self._injected_repository

        # Create services with shared dependencies
        memory = self.create_memory_service(repository, embeddings)
        spatial = self.create_spatial_service(repository, embeddings)
        lifecycle = self.create_lifecycle_service(repository, embeddings)
        utility = self.create_utility_service(repository, embeddings)
        export_import = self.create_export_import_service(repository, embeddings)

        # Create rate limiter
        rate_limiter, agent_rate_limiter, per_agent_enabled = self.create_rate_limiter()

        # Create cache
        cache, cache_enabled, regions_cache_ttl = self.create_cache()

        return ServiceContainer(
            embeddings=embeddings,
            database=database,
            repository=repository,
            memory=memory,
            spatial=spatial,
            lifecycle=lifecycle,
            utility=utility,
            export_import=export_import,
            rate_limiter=rate_limiter,
            agent_rate_limiter=agent_rate_limiter,
            cache=cache,
            per_agent_rate_limiting=per_agent_enabled,
            cache_enabled=cache_enabled,
            regions_cache_ttl=regions_cache_ttl,
        )
```
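The factory is the composition root for the server: `create_all()` builds the embedding service first (or takes an injected one), sizes the database from `embeddings.dimensions`, and wires every service against the shared repository. A minimal usage sketch follows; it assumes `Settings()` can be constructed from defaults or environment variables, which is not shown in this diff.

```python
from spatial_memory.config import Settings
from spatial_memory.factory import ServiceFactory

# Assumption: Settings() is constructible from defaults / environment variables.
settings = Settings()

factory = ServiceFactory(settings)
services = factory.create_all()

memory_service = services.memory      # remember/recall operations
spatial_service = services.spatial    # exploration operations
caching_on = services.cache_enabled   # True only if response caching is configured

# In tests, injecting a repository (and optionally an embedding service)
# skips Database creation entirely and leaves services.database as None:
#   ServiceFactory(settings, repository=fake_repo, embeddings=fake_embeddings)
```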
spatial_memory/migrations/__init__.py  (new file, +40 lines)

```python
"""Database migrations for spatial-memory-mcp.

This package contains database migration scripts for schema changes.
Each migration is a module that exports a Migration class.

Migrations should be numbered sequentially (001, 002, etc.) and use
semantic versioning for their version string.

Example:
    # migrations/001_add_expires_at.py
    from spatial_memory.core.db_migrations import Migration

    class Migration001AddExpiresAt(Migration):
        version = "1.1.0"
        description = "Add expires_at column for TTL support"

        def up(self, db, embeddings=None):
            # Apply migration
            pass

        def down(self, db):
            # Rollback migration (optional)
            raise NotImplementedError("Rollback not supported")
"""

from spatial_memory.core.db_migrations import (
    CURRENT_SCHEMA_VERSION,
    Migration,
    MigrationManager,
    MigrationResult,
    check_migration_status,
)

__all__ = [
    "CURRENT_SCHEMA_VERSION",
    "Migration",
    "MigrationManager",
    "MigrationResult",
    "check_migration_status",
]
```
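To make the convention above concrete, a follow-up migration module might look like the sketch below. The file name, class, version string, and column are hypothetical; only the `Migration` base class and the numbering/versioning convention come from the package docstring.

```python
# migrations/002_add_source_field.py  -- hypothetical example, not shipped
from spatial_memory.core.db_migrations import Migration


class Migration002AddSourceField(Migration):
    version = "1.2.0"
    description = "Add a source column for provenance tracking"

    def up(self, db, embeddings=None):
        # Apply the schema change here; the db handle's API is not part of
        # this diff, so the body is left as a placeholder.
        ...

    def down(self, db):
        # Rollback is optional; the package's own example raises as well.
        raise NotImplementedError("Rollback not supported")
```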
spatial_memory/ports/repositories.py  (+52 -2)

```diff
@@ -104,14 +104,16 @@ class MemoryRepositoryProtocol(Protocol):
         """
         ...
 
-    def delete_batch(self, memory_ids: list[str]) -> int:
+    def delete_batch(self, memory_ids: list[str]) -> tuple[int, list[str]]:
         """Delete multiple memories.
 
         Args:
             memory_ids: List of memory UUIDs to delete.
 
         Returns:
-
+            Tuple of (count_deleted, list_of_deleted_ids) where:
+            - count_deleted: Number of memories actually deleted
+            - list_of_deleted_ids: IDs that were actually deleted
 
         Raises:
             ValidationError: If any memory_id is invalid.
@@ -124,6 +126,7 @@ class MemoryRepositoryProtocol(Protocol):
         query_vector: np.ndarray,
         limit: int = 5,
         namespace: str | None = None,
+        include_vector: bool = False,
     ) -> list[MemoryResult]:
         """Search for similar memories by vector.
 
@@ -131,9 +134,12 @@ class MemoryRepositoryProtocol(Protocol):
             query_vector: Query embedding vector.
             limit: Maximum number of results.
             namespace: Filter to specific namespace.
+            include_vector: Whether to include embedding vectors in results.
+                Defaults to False to reduce response size.
 
         Returns:
             List of MemoryResult objects with similarity scores.
+            If include_vector=True, each result includes its embedding vector.
 
         Raises:
             ValidationError: If input validation fails.
@@ -183,6 +189,37 @@ class MemoryRepositoryProtocol(Protocol):
         """
         ...
 
+    def get_batch(self, memory_ids: list[str]) -> dict[str, Memory]:
+        """Get multiple memories by ID in a single query.
+
+        Args:
+            memory_ids: List of memory UUIDs to retrieve.
+
+        Returns:
+            Dict mapping memory_id to Memory object. Missing IDs are not included.
+
+        Raises:
+            ValidationError: If any memory_id format is invalid.
+            StorageError: If database operation fails.
+        """
+        ...
+
+    def update_batch(
+        self, updates: list[tuple[str, dict[str, Any]]]
+    ) -> tuple[int, list[str]]:
+        """Update multiple memories in a single batch operation.
+
+        Args:
+            updates: List of (memory_id, updates_dict) tuples.
+
+        Returns:
+            Tuple of (success_count, list of failed memory_ids).
+
+        Raises:
+            StorageError: If database operation fails completely.
+        """
+        ...
+
     def count(self, namespace: str | None = None) -> int:
         """Count memories.
 
@@ -340,6 +377,7 @@ class MemoryRepositoryProtocol(Protocol):
         query_vectors: list[np.ndarray],
         limit_per_query: int = 3,
         namespace: str | None = None,
+        include_vector: bool = False,
     ) -> list[list[dict[str, Any]]]:
         """Search for memories near multiple query points.
 
@@ -350,10 +388,13 @@ class MemoryRepositoryProtocol(Protocol):
             query_vectors: List of query embedding vectors.
             limit_per_query: Maximum results per query vector.
             namespace: Filter to specific namespace.
+            include_vector: Whether to include embedding vectors in results.
+                Defaults to False to reduce response size.
 
         Returns:
             List of result lists (one per query vector). Each result
             is a dict containing memory fields and similarity score.
+            If include_vector=True, each dict includes the 'vector' field.
 
         Raises:
             ValidationError: If input validation fails.
@@ -552,6 +593,15 @@ class EmbeddingServiceProtocol(Protocol):
         """Get the embedding dimensions."""
         ...
 
+    @property
+    def backend(self) -> str:
+        """Get the active embedding backend.
+
+        Returns:
+            'openai' for OpenAI API, 'onnx' or 'pytorch' for local models.
+        """
+        ...
+
     def embed(self, text: str) -> np.ndarray:
         """Generate embedding for a single text.
 
```
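The protocol changes above give callers per-ID feedback from batch operations (`delete_batch` now returns the IDs it actually removed, and `get_batch`/`update_batch` are new) and let search results omit embedding vectors unless `include_vector=True` is passed. A hedged caller-side sketch against any `MemoryRepositoryProtocol` implementation follows; the function names and the `{"importance": 0.9}` payload are illustrative assumptions, not package APIs.

```python
from __future__ import annotations

from spatial_memory.ports.repositories import MemoryRepositoryProtocol


def reweight_batch(repo: MemoryRepositoryProtocol, ids: list[str]) -> list[str]:
    """Illustrative only: bump importance on a batch and report failures."""
    # get_batch fetches existing memories in one query; IDs that do not exist
    # are simply absent from the returned dict.
    memories = repo.get_batch(ids)

    # update_batch takes (memory_id, updates_dict) tuples and returns
    # (success_count, failed_ids). The payload here is a hypothetical example.
    _ok_count, failed_ids = repo.update_batch(
        [(mid, {"importance": 0.9}) for mid in memories]
    )
    return failed_ids


def delete_batch_strict(repo: MemoryRepositoryProtocol, ids: list[str]) -> None:
    """Illustrative only: delete_batch now reports which IDs were removed."""
    _count, deleted_ids = repo.delete_batch(ids)
    missing = set(ids) - set(deleted_ids)
    if missing:
        raise RuntimeError(f"{len(missing)} memories were not deleted")
```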