spatial-memory-mcp 1.5.3__py3-none-any.whl → 1.6.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of spatial-memory-mcp might be problematic.
- spatial_memory/__init__.py +1 -1
- spatial_memory/__main__.py +241 -2
- spatial_memory/adapters/lancedb_repository.py +74 -5
- spatial_memory/config.py +10 -2
- spatial_memory/core/__init__.py +9 -0
- spatial_memory/core/connection_pool.py +41 -3
- spatial_memory/core/consolidation_strategies.py +402 -0
- spatial_memory/core/database.py +774 -918
- spatial_memory/core/db_idempotency.py +242 -0
- spatial_memory/core/db_indexes.py +575 -0
- spatial_memory/core/db_migrations.py +584 -0
- spatial_memory/core/db_search.py +509 -0
- spatial_memory/core/db_versioning.py +177 -0
- spatial_memory/core/embeddings.py +65 -18
- spatial_memory/core/errors.py +75 -3
- spatial_memory/core/filesystem.py +178 -0
- spatial_memory/core/models.py +4 -0
- spatial_memory/core/rate_limiter.py +26 -9
- spatial_memory/core/response_types.py +497 -0
- spatial_memory/core/validation.py +86 -2
- spatial_memory/factory.py +407 -0
- spatial_memory/migrations/__init__.py +40 -0
- spatial_memory/ports/repositories.py +52 -2
- spatial_memory/server.py +131 -189
- spatial_memory/services/export_import.py +61 -43
- spatial_memory/services/lifecycle.py +397 -122
- spatial_memory/services/memory.py +2 -2
- spatial_memory/services/spatial.py +129 -46
- {spatial_memory_mcp-1.5.3.dist-info → spatial_memory_mcp-1.6.0.dist-info}/METADATA +83 -3
- spatial_memory_mcp-1.6.0.dist-info/RECORD +54 -0
- spatial_memory_mcp-1.5.3.dist-info/RECORD +0 -44
- {spatial_memory_mcp-1.5.3.dist-info → spatial_memory_mcp-1.6.0.dist-info}/WHEEL +0 -0
- {spatial_memory_mcp-1.5.3.dist-info → spatial_memory_mcp-1.6.0.dist-info}/entry_points.txt +0 -0
- {spatial_memory_mcp-1.5.3.dist-info → spatial_memory_mcp-1.6.0.dist-info}/licenses/LICENSE +0 -0
spatial_memory/__init__.py
CHANGED
spatial_memory/__main__.py
CHANGED
```diff
@@ -1,14 +1,253 @@
-"""Entry point for running the Spatial Memory MCP Server."""
+"""Entry point for running the Spatial Memory MCP Server and CLI commands."""
 
+from __future__ import annotations
+
+import argparse
 import asyncio
+import logging
+import sys
+from typing import NoReturn
+
+logger = logging.getLogger(__name__)
 
 
-def
+def run_server() -> None:
     """Run the Spatial Memory MCP Server."""
     from spatial_memory.server import main as server_main
 
     asyncio.run(server_main())
 
 
+def run_migrate(args: argparse.Namespace) -> int:
+    """Run database migrations.
+
+    Args:
+        args: Parsed command line arguments.
+
+    Returns:
+        Exit code (0 for success, 1 for error).
+    """
+    from spatial_memory.config import get_settings
+    from spatial_memory.core.database import Database
+    from spatial_memory.core.db_migrations import (
+        CURRENT_SCHEMA_VERSION,
+        MigrationManager,
+    )
+    from spatial_memory.core.embeddings import EmbeddingService
+
+    settings = get_settings()
+
+    # Set up logging based on verbosity
+    log_level = logging.DEBUG if args.verbose else logging.INFO
+    logging.basicConfig(
+        level=log_level,
+        format="%(levelname)s: %(message)s",
+    )
+
+    print(f"Spatial Memory Migration Tool")
+    print(f"Target schema version: {CURRENT_SCHEMA_VERSION}")
+    print(f"Database path: {settings.memory_path}")
+    print()
+
+    try:
+        # Create embedding service if needed for migrations
+        embeddings = None
+        if not args.dry_run:
+            # Only load embeddings for actual migrations (some may need re-embedding)
+            print("Loading embedding service...")
+            embeddings = EmbeddingService(
+                model_name=settings.embedding_model,
+                openai_api_key=settings.openai_api_key,
+                backend=settings.embedding_backend,
+            )
+
+        # Connect to database
+        print("Connecting to database...")
+        db = Database(
+            storage_path=settings.memory_path,
+            embedding_dim=embeddings.dimensions if embeddings else 384,
+            auto_create_indexes=settings.auto_create_indexes,
+        )
+        db.connect()
+
+        # Create migration manager
+        manager = MigrationManager(db, embeddings)
+        manager.register_builtin_migrations()
+
+        current_version = manager.get_current_version()
+        print(f"Current schema version: {current_version}")
+
+        if args.status:
+            # Just show status, don't run migrations
+            pending = manager.get_pending_migrations()
+            if pending:
+                print(f"\nPending migrations ({len(pending)}):")
+                for m in pending:
+                    print(f"  - {m.version}: {m.description}")
+            else:
+                print("\nNo pending migrations. Database is up to date.")
+
+            applied = manager.get_applied_migrations()
+            if applied:
+                print(f"\nApplied migrations ({len(applied)}):")
+                for m in applied:
+                    print(f"  - {m.version}: {m.description} (applied: {m.applied_at})")
+
+            db.close()
+            return 0
+
+        if args.rollback:
+            # Rollback to specified version
+            print(f"\nRolling back to version {args.rollback}...")
+            result = manager.rollback(args.rollback)
+
+            if result.errors:
+                print("\nRollback failed with errors:")
+                for error in result.errors:
+                    print(f"  - {error}")
+                db.close()
+                return 1
+
+            if result.migrations_applied:
+                print(f"\nRolled back migrations:")
+                for v in result.migrations_applied:
+                    print(f"  - {v}")
+                print(f"\nCurrent version: {result.current_version}")
+            else:
+                print("\nNo migrations to rollback.")
+
+            db.close()
+            return 0
+
+        # Run pending migrations
+        pending = manager.get_pending_migrations()
+        if not pending:
+            print("\nNo pending migrations. Database is up to date.")
+            db.close()
+            return 0
+
+        print(f"\nPending migrations ({len(pending)}):")
+        for m in pending:
+            print(f"  - {m.version}: {m.description}")
+
+        if args.dry_run:
+            print("\n[DRY RUN] Would apply the above migrations.")
+            print("Run without --dry-run to apply.")
+            db.close()
+            return 0
+
+        # Confirm before applying
+        if not args.yes:
+            print()
+            response = input("Apply these migrations? [y/N] ").strip().lower()
+            if response not in ("y", "yes"):
+                print("Aborted.")
+                db.close()
+                return 0
+
+        print("\nApplying migrations...")
+        result = manager.run_pending(dry_run=False)
+
+        if result.errors:
+            print("\nMigration failed with errors:")
+            for error in result.errors:
+                print(f"  - {error}")
+            print("\nSome migrations may have been applied. Check database state.")
+            db.close()
+            return 1
+
+        print(f"\nSuccessfully applied {len(result.migrations_applied)} migration(s):")
+        for v in result.migrations_applied:
+            print(f"  - {v}")
+        print(f"\nCurrent version: {result.current_version}")
+
+        db.close()
+        return 0
+
+    except Exception as e:
+        logger.error(f"Migration failed: {e}", exc_info=args.verbose)
+        print(f"\nError: {e}")
+        return 1
+
+
+def run_version() -> None:
+    """Print version information."""
+    from spatial_memory import __version__
+
+    print(f"spatial-memory {__version__}")
+
+
+def main() -> NoReturn:
+    """Main entry point with subcommand support."""
+    parser = argparse.ArgumentParser(
+        prog="spatial-memory",
+        description="Spatial Memory MCP Server and CLI tools",
+    )
+    parser.add_argument(
+        "--version", "-V",
+        action="store_true",
+        help="Show version and exit",
+    )
+
+    subparsers = parser.add_subparsers(
+        dest="command",
+        title="commands",
+        description="Available commands",
+    )
+
+    # Server command (default)
+    server_parser = subparsers.add_parser(
+        "serve",
+        help="Start the MCP server (default if no command given)",
+    )
+
+    # Migrate command
+    migrate_parser = subparsers.add_parser(
+        "migrate",
+        help="Run database migrations",
+    )
+    migrate_parser.add_argument(
+        "--dry-run",
+        action="store_true",
+        help="Preview migrations without applying",
+    )
+    migrate_parser.add_argument(
+        "--status",
+        action="store_true",
+        help="Show migration status and exit",
+    )
+    migrate_parser.add_argument(
+        "--rollback",
+        metavar="VERSION",
+        help="Rollback to specified version (e.g., 1.0.0)",
+    )
+    migrate_parser.add_argument(
+        "-y", "--yes",
+        action="store_true",
+        help="Skip confirmation prompt",
+    )
+    migrate_parser.add_argument(
+        "-v", "--verbose",
+        action="store_true",
+        help="Enable verbose output",
+    )
+
+    args = parser.parse_args()
+
+    if args.version:
+        run_version()
+        sys.exit(0)
+
+    if args.command == "migrate":
+        sys.exit(run_migrate(args))
+    elif args.command == "serve" or args.command is None:
+        # Default to running the server
+        run_server()
+        sys.exit(0)
+    else:
+        parser.print_help()
+        sys.exit(1)
+
+
 if __name__ == "__main__":
     main()
```
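The new `__main__.py` turns the package entry point into a small CLI (`serve`, `migrate`, `--version`). A minimal sketch of the same migration status check done programmatically, using only names that appear in the diff above; treat it as illustrative rather than a documented API:

```python
# Equivalent CLI: python -m spatial_memory migrate --status
from spatial_memory.config import get_settings
from spatial_memory.core.database import Database
from spatial_memory.core.db_migrations import CURRENT_SCHEMA_VERSION, MigrationManager

settings = get_settings()

# No embedding service needed for a status check (the CLI also skips it for --dry-run).
db = Database(
    storage_path=settings.memory_path,
    embedding_dim=384,  # fallback dimension the CLI uses when embeddings are not loaded
    auto_create_indexes=settings.auto_create_indexes,
)
db.connect()
try:
    manager = MigrationManager(db, None)  # the CLI passes embeddings=None in dry-run mode
    manager.register_builtin_migrations()
    print(f"current={manager.get_current_version()} target={CURRENT_SCHEMA_VERSION}")
    for m in manager.get_pending_migrations():
        print(f"pending {m.version}: {m.description}")
finally:
    db.close()
```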
spatial_memory/adapters/lancedb_repository.py
CHANGED
```diff
@@ -178,7 +178,7 @@ class LanceDBMemoryRepository:
             logger.error(f"Unexpected error in delete: {e}")
             raise StorageError(f"Failed to delete memory: {e}") from e
 
-    def delete_batch(self, memory_ids: list[str]) -> int:
+    def delete_batch(self, memory_ids: list[str]) -> tuple[int, list[str]]:
         """Delete multiple memories atomically.
 
         Delegates to Database.delete_batch for proper encapsulation.
@@ -187,7 +187,9 @@ class LanceDBMemoryRepository:
             memory_ids: List of memory UUIDs to delete.
 
         Returns:
-
+            Tuple of (count_deleted, list_of_deleted_ids) where:
+            - count_deleted: Number of memories actually deleted
+            - list_of_deleted_ids: IDs that were actually deleted
 
         Raises:
             ValidationError: If any memory_id is invalid.
@@ -206,6 +208,7 @@ class LanceDBMemoryRepository:
         query_vector: np.ndarray,
         limit: int = 5,
         namespace: str | None = None,
+        include_vector: bool = False,
     ) -> list[MemoryResult]:
         """Search for similar memories by vector.
 
@@ -213,16 +216,24 @@ class LanceDBMemoryRepository:
             query_vector: Query embedding vector.
             limit: Maximum number of results.
             namespace: Filter to specific namespace.
+            include_vector: Whether to include embedding vectors in results.
+                Defaults to False to reduce response size.
 
         Returns:
             List of MemoryResult objects with similarity scores.
+            If include_vector=True, each result includes its embedding vector.
 
         Raises:
             ValidationError: If input validation fails.
             StorageError: If database operation fails.
         """
         try:
-            results = self._db.vector_search(
+            results = self._db.vector_search(
+                query_vector,
+                limit=limit,
+                namespace=namespace,
+                include_vector=include_vector,
+            )
             return [self._record_to_memory_result(r) for r in results]
         except (ValidationError, StorageError):
             raise
@@ -292,6 +303,53 @@ class LanceDBMemoryRepository:
             logger.error(f"Unexpected error in update: {e}")
             raise StorageError(f"Failed to update memory: {e}") from e
 
+    def get_batch(self, memory_ids: list[str]) -> dict[str, Memory]:
+        """Get multiple memories by ID in a single query.
+
+        Args:
+            memory_ids: List of memory UUIDs to retrieve.
+
+        Returns:
+            Dict mapping memory_id to Memory object. Missing IDs are not included.
+
+        Raises:
+            ValidationError: If any memory_id format is invalid.
+            StorageError: If database operation fails.
+        """
+        try:
+            raw_results = self._db.get_batch(memory_ids)
+            result: dict[str, Memory] = {}
+            for memory_id, record in raw_results.items():
+                result[memory_id] = self._record_to_memory(record)
+            return result
+        except (ValidationError, StorageError):
+            raise
+        except Exception as e:
+            logger.error(f"Unexpected error in get_batch: {e}")
+            raise StorageError(f"Failed to batch get memories: {e}") from e
+
+    def update_batch(
+        self, updates: list[tuple[str, dict[str, Any]]]
+    ) -> tuple[int, list[str]]:
+        """Update multiple memories in a single batch operation.
+
+        Args:
+            updates: List of (memory_id, updates_dict) tuples.
+
+        Returns:
+            Tuple of (success_count, list of failed memory_ids).
+
+        Raises:
+            StorageError: If database operation fails completely.
+        """
+        try:
+            return self._db.update_batch(updates)
+        except StorageError:
+            raise
+        except Exception as e:
+            logger.error(f"Unexpected error in update_batch: {e}")
+            raise StorageError(f"Failed to batch update memories: {e}") from e
+
     def count(self, namespace: str | None = None) -> int:
         """Count memories.
 
@@ -533,6 +591,13 @@ class LanceDBMemoryRepository:
         similarity = record.get("similarity", 0.0)
         similarity = max(0.0, min(1.0, similarity))
 
+        # Include vector if present in record (when include_vector=True in search)
+        vector = None
+        if "vector" in record and record["vector"] is not None:
+            # Convert to list for JSON serialization
+            vec = record["vector"]
+            vector = vec.tolist() if hasattr(vec, "tolist") else list(vec)
+
         return MemoryResult(
             id=record["id"],
             content=record["content"],
@@ -542,6 +607,7 @@ class LanceDBMemoryRepository:
             importance=record["importance"],
             created_at=record["created_at"],
             metadata=record.get("metadata", {}),
+            vector=vector,
         )
 
     # ========================================================================
@@ -586,6 +652,7 @@ class LanceDBMemoryRepository:
         query_vectors: list[np.ndarray],
        limit_per_query: int = 3,
        namespace: str | None = None,
+        include_vector: bool = False,
     ) -> list[list[dict[str, Any]]]:
         """Search for memories near multiple query points.
 
@@ -597,10 +664,13 @@ class LanceDBMemoryRepository:
             query_vectors: List of query embedding vectors.
             limit_per_query: Maximum results per query vector.
             namespace: Filter to specific namespace.
+            include_vector: Whether to include embedding vectors in results.
+                Defaults to False to reduce response size.
 
         Returns:
             List of result lists (one per query vector). Each result
             is a dict containing memory fields and similarity score.
+            If include_vector=True, each dict includes the 'vector' field.
 
         Raises:
             ValidationError: If input validation fails.
@@ -611,8 +681,7 @@ class LanceDBMemoryRepository:
                 query_vectors=query_vectors,
                 limit_per_query=limit_per_query,
                 namespace=namespace,
-
-                max_workers=4,
+                include_vector=include_vector,
             )
         except (ValidationError, StorageError):
             raise
```
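The repository hunks above widen the read/write surface: `search()` and the multi-query search gain an `include_vector` flag, `delete_batch()` now returns a `(count, deleted_ids)` tuple, and `get_batch()`/`update_batch()` are new. A hedged usage sketch; the repository constructor is not part of this diff, so the function below takes an already-built `LanceDBMemoryRepository` instance, and the update field name is illustrative:

```python
from __future__ import annotations

import numpy as np


def demo_batch_api(repo) -> None:
    """Illustrative walk-through of the new repository methods (names from the diff)."""
    query_vec = np.random.rand(384).astype(np.float32)  # illustrative embedding

    # include_vector defaults to False to keep responses small; opt in when needed.
    results = repo.search(query_vec, limit=5, namespace="default", include_vector=True)
    for r in results:
        print(r.id, len(r.vector) if r.vector is not None else "no vector")

    # Fetch several memories in one query; missing IDs are absent from the dict.
    found = repo.get_batch([r.id for r in results])

    # Batch update returns how many succeeded and which IDs failed.
    ok_count, failed_ids = repo.update_batch(
        [(memory_id, {"importance": 0.9}) for memory_id in found]  # field illustrative
    )

    # delete_batch now reports both the count and the IDs actually deleted.
    deleted_count, deleted_ids = repo.delete_batch(list(found))
    print(ok_count, failed_ids, deleted_count, deleted_ids)
```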
spatial_memory/config.py
CHANGED
```diff
@@ -1,7 +1,7 @@
 """Configuration system for Spatial Memory MCP Server."""
 
 from pathlib import Path
-from typing import Any
+from typing import Any, Literal
 
 from pydantic import Field, SecretStr
 from pydantic_settings import BaseSettings
@@ -27,6 +27,14 @@ class Settings(BaseSettings):
         default=Path("./.spatial-memory"),
         description="Path to LanceDB storage directory",
     )
+    acknowledge_network_filesystem_risk: bool = Field(
+        default=False,
+        description=(
+            "Set to True to suppress warnings about network filesystem usage. "
+            "File-based locking does not work reliably on NFS/SMB/CIFS. "
+            "Only set this if you are certain only one instance will access the storage."
+        ),
+    )
 
     # Embedding Model
     embedding_model: str = Field(
@@ -133,7 +141,7 @@ class Settings(BaseSettings):
         ge=1,
         description="Re-rank top (refine_factor * limit) candidates for accuracy",
     )
-    index_type:
+    index_type: Literal["IVF_PQ", "IVF_FLAT", "HNSW_SQ"] = Field(
         default="IVF_PQ",
         description="Vector index type: IVF_PQ, IVF_FLAT, or HNSW_SQ",
     )
```
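With `index_type` narrowed to a `Literal`, unsupported values are rejected when settings load rather than when an index is built. A small sketch using the field names from the diff; direct keyword construction of `Settings` is shown only for illustration (normal configuration still goes through whatever mechanism the package documents):

```python
from spatial_memory.config import Settings

# Valid: one of the three supported index types, plus the NFS/SMB warning opt-out.
settings = Settings(
    index_type="HNSW_SQ",
    acknowledge_network_filesystem_risk=True,
)
print(settings.index_type)

# Invalid: a value outside the Literal now fails pydantic validation up front.
# Settings(index_type="HNSW")  # raises a validation error
```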
spatial_memory/core/__init__.py
CHANGED
```diff
@@ -6,6 +6,10 @@ from spatial_memory.core.circuit_breaker import (
     CircuitState,
 )
 from spatial_memory.core.database import Database
+from spatial_memory.core.db_idempotency import IdempotencyManager, IdempotencyRecord
+from spatial_memory.core.db_indexes import IndexManager
+from spatial_memory.core.db_search import SearchManager
+from spatial_memory.core.db_versioning import VersionManager
 from spatial_memory.core.embeddings import EmbeddingService
 from spatial_memory.core.rate_limiter import RateLimiter
 from spatial_memory.core.errors import (
@@ -87,6 +91,11 @@ __all__ = [
     "FilterGroup",
     # Core services
     "Database",
+    "VersionManager",
+    "IndexManager",
+    "SearchManager",
+    "IdempotencyManager",
+    "IdempotencyRecord",
     "EmbeddingService",
     "RateLimiter",
     # Utilities
```
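The new manager classes are re-exported from the core package namespace, so downstream code can import them directly; a brief sketch (what each manager does is only suggested by its module name):

```python
# All of these names are added to spatial_memory.core.__all__ in this release.
from spatial_memory.core import (
    IdempotencyManager,
    IdempotencyRecord,
    IndexManager,
    SearchManager,
    VersionManager,
)
```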
spatial_memory/core/connection_pool.py
CHANGED
```diff
@@ -62,6 +62,7 @@ class ConnectionPool:
         self,
         uri: str,
         read_consistency_interval_ms: int = 0,
+        validate_health: bool = True,
         **kwargs: Any,
     ) -> DBConnection:
         """Get existing connection or create new one.
@@ -69,6 +70,7 @@ class ConnectionPool:
         Args:
             uri: Database URI/path.
             read_consistency_interval_ms: Read consistency interval.
+            validate_health: Whether to validate cached connection health (default: True).
             **kwargs: Additional args for lancedb.connect().
 
         Returns:
@@ -77,9 +79,21 @@ class ConnectionPool:
         with self._lock:
             # Check if exists
             if uri in self._connections:
-
-
-
+                conn = self._connections[uri]
+
+                # Optionally validate health of cached connection
+                if validate_health and not self._validate_connection(conn, uri):
+                    # Connection is stale, remove and create new
+                    logger.info(f"Stale connection detected for {uri}, recreating")
+                    self._connections.pop(uri)
+                    try:
+                        conn.close()
+                    except Exception:
+                        pass
+                else:
+                    # Connection is healthy, move to end (most recently used)
+                    self._connections.move_to_end(uri)
+                    return conn
 
             # Evict oldest if at capacity
             while len(self._connections) >= self._max_size:
@@ -98,6 +112,30 @@ class ConnectionPool:
             logger.debug(f"Created new connection for {uri} (pool size: {len(self._connections)})")
             return conn
 
+    def _validate_connection(self, conn: DBConnection, uri: str) -> bool:
+        """Validate that a cached connection is still healthy.
+
+        Performs a lightweight operation to verify the connection
+        is still usable.
+
+        Args:
+            conn: The connection to validate.
+            uri: URI for logging purposes.
+
+        Returns:
+            True if connection is healthy, False if stale.
+        """
+        try:
+            # List tables is a lightweight operation that validates connection
+            conn.table_names()
+            return True
+        except Exception as e:
+            if self.is_stale_connection_error(e):
+                logger.debug(f"Connection health check failed for {uri}: {e}")
+                return False
+            # Other errors might be transient, consider healthy
+            return True
+
     def _evict_oldest(self) -> None:
         """Evict the oldest (least recently used) connection."""
         if self._connections:
```