memorygraphMCP 0.11.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- memorygraph/__init__.py +50 -0
- memorygraph/__main__.py +12 -0
- memorygraph/advanced_tools.py +509 -0
- memorygraph/analytics/__init__.py +46 -0
- memorygraph/analytics/advanced_queries.py +727 -0
- memorygraph/backends/__init__.py +21 -0
- memorygraph/backends/base.py +179 -0
- memorygraph/backends/cloud.py +75 -0
- memorygraph/backends/cloud_backend.py +858 -0
- memorygraph/backends/factory.py +577 -0
- memorygraph/backends/falkordb_backend.py +749 -0
- memorygraph/backends/falkordblite_backend.py +746 -0
- memorygraph/backends/ladybugdb_backend.py +242 -0
- memorygraph/backends/memgraph_backend.py +327 -0
- memorygraph/backends/neo4j_backend.py +298 -0
- memorygraph/backends/sqlite_fallback.py +463 -0
- memorygraph/backends/turso.py +448 -0
- memorygraph/cli.py +743 -0
- memorygraph/cloud_database.py +297 -0
- memorygraph/config.py +295 -0
- memorygraph/database.py +933 -0
- memorygraph/graph_analytics.py +631 -0
- memorygraph/integration/__init__.py +69 -0
- memorygraph/integration/context_capture.py +426 -0
- memorygraph/integration/project_analysis.py +583 -0
- memorygraph/integration/workflow_tracking.py +492 -0
- memorygraph/intelligence/__init__.py +59 -0
- memorygraph/intelligence/context_retrieval.py +447 -0
- memorygraph/intelligence/entity_extraction.py +386 -0
- memorygraph/intelligence/pattern_recognition.py +420 -0
- memorygraph/intelligence/temporal.py +374 -0
- memorygraph/migration/__init__.py +27 -0
- memorygraph/migration/manager.py +579 -0
- memorygraph/migration/models.py +142 -0
- memorygraph/migration/scripts/__init__.py +17 -0
- memorygraph/migration/scripts/bitemporal_migration.py +595 -0
- memorygraph/migration/scripts/multitenancy_migration.py +452 -0
- memorygraph/migration_tools_module.py +146 -0
- memorygraph/models.py +684 -0
- memorygraph/proactive/__init__.py +46 -0
- memorygraph/proactive/outcome_learning.py +444 -0
- memorygraph/proactive/predictive.py +410 -0
- memorygraph/proactive/session_briefing.py +399 -0
- memorygraph/relationships.py +668 -0
- memorygraph/server.py +883 -0
- memorygraph/sqlite_database.py +1876 -0
- memorygraph/tools/__init__.py +59 -0
- memorygraph/tools/activity_tools.py +262 -0
- memorygraph/tools/memory_tools.py +315 -0
- memorygraph/tools/migration_tools.py +181 -0
- memorygraph/tools/relationship_tools.py +147 -0
- memorygraph/tools/search_tools.py +406 -0
- memorygraph/tools/temporal_tools.py +339 -0
- memorygraph/utils/__init__.py +10 -0
- memorygraph/utils/context_extractor.py +429 -0
- memorygraph/utils/error_handling.py +151 -0
- memorygraph/utils/export_import.py +425 -0
- memorygraph/utils/graph_algorithms.py +200 -0
- memorygraph/utils/pagination.py +149 -0
- memorygraph/utils/project_detection.py +133 -0
- memorygraphmcp-0.11.7.dist-info/METADATA +970 -0
- memorygraphmcp-0.11.7.dist-info/RECORD +65 -0
- memorygraphmcp-0.11.7.dist-info/WHEEL +4 -0
- memorygraphmcp-0.11.7.dist-info/entry_points.txt +2 -0
- memorygraphmcp-0.11.7.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,579 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Migration manager for backend-to-backend memory migration.
|
|
3
|
+
|
|
4
|
+
Provides a comprehensive migration system with validation, verification, and rollback.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import logging
|
|
8
|
+
import tempfile
|
|
9
|
+
import time
|
|
10
|
+
from datetime import datetime
|
|
11
|
+
from pathlib import Path
|
|
12
|
+
from typing import Dict, Any, List, Optional, Callable
|
|
13
|
+
|
|
14
|
+
from .models import (
|
|
15
|
+
BackendConfig,
|
|
16
|
+
MigrationOptions,
|
|
17
|
+
MigrationResult,
|
|
18
|
+
ValidationResult,
|
|
19
|
+
VerificationResult
|
|
20
|
+
)
|
|
21
|
+
from ..backends.factory import BackendFactory
|
|
22
|
+
from ..database import MemoryDatabase
|
|
23
|
+
from ..utils.export_import import export_to_json, import_from_json
|
|
24
|
+
from ..utils.pagination import count_memories, count_relationships, paginate_memories, get_all_memories
|
|
25
|
+
from ..models import SearchQuery
|
|
26
|
+
|
|
27
|
+
logger = logging.getLogger(__name__)
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class MigrationError(Exception):
    """Raised when any phase of a backend migration fails."""
|
33
|
+
|
|
34
|
+
|
|
35
|
+
class MigrationManager:
    """
    Manages backend-to-backend memory migrations.

    Performs migrations in 5 phases:
    1. Pre-flight validation (backends accessible, compatible)
    2. Export from source
    3. Validate export data
    4. Import to target (if not dry-run)
    5. Verify migration

    Supports rollback on failure and dry-run mode for validation.
    """

    async def migrate(
        self,
        source_config: BackendConfig,
        target_config: BackendConfig,
        options: MigrationOptions
    ) -> MigrationResult:
        """
        Migrate memories from source backend to target backend.

        Args:
            source_config: Source backend configuration
            target_config: Target backend configuration
            options: Migration options (dry_run, verify, etc.)

        Returns:
            MigrationResult with statistics and any errors

        Raises:
            MigrationError: If migration fails
        """
        start_time = time.time()
        logger.info(f"Starting migration: {source_config.backend_type.value} → {target_config.backend_type.value}")

        temp_export: Optional[Path] = None

        try:
            # Phase 1: Pre-flight validation
            logger.info("Phase 1: Pre-flight validation")
            await self._validate_source(source_config)
            await self._validate_target(target_config)
            await self._check_compatibility(source_config, target_config)

            # Phase 2: Export from source
            logger.info("Phase 2: Exporting from source")
            temp_export = await self._export_from_source(source_config, options)

            # Phase 3: Validate export
            logger.info("Phase 3: Validating export")
            validation_result = await self._validate_export(temp_export)
            if not validation_result.valid:
                raise MigrationError(f"Export validation failed: {validation_result.errors}")

            if options.dry_run:
                logger.info("Dry-run mode: Skipping import phase")
                source_stats = await self._get_backend_stats(source_config)
                # FIX: the dry-run path previously returned without removing the
                # temporary export file, leaking one file per dry-run invocation.
                await self._cleanup_temp_files(temp_export)
                return MigrationResult(
                    success=True,
                    dry_run=True,
                    source_stats=source_stats,
                    duration_seconds=time.time() - start_time
                )

            # Phase 4: Import to target
            logger.info("Phase 4: Importing to target")
            import_stats = await self._import_to_target(target_config, temp_export, options)

            # Phase 5: Verify migration (optional)
            verification_result = None
            if options.verify:
                logger.info("Phase 5: Verifying migration")
                verification_result = await self._verify_migration(
                    source_config,
                    target_config,
                    temp_export
                )

                if not verification_result.valid and options.rollback_on_failure:
                    logger.error("Verification failed, rolling back...")
                    await self._rollback_target(target_config)
                    raise MigrationError(f"Verification failed: {verification_result.errors}")

            # Phase 6: Cleanup of the temporary export file
            logger.info("Phase 6: Cleanup")
            await self._cleanup_temp_files(temp_export)

            source_stats = await self._get_backend_stats(source_config)
            target_stats = await self._get_backend_stats(target_config)

            logger.info("Migration completed successfully")
            return MigrationResult(
                success=True,
                source_stats=source_stats,
                target_stats=target_stats,
                imported_memories=import_stats["imported_memories"],
                imported_relationships=import_stats["imported_relationships"],
                skipped_memories=import_stats["skipped_memories"],
                verification_result=verification_result,
                duration_seconds=time.time() - start_time
            )

        except Exception as e:
            logger.error(f"Migration failed: {e}", exc_info=True)

            # Best-effort cleanup on failure; never mask the original error.
            if temp_export and temp_export.exists():
                try:
                    await self._cleanup_temp_files(temp_export)
                except Exception as cleanup_error:
                    logger.warning(f"Failed to cleanup temp files: {cleanup_error}")

            return MigrationResult(
                success=False,
                duration_seconds=time.time() - start_time,
                errors=[str(e)]
            )

    @staticmethod
    def _make_db(backend):
        """
        Wrap a connected backend in the appropriate database facade.

        SQLite-based backends require the SQLite-specific database wrapper;
        all other backends use the generic graph MemoryDatabase. Extracted
        here because this selection was previously duplicated in five methods.
        """
        from ..backends.sqlite_fallback import SQLiteFallbackBackend
        from ..backends.falkordblite_backend import FalkorDBLiteBackend
        from ..sqlite_database import SQLiteMemoryDatabase

        if isinstance(backend, (SQLiteFallbackBackend, FalkorDBLiteBackend)):
            return SQLiteMemoryDatabase(backend)
        return MemoryDatabase(backend)

    async def _validate_source(self, config: BackendConfig) -> None:
        """
        Validate source backend is accessible and healthy.

        Raises:
            MigrationError: If source backend is not accessible
        """
        # Validate configuration before attempting a connection.
        config_errors = config.validate()
        if config_errors:
            raise MigrationError(f"Invalid source configuration: {', '.join(config_errors)}")

        backend = await self._create_backend(config)
        try:
            health = await backend.health_check()
            if not health.get("connected"):
                raise MigrationError(f"Source backend not accessible: {health.get('error')}")

            stats = health.get("statistics", {})
            memory_count = stats.get("memory_count", 0)
            logger.info(f"Source backend healthy: {memory_count} memories")

            # An empty source is legal (e.g. testing) but worth flagging.
            if memory_count == 0:
                logger.warning("Source backend is empty")

        finally:
            await backend.disconnect()

    async def _validate_target(self, config: BackendConfig) -> None:
        """
        Validate target backend is accessible and writable.

        Raises:
            MigrationError: If target backend is not accessible
        """
        config_errors = config.validate()
        if config_errors:
            raise MigrationError(f"Invalid target configuration: {', '.join(config_errors)}")

        backend = await self._create_backend(config)
        try:
            health = await backend.health_check()
            if not health.get("connected"):
                raise MigrationError(f"Target backend not accessible: {health.get('error')}")

            # Warn if target already has data; migration is additive.
            stats = health.get("statistics", {})
            memory_count = stats.get("memory_count", 0)
            if memory_count > 0:
                logger.warning(f"Target backend already contains {memory_count} memories. Migration will add to existing data.")

            logger.info("Target backend accessible and writable")

        finally:
            await backend.disconnect()

    async def _check_compatibility(
        self,
        source_config: BackendConfig,
        target_config: BackendConfig
    ) -> None:
        """
        Check if migration between these backends is supported.

        All backends use the same GraphBackend interface, so all migrations
        are technically supported. This method checks for feature parity warnings.
        """
        if source_config.backend_type == target_config.backend_type:
            logger.warning(f"Source and target are the same backend type ({source_config.backend_type.value})")

        # All backends are compatible for migration.
        logger.info("Backend compatibility check passed")

    async def _export_from_source(
        self,
        config: BackendConfig,
        options: MigrationOptions
    ) -> Path:
        """
        Export data from source backend to temporary file.

        Returns:
            Path to temporary export file

        Raises:
            MigrationError: If export fails
        """
        backend = await self._create_backend(config)
        db = self._make_db(backend)

        try:
            # Timestamped file inside a dedicated temp subdirectory so
            # concurrent migrations cannot collide on a fixed name.
            temp_dir = Path(tempfile.gettempdir()) / "memorygraph_migration"
            temp_dir.mkdir(exist_ok=True, parents=True)
            export_path = temp_dir / f"migration_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"

            # Universal export shared with the CLI export tooling.
            progress_callback = self._report_progress if options.verbose else None
            await export_to_json(db, str(export_path), progress_callback=progress_callback)

            logger.info(f"Export complete: {export_path}")
            return export_path

        except Exception as e:
            logger.error(f"Export failed: {e}")
            # Chain the cause so the original traceback is preserved.
            raise MigrationError(f"Export failed: {e}") from e

        finally:
            await backend.disconnect()

    async def _validate_export(self, export_path: Path) -> ValidationResult:
        """
        Validate exported data integrity.

        Returns:
            ValidationResult indicating if export is valid
        """
        # Imported up front so json.JSONDecodeError in the except clause is
        # always resolvable, even if the try body is later reordered.
        import json

        errors: List[str] = []
        warnings: List[str] = []

        try:
            # Check file exists and is readable
            if not export_path.exists():
                errors.append(f"Export file not found: {export_path}")
                return ValidationResult(valid=False, errors=errors)

            # Load and validate JSON structure
            with open(export_path, 'r') as f:
                data = json.load(f)

            # Check required top-level fields
            if "memories" not in data:
                errors.append("Export missing 'memories' field")
            if "relationships" not in data:
                errors.append("Export missing 'relationships' field")

            # Either version key is accepted (older exports used export_version).
            if "format_version" not in data and "export_version" not in data:
                errors.append("Export missing version information")

            # Zero memories is a warning, not an error.
            memory_count = len(data.get("memories", []))
            if memory_count == 0:
                warnings.append("Export contains zero memories")
            else:
                logger.info(f"Export contains {memory_count} memories")

            relationship_count = len(data.get("relationships", []))
            logger.info(f"Export contains {relationship_count} relationships")

        except json.JSONDecodeError as e:
            errors.append(f"Invalid JSON format: {e}")
        except Exception as e:
            errors.append(f"Validation failed: {e}")

        return ValidationResult(
            valid=(len(errors) == 0),
            errors=errors,
            warnings=warnings
        )

    async def _import_to_target(
        self,
        config: BackendConfig,
        export_path: Path,
        options: MigrationOptions
    ) -> Dict[str, int]:
        """
        Import data to target backend.

        Returns:
            Dictionary with import statistics

        Raises:
            MigrationError: If import fails
        """
        backend = await self._create_backend(config)
        db = self._make_db(backend)

        try:
            # Import with optional progress reporting.
            progress_callback = self._report_progress if options.verbose else None
            import_result = await import_from_json(
                db,
                str(export_path),
                skip_duplicates=options.skip_duplicates,
                progress_callback=progress_callback
            )

            logger.info(
                f"Import complete: {import_result['imported_memories']} memories, "
                f"{import_result['imported_relationships']} relationships"
            )

            return import_result

        except Exception as e:
            logger.error(f"Import failed: {e}")
            raise MigrationError(f"Import failed: {e}") from e

        finally:
            await backend.disconnect()

    async def _verify_migration(
        self,
        source_config: BackendConfig,
        target_config: BackendConfig,
        export_path: Path
    ) -> VerificationResult:
        """
        Verify target backend has same data as source.

        Returns:
            VerificationResult with detailed comparison
        """
        source_backend = await self._create_backend(source_config)
        try:
            target_backend = await self._create_backend(target_config)
        except Exception:
            # FIX: previously the source connection leaked if target creation failed.
            await source_backend.disconnect()
            raise

        source_db = self._make_db(source_backend)
        target_db = self._make_db(target_backend)

        errors: List[str] = []

        try:
            # Compare memory counts.
            source_count = await self._count_memories(source_db)
            target_count = await self._count_memories(target_db)

            logger.info(f"Memory count - Source: {source_count}, Target: {target_count}")

            if source_count != target_count:
                errors.append(f"Memory count mismatch: source={source_count}, target={target_count}")

            # Compare relationship counts.
            source_rels = await self._count_relationships(source_db)
            target_rels = await self._count_relationships(target_db)

            logger.info(f"Relationship count - Source: {source_rels}, Target: {target_rels}")

            if source_rels != target_rels:
                errors.append(f"Relationship count mismatch: source={source_rels}, target={target_rels}")

            # Spot-check up to 10 random memories for presence and content equality.
            sample_size = min(10, source_count)
            sample_passed = 0

            if sample_size > 0:
                sample_memories = await self._get_random_sample(source_db, sample_size)
                for memory in sample_memories:
                    target_memory = await target_db.get_memory(memory.id, include_relationships=False)
                    if not target_memory:
                        errors.append(f"Memory {memory.id} not found in target")
                    elif target_memory.content != memory.content:
                        errors.append(f"Memory {memory.id} content mismatch")
                    else:
                        sample_passed += 1

                logger.info(f"Sample verification: {sample_passed}/{sample_size} passed")

            return VerificationResult(
                valid=(len(errors) == 0),
                errors=errors,
                source_count=source_count,
                target_count=target_count,
                sample_checks=sample_size,
                sample_passed=sample_passed
            )

        except Exception as e:
            logger.error(f"Verification failed: {e}")
            errors.append(f"Verification error: {e}")
            return VerificationResult(valid=False, errors=errors)

        finally:
            await source_backend.disconnect()
            await target_backend.disconnect()

    async def _rollback_target(self, config: BackendConfig) -> None:
        """
        Rollback target backend to pre-migration state.

        WARNING: This deletes ALL data in target backend.
        In future, could track imported IDs and delete only those.

        Raises:
            MigrationError: If rollback fails
        """
        logger.warning("Rolling back target backend (deleting all data)...")
        backend = await self._create_backend(config)

        try:
            db = self._make_db(backend)

            # Prefer the backend's bulk clear when available; otherwise delete
            # memories one by one (relationship deletion cascades).
            if hasattr(backend, 'clear_all_data'):
                await backend.clear_all_data()
            else:
                all_memories = await get_all_memories(db)
                for memory in all_memories:
                    await db.delete_memory(memory.id)

            logger.info("Rollback complete")

        except Exception as e:
            logger.error(f"Rollback failed: {e}")
            raise MigrationError(f"Rollback failed: {e}") from e

        finally:
            await backend.disconnect()

    async def _create_backend(self, config: BackendConfig):
        """
        Create a backend instance from configuration.

        Uses thread-safe BackendFactory.create_from_config() method that doesn't
        mutate environment variables.

        Returns:
            Connected GraphBackend instance

        Raises:
            MigrationError: If backend creation fails
        """
        try:
            # Thread-safe factory path that accepts the config directly.
            backend = await BackendFactory.create_from_config(config)
            return backend

        except Exception as e:
            logger.error(f"Failed to create backend: {e}")
            raise MigrationError(f"Failed to create backend: {e}") from e

    async def _get_backend_stats(self, config: BackendConfig) -> Dict[str, Any]:
        """Get statistics from a backend via its health check."""
        backend = await self._create_backend(config)
        try:
            health = await backend.health_check()
            return health.get("statistics", {})
        finally:
            await backend.disconnect()

    async def _count_memories(self, db: MemoryDatabase) -> int:
        """Count total memories in database."""
        return await count_memories(db)

    async def _count_relationships(self, db: MemoryDatabase) -> int:
        """Count total relationships in database."""
        return await count_relationships(db)

    async def _get_random_sample(self, db: MemoryDatabase, sample_size: int) -> List:
        """Get a random sample of up to sample_size memories from database."""
        import random

        all_memories = await get_all_memories(db)

        # If the population is small, return it whole rather than sampling.
        if len(all_memories) <= sample_size:
            return all_memories
        return random.sample(all_memories, sample_size)

    async def _cleanup_temp_files(self, export_path: Path) -> None:
        """Delete temporary export files (best-effort; failures only warn)."""
        try:
            if export_path.exists():
                export_path.unlink()
                logger.info(f"Cleaned up temporary file: {export_path}")

            # Remove the temp directory too, but only when it is empty
            # (another concurrent migration may still be using it).
            temp_dir = export_path.parent
            if temp_dir.exists() and not any(temp_dir.iterdir()):
                temp_dir.rmdir()
                logger.info(f"Cleaned up temporary directory: {temp_dir}")

        except Exception as e:
            logger.warning(f"Failed to cleanup temp files: {e}")

    def _report_progress(self, current: int, total: int) -> None:
        """Report migration progress to user (for verbose mode)."""
        if total > 0:
            percent = (current / total * 100)
            logger.info(f"Progress: {current}/{total} ({percent:.1f}%)")