memorygraphMCP 0.11.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. memorygraph/__init__.py +50 -0
  2. memorygraph/__main__.py +12 -0
  3. memorygraph/advanced_tools.py +509 -0
  4. memorygraph/analytics/__init__.py +46 -0
  5. memorygraph/analytics/advanced_queries.py +727 -0
  6. memorygraph/backends/__init__.py +21 -0
  7. memorygraph/backends/base.py +179 -0
  8. memorygraph/backends/cloud.py +75 -0
  9. memorygraph/backends/cloud_backend.py +858 -0
  10. memorygraph/backends/factory.py +577 -0
  11. memorygraph/backends/falkordb_backend.py +749 -0
  12. memorygraph/backends/falkordblite_backend.py +746 -0
  13. memorygraph/backends/ladybugdb_backend.py +242 -0
  14. memorygraph/backends/memgraph_backend.py +327 -0
  15. memorygraph/backends/neo4j_backend.py +298 -0
  16. memorygraph/backends/sqlite_fallback.py +463 -0
  17. memorygraph/backends/turso.py +448 -0
  18. memorygraph/cli.py +743 -0
  19. memorygraph/cloud_database.py +297 -0
  20. memorygraph/config.py +295 -0
  21. memorygraph/database.py +933 -0
  22. memorygraph/graph_analytics.py +631 -0
  23. memorygraph/integration/__init__.py +69 -0
  24. memorygraph/integration/context_capture.py +426 -0
  25. memorygraph/integration/project_analysis.py +583 -0
  26. memorygraph/integration/workflow_tracking.py +492 -0
  27. memorygraph/intelligence/__init__.py +59 -0
  28. memorygraph/intelligence/context_retrieval.py +447 -0
  29. memorygraph/intelligence/entity_extraction.py +386 -0
  30. memorygraph/intelligence/pattern_recognition.py +420 -0
  31. memorygraph/intelligence/temporal.py +374 -0
  32. memorygraph/migration/__init__.py +27 -0
  33. memorygraph/migration/manager.py +579 -0
  34. memorygraph/migration/models.py +142 -0
  35. memorygraph/migration/scripts/__init__.py +17 -0
  36. memorygraph/migration/scripts/bitemporal_migration.py +595 -0
  37. memorygraph/migration/scripts/multitenancy_migration.py +452 -0
  38. memorygraph/migration_tools_module.py +146 -0
  39. memorygraph/models.py +684 -0
  40. memorygraph/proactive/__init__.py +46 -0
  41. memorygraph/proactive/outcome_learning.py +444 -0
  42. memorygraph/proactive/predictive.py +410 -0
  43. memorygraph/proactive/session_briefing.py +399 -0
  44. memorygraph/relationships.py +668 -0
  45. memorygraph/server.py +883 -0
  46. memorygraph/sqlite_database.py +1876 -0
  47. memorygraph/tools/__init__.py +59 -0
  48. memorygraph/tools/activity_tools.py +262 -0
  49. memorygraph/tools/memory_tools.py +315 -0
  50. memorygraph/tools/migration_tools.py +181 -0
  51. memorygraph/tools/relationship_tools.py +147 -0
  52. memorygraph/tools/search_tools.py +406 -0
  53. memorygraph/tools/temporal_tools.py +339 -0
  54. memorygraph/utils/__init__.py +10 -0
  55. memorygraph/utils/context_extractor.py +429 -0
  56. memorygraph/utils/error_handling.py +151 -0
  57. memorygraph/utils/export_import.py +425 -0
  58. memorygraph/utils/graph_algorithms.py +200 -0
  59. memorygraph/utils/pagination.py +149 -0
  60. memorygraph/utils/project_detection.py +133 -0
  61. memorygraphmcp-0.11.7.dist-info/METADATA +970 -0
  62. memorygraphmcp-0.11.7.dist-info/RECORD +65 -0
  63. memorygraphmcp-0.11.7.dist-info/WHEEL +4 -0
  64. memorygraphmcp-0.11.7.dist-info/entry_points.txt +2 -0
  65. memorygraphmcp-0.11.7.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,452 @@
1
+ """
2
+ 001_add_multitenancy - Migration to add multi-tenancy support.
3
+
4
+ This migration:
5
+ 1. Backfills tenant_id for existing memories (if specified)
6
+ 2. Sets visibility to 'team' for existing memories
7
+ 3. Creates multi-tenant indexes if not already present
8
+ 4. Supports rollback by removing tenant assignments
9
+
10
+ Usage:
11
+ from memorygraph.migration.scripts import migrate_to_multitenant
12
+
13
+ # Migrate with default tenant
14
+ await migrate_to_multitenant(backend, tenant_id="default")
15
+
16
+ # Rollback
17
+ await rollback_from_multitenant(backend)
18
+ """
19
+
20
+ import json
21
+ import logging
22
+ import re
23
+ from typing import Optional
24
+ from ...backends.base import GraphBackend
25
+ from ...backends.sqlite_fallback import SQLiteFallbackBackend
26
+ from ...backends.turso import TursoBackend
27
+ from ...models import DatabaseConnectionError, Memory
28
+
29
+ logger = logging.getLogger(__name__)
30
+
31
+
32
async def migrate_to_multitenant(
    backend: GraphBackend,
    tenant_id: str = "default",
    dry_run: bool = False,
    visibility: str = "team"
) -> dict:
    """
    Backfill tenant_id/visibility on every memory that lacks them, turning a
    single-tenant database into a multi-tenant one.

    Args:
        backend: Connected backend instance.
        tenant_id: Tenant ID assigned to existing memories (default "default").
        dry_run: When True, report what would change without writing anything.
        visibility: Visibility level written alongside the tenant (default "team").

    Returns:
        Statistics dict with keys: success, dry_run, memories_updated,
        tenant_id, visibility, errors.

    Raises:
        DatabaseConnectionError: If the backend is missing or not connected.
        ValueError: If tenant_id is empty/invalid or visibility is unknown.

    Example:
        >>> backend = SQLiteFallbackBackend()
        >>> await backend.connect()
        >>> result = await migrate_to_multitenant(backend, tenant_id="acme-corp")
        >>> print(f"Updated {result['memories_updated']} memories")
    """
    # Connection state is tracked via the `_connected` attribute on the
    # SQLite-family backends; a missing attribute counts as disconnected.
    if not backend or not getattr(backend, '_connected', False):
        raise DatabaseConnectionError("Backend must be connected before migration")

    # --- input validation: every failure raises before any work happens ---
    if not tenant_id or not tenant_id.strip():
        raise ValueError("tenant_id cannot be empty")
    if not re.match(r'^[a-zA-Z0-9-_]+$', tenant_id):
        raise ValueError(
            "tenant_id must contain only alphanumeric characters, dashes, and underscores"
        )
    if len(tenant_id) > 64:
        raise ValueError("tenant_id must be 64 characters or less")

    valid_visibility = ["private", "project", "team", "public"]
    if visibility not in valid_visibility:
        raise ValueError(f"visibility must be one of {valid_visibility}, got '{visibility}'")

    logger.info(f"Starting multi-tenancy migration (dry_run={dry_run})")
    logger.info(f"Assigning tenant_id='{tenant_id}', visibility='{visibility}'")

    errors = []
    memories_updated = 0
    succeeded = False

    try:
        # Dispatch on duck-typed capabilities rather than isinstance so the
        # check survives module reloads during testing. A live `conn`
        # attribute marks the SQLite family (SQLite, Turso).
        if getattr(backend, 'conn', None) is not None:
            memories_updated = await _migrate_sqlite_backend(
                backend, tenant_id, visibility, dry_run
            )
        elif hasattr(backend, 'execute_query'):
            # Cypher-speaking backends (Neo4j/Memgraph).
            memories_updated = await _migrate_graph_backend(
                backend, tenant_id, visibility, dry_run
            )
        else:
            raise ValueError(f"Unsupported backend type: {type(backend).__name__}")

        logger.info(f"Migration completed: {memories_updated} memories processed")
        succeeded = True
    except Exception as exc:
        logger.error(f"Migration failed: {exc}")
        errors.append(str(exc))

    # One result shape for both outcomes; `success` reflects whether the
    # dispatch above completed without raising.
    return {
        "success": succeeded,
        "dry_run": dry_run,
        "memories_updated": memories_updated,
        "tenant_id": tenant_id,
        "visibility": visibility,
        "errors": errors
    }
139
+
140
+
141
async def _migrate_sqlite_backend(
    backend: SQLiteFallbackBackend,
    tenant_id: str,
    visibility: str,
    dry_run: bool
) -> int:
    """
    Migrate SQLite-based backend to multi-tenant mode.

    Properties are stored in a flat structure with context fields prefixed
    with 'context_' (e.g., 'context_tenant_id', 'context_visibility').
    Every Memory node whose flat tenant_id is missing or empty gets the
    supplied tenant_id/visibility written into its properties JSON.

    Args:
        backend: SQLite backend instance (must expose a live `conn`).
        tenant_id: Tenant ID to assign.
        visibility: Visibility level to set.
        dry_run: If True, only count without updating.

    Returns:
        Number of memories updated (or that would be updated in dry-run mode).
    """
    cursor = backend.conn.cursor()

    # Shared predicate: Memory nodes with no (or empty) flat tenant_id.
    # Properties use 'context_tenant_id', not nested 'context.tenant_id'.
    criteria = """
        WHERE label = 'Memory'
        AND (
            json_extract(properties, '$.context_tenant_id') IS NULL
            OR json_extract(properties, '$.context_tenant_id') = ''
        )
    """

    cursor.execute(f"SELECT COUNT(*) FROM nodes {criteria}")
    count = cursor.fetchone()[0]
    logger.info(f"Found {count} memories without tenant_id")

    if dry_run:
        logger.info(f"DRY RUN: Would update {count} memories")
        return count

    cursor.execute(f"SELECT id, properties FROM nodes {criteria}")
    rows = cursor.fetchall()

    # Prepare all rewritten property blobs first, then apply them in one
    # executemany batch instead of one UPDATE round-trip per row.
    params = []
    for node_id, raw_properties in rows:
        # Robustness fix: a NULL/empty properties column previously crashed
        # json.loads(); treat it as an empty property set instead.
        properties = json.loads(raw_properties) if raw_properties else {}
        properties['context_tenant_id'] = tenant_id
        properties['context_visibility'] = visibility
        params.append((json.dumps(properties), node_id))

    cursor.executemany("""
        UPDATE nodes
        SET properties = ?,
            updated_at = CURRENT_TIMESTAMP
        WHERE id = ?
    """, params)

    backend.conn.commit()
    updated = len(params)
    logger.info(f"Updated {updated} memories with tenant_id='{tenant_id}'")

    return updated
217
+
218
+
219
async def _migrate_graph_backend(
    backend: GraphBackend,
    tenant_id: str,
    visibility: str,
    dry_run: bool
) -> int:
    """
    Assign tenant_id/visibility on a Cypher graph backend (Neo4j/Memgraph).

    Args:
        backend: Graph backend instance.
        tenant_id: Tenant ID to assign.
        visibility: Visibility level to set.
        dry_run: When True, only count affected memories.

    Returns:
        Number of memories updated (or that would be updated in dry-run mode).
    """
    # How many Memory nodes still lack a tenant assignment?
    rows = await backend.execute_query(
        """
        MATCH (m:Memory)
        WHERE m.context_tenant_id IS NULL OR m.context_tenant_id = ''
        RETURN count(m) as count
        """
    )
    pending = rows[0]['count'] if rows else 0
    logger.info(f"Found {pending} memories without tenant_id")

    if dry_run:
        logger.info(f"DRY RUN: Would update {pending} memories")
        return pending

    # Write tenant_id/visibility in a single parameterized statement.
    result = await backend.execute_query(
        """
        MATCH (m:Memory)
        WHERE m.context_tenant_id IS NULL OR m.context_tenant_id = ''
        SET m.context_tenant_id = $tenant_id,
            m.context_visibility = $visibility,
            m.updated_at = timestamp()
        RETURN count(m) as updated
        """,
        {"tenant_id": tenant_id, "visibility": visibility},
        write=True
    )
    updated = result[0]['updated'] if result else 0
    logger.info(f"Updated {updated} memories with tenant_id='{tenant_id}'")

    return updated
273
+
274
+
275
async def rollback_from_multitenant(
    backend: GraphBackend,
    dry_run: bool = False
) -> dict:
    """
    Undo the multi-tenancy migration by clearing tenant assignments.

    Tenant fields are set to NULL rather than deleted, so multi-tenancy can
    be re-enabled later without a schema change.

    Args:
        backend: Connected backend instance.
        dry_run: When True, only report what would change.

    Returns:
        Statistics dict with keys: success, dry_run, memories_updated, errors.

    Raises:
        DatabaseConnectionError: If the backend is missing or not connected.

    Example:
        >>> result = await rollback_from_multitenant(backend)
        >>> print(f"Rolled back {result['memories_updated']} memories")
    """
    # `_connected` is how the SQLite-family backends expose connection state;
    # a missing attribute counts as disconnected.
    if not backend or not getattr(backend, '_connected', False):
        raise DatabaseConnectionError("Backend must be connected before rollback")

    logger.info(f"Starting multi-tenancy rollback (dry_run={dry_run})")

    errors = []
    memories_updated = 0
    succeeded = False

    try:
        # Duck-typed dispatch: a live `conn` attribute means the SQLite
        # family, an `execute_query` method means a Cypher graph backend.
        if getattr(backend, 'conn', None) is not None:
            memories_updated = await _rollback_sqlite_backend(backend, dry_run)
        elif hasattr(backend, 'execute_query'):
            memories_updated = await _rollback_graph_backend(backend, dry_run)
        else:
            raise ValueError(f"Unsupported backend type: {type(backend).__name__}")

        logger.info(f"Rollback completed: {memories_updated} memories processed")
        succeeded = True
    except Exception as exc:
        logger.error(f"Rollback failed: {exc}")
        errors.append(str(exc))

    # Single result shape for both outcomes.
    return {
        "success": succeeded,
        "dry_run": dry_run,
        "memories_updated": memories_updated,
        "errors": errors
    }
336
+
337
+
338
async def _rollback_sqlite_backend(
    backend: SQLiteFallbackBackend,
    dry_run: bool
) -> int:
    """
    Rollback SQLite backend from multi-tenant mode.

    Properties are stored in a flat structure with context fields prefixed
    with 'context_' (e.g., 'context_tenant_id', 'context_visibility').
    Every Memory node carrying a non-empty tenant_id gets it cleared (set to
    JSON null, preserving the field) and its visibility reset to the
    single-tenant default 'project'.

    Args:
        backend: SQLite backend instance (must expose a live `conn`).
        dry_run: If True, only count without updating.

    Returns:
        Number of memories updated (or that would be updated in dry-run mode).
    """
    cursor = backend.conn.cursor()

    # Shared predicate: Memory nodes with a non-empty flat tenant_id.
    criteria = """
        WHERE label = 'Memory'
        AND json_extract(properties, '$.context_tenant_id') IS NOT NULL
        AND json_extract(properties, '$.context_tenant_id') != ''
    """

    cursor.execute(f"SELECT COUNT(*) FROM nodes {criteria}")
    count = cursor.fetchone()[0]
    logger.info(f"Found {count} memories with tenant_id")

    if dry_run:
        logger.info(f"DRY RUN: Would clear tenant_id from {count} memories")
        return count

    cursor.execute(f"SELECT id, properties FROM nodes {criteria}")
    rows = cursor.fetchall()

    # Prepare all rewritten property blobs first, then apply them in one
    # executemany batch instead of one UPDATE round-trip per row
    # (mirrors _migrate_sqlite_backend).
    params = []
    for node_id, raw_properties in rows:
        # Defensive: tolerate a NULL/empty properties column instead of
        # crashing json.loads() (predicate normally excludes such rows).
        properties = json.loads(raw_properties) if raw_properties else {}
        properties['context_tenant_id'] = None  # JSON null; field preserved
        properties['context_visibility'] = 'project'
        params.append((json.dumps(properties), node_id))

    cursor.executemany("""
        UPDATE nodes
        SET properties = ?,
            updated_at = CURRENT_TIMESTAMP
        WHERE id = ?
    """, params)

    backend.conn.commit()
    updated = len(params)
    logger.info(f"Cleared tenant_id from {updated} memories")

    return updated
405
+
406
+
407
async def _rollback_graph_backend(
    backend: GraphBackend,
    dry_run: bool
) -> int:
    """
    Clear tenant assignments from a Cypher graph backend (Neo4j/Memgraph).

    Args:
        backend: Graph backend instance.
        dry_run: When True, only count affected memories.

    Returns:
        Number of memories updated (or that would be updated in dry-run mode).
    """
    # How many Memory nodes currently carry a tenant assignment?
    rows = await backend.execute_query(
        """
        MATCH (m:Memory)
        WHERE m.context_tenant_id IS NOT NULL
        RETURN count(m) as count
        """
    )
    affected = rows[0]['count'] if rows else 0
    logger.info(f"Found {affected} memories with tenant_id")

    if dry_run:
        logger.info(f"DRY RUN: Would clear tenant_id from {affected} memories")
        return affected

    # Setting a property to NULL removes it in Cypher; visibility falls back
    # to the single-tenant default 'project'.
    result = await backend.execute_query(
        """
        MATCH (m:Memory)
        WHERE m.context_tenant_id IS NOT NULL
        SET m.context_tenant_id = NULL,
            m.context_visibility = 'project',
            m.updated_at = timestamp()
        RETURN count(m) as updated
        """,
        write=True
    )
    updated = result[0]['updated'] if result else 0
    logger.info(f"Cleared tenant_id from {updated} memories")

    return updated
@@ -0,0 +1,146 @@
1
+ """
2
+ Migration tools module - MCP tool definitions and handlers for database migration.
3
+ """
4
+
5
+ from mcp.types import Tool
6
+ from .tools.migration_tools import handle_migrate_database, handle_validate_migration
7
+
8
# Tool definitions for MCP
# Each Tool pairs an LLM-facing description (when to use, examples, return
# shape) with a JSON schema that validates the tool-call arguments.
MIGRATION_TOOLS = [
    # The actual cross-backend data mover.
    Tool(
        name="migrate_database",
        description="""Migrate memories from current backend to another backend (e.g., SQLite → FalkorDB).

WHEN TO USE:
- Moving from development (SQLite) to production (FalkorDB, Neo4j)
- Switching backend providers
- Disaster recovery to different backend
- Testing performance across backends
- Backend consolidation or splitting

HOW TO USE:
- Always use dry_run=True first to validate
- Specify target_backend type (sqlite, neo4j, memgraph, falkordb, falkordblite)
- Provide target_config with connection details
- Set verify=True to ensure data integrity
- Migration includes memories and relationships

SAFETY FEATURES:
- Dry-run mode validates without changes
- Verification checks data integrity
- Automatic rollback on failure
- Progress reporting for large migrations

EXAMPLES:
- Validate: migrate_database(target_backend="falkordb", target_config={"uri": "redis://prod:6379"}, dry_run=True)
- Migrate: migrate_database(target_backend="falkordb", target_config={"uri": "redis://prod:6379"}, verify=True)
- Test: migrate_database(target_backend="sqlite", target_config={"path": "/tmp/test.db"})

RETURNS:
- success: Boolean indicating if migration succeeded
- imported_memories: Number of memories migrated
- imported_relationships: Number of relationships migrated
- verification: Data integrity check results
- errors: Any errors encountered""",
        inputSchema={
            "type": "object",
            "properties": {
                "target_backend": {
                    "type": "string",
                    "enum": ["sqlite", "neo4j", "memgraph", "falkordb", "falkordblite"],
                    "description": "Target backend type to migrate to"
                },
                # Connection details; which keys apply depends on the backend
                # family (path for file-based, uri/credentials for servers).
                "target_config": {
                    "type": "object",
                    "properties": {
                        "path": {
                            "type": "string",
                            "description": "Database path (for sqlite/falkordblite)"
                        },
                        "uri": {
                            "type": "string",
                            "description": "Database URI (for neo4j/memgraph/falkordb)"
                        },
                        "username": {
                            "type": "string",
                            "description": "Database username (optional)"
                        },
                        "password": {
                            "type": "string",
                            "description": "Database password (optional)"
                        },
                        "database": {
                            "type": "string",
                            "description": "Database name (optional)"
                        }
                    },
                    "description": "Target backend configuration"
                },
                "dry_run": {
                    "type": "boolean",
                    "default": False,
                    "description": "Validate without making changes (RECOMMENDED: use true first)"
                },
                "skip_duplicates": {
                    "type": "boolean",
                    "default": True,
                    "description": "Skip memories that already exist in target"
                },
                "verify": {
                    "type": "boolean",
                    "default": True,
                    "description": "Verify data integrity after migration"
                }
            },
            "required": ["target_backend"]
        }
    ),
    # Read-only convenience wrapper: migrate_database with dry_run forced on.
    Tool(
        name="validate_migration",
        description="""Validate that migration to target backend would succeed without making changes.

This is a convenience wrapper for migrate_database with dry_run=True.

WHEN TO USE:
- Before running actual migration
- Checking if target backend is accessible
- Estimating migration size and duration
- Validating target configuration

CHECKS PERFORMED:
- Source backend accessible
- Target backend accessible
- Backend compatibility
- Configuration validity
- Data export feasibility

EXAMPLES:
- validate_migration(target_backend="falkordb", target_config={"uri": "redis://prod:6379"})
- validate_migration(target_backend="neo4j", target_config={"uri": "bolt://localhost:7687", "username": "neo4j", "password": "password"})

RETURNS:
- Same as migrate_database but with dry_run=True
- No data is written to target""",
        inputSchema={
            "type": "object",
            "properties": {
                "target_backend": {
                    "type": "string",
                    "enum": ["sqlite", "neo4j", "memgraph", "falkordb", "falkordblite"],
                    "description": "Target backend type to validate migration to"
                },
                "target_config": {
                    "type": "object",
                    "description": "Target backend configuration"
                }
            },
            "required": ["target_backend"]
        }
    )
]

# Tool handlers mapping
# Maps each MCP tool name to the async handler that implements it
# (defined in tools/migration_tools.py).
MIGRATION_TOOL_HANDLERS = {
    "migrate_database": handle_migrate_database,
    "validate_migration": handle_validate_migration
}