memorygraphMCP 0.11.7__py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
- memorygraph/__init__.py +50 -0
- memorygraph/__main__.py +12 -0
- memorygraph/advanced_tools.py +509 -0
- memorygraph/analytics/__init__.py +46 -0
- memorygraph/analytics/advanced_queries.py +727 -0
- memorygraph/backends/__init__.py +21 -0
- memorygraph/backends/base.py +179 -0
- memorygraph/backends/cloud.py +75 -0
- memorygraph/backends/cloud_backend.py +858 -0
- memorygraph/backends/factory.py +577 -0
- memorygraph/backends/falkordb_backend.py +749 -0
- memorygraph/backends/falkordblite_backend.py +746 -0
- memorygraph/backends/ladybugdb_backend.py +242 -0
- memorygraph/backends/memgraph_backend.py +327 -0
- memorygraph/backends/neo4j_backend.py +298 -0
- memorygraph/backends/sqlite_fallback.py +463 -0
- memorygraph/backends/turso.py +448 -0
- memorygraph/cli.py +743 -0
- memorygraph/cloud_database.py +297 -0
- memorygraph/config.py +295 -0
- memorygraph/database.py +933 -0
- memorygraph/graph_analytics.py +631 -0
- memorygraph/integration/__init__.py +69 -0
- memorygraph/integration/context_capture.py +426 -0
- memorygraph/integration/project_analysis.py +583 -0
- memorygraph/integration/workflow_tracking.py +492 -0
- memorygraph/intelligence/__init__.py +59 -0
- memorygraph/intelligence/context_retrieval.py +447 -0
- memorygraph/intelligence/entity_extraction.py +386 -0
- memorygraph/intelligence/pattern_recognition.py +420 -0
- memorygraph/intelligence/temporal.py +374 -0
- memorygraph/migration/__init__.py +27 -0
- memorygraph/migration/manager.py +579 -0
- memorygraph/migration/models.py +142 -0
- memorygraph/migration/scripts/__init__.py +17 -0
- memorygraph/migration/scripts/bitemporal_migration.py +595 -0
- memorygraph/migration/scripts/multitenancy_migration.py +452 -0
- memorygraph/migration_tools_module.py +146 -0
- memorygraph/models.py +684 -0
- memorygraph/proactive/__init__.py +46 -0
- memorygraph/proactive/outcome_learning.py +444 -0
- memorygraph/proactive/predictive.py +410 -0
- memorygraph/proactive/session_briefing.py +399 -0
- memorygraph/relationships.py +668 -0
- memorygraph/server.py +883 -0
- memorygraph/sqlite_database.py +1876 -0
- memorygraph/tools/__init__.py +59 -0
- memorygraph/tools/activity_tools.py +262 -0
- memorygraph/tools/memory_tools.py +315 -0
- memorygraph/tools/migration_tools.py +181 -0
- memorygraph/tools/relationship_tools.py +147 -0
- memorygraph/tools/search_tools.py +406 -0
- memorygraph/tools/temporal_tools.py +339 -0
- memorygraph/utils/__init__.py +10 -0
- memorygraph/utils/context_extractor.py +429 -0
- memorygraph/utils/error_handling.py +151 -0
- memorygraph/utils/export_import.py +425 -0
- memorygraph/utils/graph_algorithms.py +200 -0
- memorygraph/utils/pagination.py +149 -0
- memorygraph/utils/project_detection.py +133 -0
- memorygraphmcp-0.11.7.dist-info/METADATA +970 -0
- memorygraphmcp-0.11.7.dist-info/RECORD +65 -0
- memorygraphmcp-0.11.7.dist-info/WHEEL +4 -0
- memorygraphmcp-0.11.7.dist-info/entry_points.txt +2 -0
- memorygraphmcp-0.11.7.dist-info/licenses/LICENSE +21 -0
memorygraph/cli.py
ADDED
@@ -0,0 +1,743 @@
"""
Command-line interface for MemoryGraph MCP Server.

Provides easy server startup with configuration options for AI coding agents.
"""

import argparse
import asyncio
import json
import logging
import os
import sys
from datetime import datetime, timezone
from typing import Optional

from . import __version__
from .config import Config, BackendType, TOOL_PROFILES
from .server import main as server_main

logger = logging.getLogger(__name__)


async def handle_export(args: argparse.Namespace) -> None:
    """Handle export command - works with all backends."""
    import time
    from .backends.factory import BackendFactory
    from .sqlite_database import SQLiteMemoryDatabase
    from .database import MemoryDatabase
    from .backends.sqlite_fallback import SQLiteFallbackBackend
    from .utils.export_import import export_to_json, export_to_markdown

    try:
        # Connect to database
        backend = await BackendFactory.create_backend()
        backend_name = backend.backend_name()

        print(f"\n📤 Exporting memories from {backend_name} backend...")

        # Create appropriate database wrapper
        if isinstance(backend, SQLiteFallbackBackend):
            db = SQLiteMemoryDatabase(backend)
        else:
            db = MemoryDatabase(backend)

        start_time = time.time()

        # Perform export with progress tracking
        if args.format == "json":
            result = await export_to_json(db, args.output)
            duration = time.time() - start_time

            print(f"\n✅ Export complete!")
            print(f"  Backend: {result.get('backend_type', backend_name)}")
            print(f"  Output: {args.output}")
            print(f"  Memories: {result['memory_count']}")
            print(f"  Relationships: {result['relationship_count']}")
            print(f"  Duration: {duration:.1f} seconds")

        elif args.format == "markdown":
            await export_to_markdown(db, args.output)
            duration = time.time() - start_time

            print(f"\n✅ Export complete!")
            print(f"  Backend: {backend_name}")
            print(f"  Output: {args.output}/")
            print(f"  Duration: {duration:.1f} seconds")

        await backend.disconnect()

    except Exception as e:
        print(f"❌ Export failed: {e}")
        logger.error(f"Export failed: {e}", exc_info=True)
        sys.exit(1)


async def handle_import(args: argparse.Namespace) -> None:
    """Handle import command - works with all backends."""
    import time
    from .backends.factory import BackendFactory
    from .sqlite_database import SQLiteMemoryDatabase
    from .database import MemoryDatabase
    from .backends.sqlite_fallback import SQLiteFallbackBackend
    from .utils.export_import import import_from_json

    try:
        # Connect to database
        backend = await BackendFactory.create_backend()
        backend_name = backend.backend_name()

        print(f"\n📥 Importing memories to {backend_name} backend...")

        # Create appropriate database wrapper
        if isinstance(backend, SQLiteFallbackBackend):
            db = SQLiteMemoryDatabase(backend)
        else:
            db = MemoryDatabase(backend)

        await db.initialize_schema()

        start_time = time.time()

        # Perform import
        if args.format == "json":
            result = await import_from_json(db, args.input, skip_duplicates=args.skip_duplicates)
            duration = time.time() - start_time

            print(f"\n✅ Import complete!")
            print(f"  Backend: {backend_name}")
            print(f"  Imported: {result['imported_memories']} memories, {result['imported_relationships']} relationships")
            if result['skipped_memories'] > 0 or result['skipped_relationships'] > 0:
                print(f"  Skipped: {result['skipped_memories']} memories, {result['skipped_relationships']} relationships")
            print(f"  Duration: {duration:.1f} seconds")

        await backend.disconnect()

    except Exception as e:
        print(f"❌ Import failed: {e}")
        logger.error(f"Import failed: {e}", exc_info=True)
        sys.exit(1)


async def handle_migrate(args: argparse.Namespace) -> None:
    """Handle migrate command."""
    from .migration.manager import MigrationManager
    from .migration.models import BackendConfig, MigrationOptions

    print(f"\n🔄 Migrating memories: {args.source_backend or 'current'} → {args.target_backend}")

    try:
        # Build source config
        if args.source_backend:
            source_config = BackendConfig(
                backend_type=BackendType(args.source_backend),
                path=args.from_path,
                uri=args.from_uri
            )
        else:
            source_config = BackendConfig.from_env()

        # Build target config
        target_config = BackendConfig(
            backend_type=BackendType(args.target_backend),
            path=args.to_path,
            uri=args.to_uri
        )

        # Build options
        options = MigrationOptions(
            dry_run=args.dry_run,
            verbose=args.verbose,
            skip_duplicates=args.skip_duplicates,
            verify=not args.no_verify,
            rollback_on_failure=True
        )

        # Perform migration
        manager = MigrationManager()
        result = await manager.migrate(source_config, target_config, options)

        # Display results
        if result.dry_run:
            print("\n✅ Dry-run successful - migration would proceed safely")
            if result.source_stats:
                memory_count = result.source_stats.get('memory_count', 0)
                print(f"  Source: {memory_count} memories")
            if result.errors:
                print("\n⚠️ Warnings:")
                for error in result.errors:
                    print(f"  - {error}")

        elif result.success:
            print("\n✅ Migration completed successfully!")
            print(f"  Migrated: {result.imported_memories} memories")
            print(f"  Migrated: {result.imported_relationships} relationships")
            if result.skipped_memories > 0:
                print(f"  Skipped: {result.skipped_memories} duplicates")
            print(f"  Duration: {result.duration_seconds:.1f} seconds")

            if result.verification_result and result.verification_result.valid:
                print(f"\n✓ Verification passed:")
                print(f"  Source: {result.verification_result.source_count} memories")
                print(f"  Target: {result.verification_result.target_count} memories")
                print(f"  Sample check: {result.verification_result.sample_passed}/{result.verification_result.sample_checks} passed")

        else:
            print("\n❌ Migration failed!")
            for error in result.errors:
                print(f"  - {error}")
            sys.exit(1)

    except Exception as e:
        print(f"❌ Migration failed: {e}")
        logger.error(f"Migration failed: {e}", exc_info=True)
        sys.exit(1)


async def handle_migrate_multitenant(args: argparse.Namespace) -> None:
    """Handle migrate-to-multitenant command."""
    from .backends.factory import BackendFactory
    from .migration.scripts import migrate_to_multitenant, rollback_from_multitenant

    try:
        # Connect to backend
        backend = await BackendFactory.create_backend()
        backend_name = backend.backend_name()

        if args.rollback:
            print(f"\n🔄 Rolling back multi-tenancy migration on {backend_name}...")

            result = await rollback_from_multitenant(backend, dry_run=args.dry_run)

            if result['dry_run']:
                print("\n✅ Dry-run successful - rollback would proceed safely")
                print(f"  Would clear tenant_id from: {result['memories_updated']} memories")
            elif result['success']:
                print("\n✅ Rollback completed successfully!")
                print(f"  Cleared tenant_id from: {result['memories_updated']} memories")
            else:
                print("\n❌ Rollback failed!")
                for error in result['errors']:
                    print(f"  - {error}")
                sys.exit(1)

        else:
            # Migrate to multi-tenant
            print(f"\n🔄 Migrating to multi-tenant mode on {backend_name}...")
            print(f"  Tenant ID: {args.tenant_id}")
            print(f"  Visibility: {args.visibility}")

            result = await migrate_to_multitenant(
                backend,
                tenant_id=args.tenant_id,
                dry_run=args.dry_run,
                visibility=args.visibility
            )

            if result['dry_run']:
                print("\n✅ Dry-run successful - migration would proceed safely")
                print(f"  Would update: {result['memories_updated']} memories")
                print(f"  Tenant ID would be: {result['tenant_id']}")
                print(f"  Visibility would be: {result['visibility']}")
            elif result['success']:
                print("\n✅ Migration completed successfully!")
                print(f"  Updated: {result['memories_updated']} memories")
                print(f"  Tenant ID: {result['tenant_id']}")
                print(f"  Visibility: {result['visibility']}")
                print("\nNext steps:")
                print(f"  1. Set MEMORY_MULTI_TENANT_MODE=true in your environment")
                print(f"  2. Restart the server to enable multi-tenant indexes")
            else:
                print("\n❌ Migration failed!")
                for error in result['errors']:
                    print(f"  - {error}")
                sys.exit(1)

        await backend.disconnect()

    except Exception as e:
        print(f"❌ Migration failed: {e}")
        logger.error(f"Migration failed: {e}", exc_info=True)
        sys.exit(1)


async def perform_health_check(timeout: float = 5.0) -> dict:
    """
    Perform health check on the backend and return status information.

    Args:
        timeout: Maximum time in seconds to wait for health check (default: 5.0)

    Returns:
        Dictionary containing health check results:
        - status: "healthy" or "unhealthy"
        - connected: bool indicating if backend is connected
        - backend_type: str with backend type (e.g., "sqlite", "neo4j")
        - version: str with backend version (if available)
        - statistics: dict with database statistics (if available)
        - timestamp: ISO format timestamp of the check
        - error: str with error message (if unhealthy)
    """
    from .backends.factory import BackendFactory

    result = {
        "status": "unhealthy",
        "connected": False,
        "backend_type": "unknown",
        "timestamp": datetime.now(timezone.utc).isoformat()
    }

    try:
        # Create backend with timeout
        backend = await asyncio.wait_for(
            BackendFactory.create_backend(),
            timeout=timeout
        )

        # Run health check
        health_info = await asyncio.wait_for(
            backend.health_check(),
            timeout=timeout
        )

        # Update result with health check information
        result.update(health_info)

        # Determine overall status
        if health_info.get("connected", False):
            result["status"] = "healthy"
        else:
            result["status"] = "unhealthy"
            if "error" not in result:
                result["error"] = "Backend reports disconnected status"

        # Clean up
        await backend.disconnect()

    except asyncio.TimeoutError:
        result["error"] = f"Health check timed out after {timeout} seconds"
        result["status"] = "unhealthy"
        logger.error(f"Health check timeout after {timeout}s")

    except Exception as e:
        result["error"] = str(e)
        result["status"] = "unhealthy"
        logger.error(f"Health check failed: {e}", exc_info=True)

    return result


def print_config_summary() -> None:
    """Print current configuration summary."""
    config = Config.get_config_summary()

    print("\n📋 Current Configuration:")
    print(f"  Backend: {config['backend']}")
    print(f"  Tool Profile: {Config.TOOL_PROFILE}")
    print(f"  Log Level: {config['logging']['level']}")

    if config['backend'] in ['neo4j', 'auto']:
        print(f"\n  Neo4j URI: {config['neo4j']['uri']}")
        print(f"  Neo4j User: {config['neo4j']['user']}")
        print(f"  Neo4j Password: {'✓ Configured' if config['neo4j']['password_configured'] else '✗ Not set'}")

    if config['backend'] in ['memgraph', 'auto']:
        print(f"\n  Memgraph URI: {config['memgraph']['uri']}")

    if config['backend'] in ['sqlite', 'auto']:
        print(f"\n  SQLite Path: {config['sqlite']['path']}")

    if config['backend'] in ['turso', 'auto']:
        print(f"\n  Turso URL: {config['turso']['database_url'] or '✗ Not set'}")
        print(f"  Turso Token: {'✓ Configured' if config['turso']['auth_token_configured'] else '✗ Not set'}")
        print(f"  Turso Local Path: {config['turso']['path']}")

    if config['backend'] in ['cloud', 'auto']:
        print(f"\n  Cloud API URL: {config['cloud']['api_url']}")
        print(f"  Cloud API Key: {'✓ Configured' if config['cloud']['api_key_configured'] else '✗ Not set'}")
        print(f"  Cloud Timeout: {config['cloud']['timeout']}s")

    if config['backend'] in ['falkordb', 'auto']:
        print(f"\n  FalkorDB: Client-server mode")
        print(f"  Note: Configuration via MEMORY_FALKORDB_* environment variables")

    if config['backend'] in ['falkordblite', 'auto']:
        print(f"\n  FalkorDBLite: Embedded database")
        print(f"  Note: Configuration via MEMORY_FALKORDBLITE_PATH environment variable")

    print()


def validate_backend(backend: str) -> None:
    """Validate backend choice."""
    valid_backends = [b.value for b in BackendType]
    if backend not in valid_backends:
        print(f"Error: Invalid backend '{backend}'")
        print(f"Valid options: {', '.join(valid_backends)}")
        sys.exit(1)


def validate_profile(profile: str) -> None:
    """Validate tool profile choice."""
    valid_profiles = list(TOOL_PROFILES.keys()) + ["lite", "standard", "full"]  # Include legacy
    if profile not in valid_profiles:
        print(f"Error: Invalid profile '{profile}'")
        print(f"Valid options: core, extended (or legacy: lite, standard, full)")
        sys.exit(1)

    # Warn about legacy profiles
    legacy_map = {"lite": "core", "standard": "extended", "full": "extended"}
    if profile in legacy_map:
        print(f"⚠️ Warning: Profile '{profile}' is deprecated. Using '{legacy_map[profile]}' instead.")
        print(f"  Update your configuration to use: --profile {legacy_map[profile]}")


def main() -> None:
    """Main CLI entry point."""
    parser = argparse.ArgumentParser(
        description="MemoryGraph - MCP memory server for AI coding agents",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Start with default settings (SQLite backend, core profile)
  memorygraph

  # Use extended profile (11 tools)
  memorygraph --profile extended

  # Use Neo4j backend with extended profile
  memorygraph --backend neo4j --profile extended

  # Show current configuration
  memorygraph --show-config

  # Run health check
  memorygraph --health

Environment Variables:
  MEMORY_BACKEND            Backend type (sqlite|neo4j|memgraph|falkordb|falkordblite|turso|cloud|auto) [default: sqlite]
  MEMORY_TOOL_PROFILE       Tool profile (core|extended) [default: core]
  MEMORY_SQLITE_PATH        SQLite database path [default: ~/.memorygraph/memory.db]
  MEMORY_LOG_LEVEL          Log level (DEBUG|INFO|WARNING|ERROR) [default: INFO]

Neo4j Configuration:
  MEMORY_NEO4J_URI          Connection URI [default: bolt://localhost:7687]
  MEMORY_NEO4J_USER         Username [default: neo4j]
  MEMORY_NEO4J_PASSWORD     Password (required for Neo4j)

Memgraph Configuration:
  MEMORY_MEMGRAPH_URI       Connection URI [default: bolt://localhost:7687]

FalkorDB Configuration:
  MEMORY_FALKORDB_HOST      FalkorDB host [default: localhost]
  MEMORY_FALKORDB_PORT      FalkorDB port [default: 6379]
  MEMORY_FALKORDB_PASSWORD  Password (if required)

FalkorDBLite Configuration:
  MEMORY_FALKORDBLITE_PATH  Database path [default: ~/.memorygraph/falkordblite.db]

Turso Configuration:
  MEMORY_TURSO_URL          Turso database URL (required for turso backend)
  MEMORY_TURSO_AUTH_TOKEN   Turso authentication token (required for turso backend)

Cloud Configuration:
  MEMORYGRAPH_API_KEY       API key for MemoryGraph Cloud (required for cloud backend)
  MEMORYGRAPH_API_URL       Cloud API URL [default: https://graph-api.memorygraph.dev]
  MEMORYGRAPH_TIMEOUT       Request timeout in seconds [default: 30]
"""
    )

    parser.add_argument(
        "--version",
        action="version",
        version=f"memorygraph {__version__}"
    )

    parser.add_argument(
        "--backend",
        type=str,
        choices=["sqlite", "neo4j", "memgraph", "falkordb", "falkordblite", "turso", "cloud", "auto"],
        help="Database backend to use (overrides MEMORY_BACKEND env var)"
    )

    parser.add_argument(
        "--profile",
        type=str,
        choices=["core", "extended", "lite", "standard", "full"],  # Include legacy for compatibility
        help="Tool profile to use: core (default, 9 tools) or extended (11 tools). Legacy profiles lite/standard/full are mapped to core/extended."
    )

    parser.add_argument(
        "--log-level",
        type=str,
        choices=["DEBUG", "INFO", "WARNING", "ERROR"],
        help="Logging level (overrides MEMORY_LOG_LEVEL env var)"
    )

    parser.add_argument(
        "--show-config",
        action="store_true",
        help="Show current configuration and exit"
    )

    parser.add_argument(
        "--health",
        action="store_true",
        help="Run health check and exit"
    )

    parser.add_argument(
        "--health-json",
        action="store_true",
        help="Output health check as JSON (use with --health)"
    )

    parser.add_argument(
        "--health-timeout",
        type=float,
        default=5.0,
        help="Health check timeout in seconds (default: 5.0)"
    )

    # Export/Import subcommand
    subparsers = parser.add_subparsers(dest="command", help="Available commands")

    # Export command
    export_parser = subparsers.add_parser(
        "export",
        help="Export memories to file (works with all backends)"
    )
    export_parser.add_argument(
        "--format",
        type=str,
        choices=["json", "markdown"],
        required=True,
        help="Export format (json or markdown)"
    )
    export_parser.add_argument(
        "--output",
        type=str,
        required=True,
        help="Output path (file for JSON, directory for Markdown)"
    )

    # Import command
    import_parser = subparsers.add_parser(
        "import",
        help="Import memories from file (works with all backends)"
    )
    import_parser.add_argument(
        "--format",
        type=str,
        choices=["json"],
        required=True,
        help="Import format (currently only JSON supported)"
    )
    import_parser.add_argument(
        "--input",
        type=str,
        required=True,
        help="Input JSON file path"
    )
    import_parser.add_argument(
        "--skip-duplicates",
        action="store_true",
        help="Skip memories with existing IDs instead of overwriting"
    )

    # Migrate command
    migrate_parser = subparsers.add_parser(
        "migrate",
        help="Migrate memories between backends"
    )
    migrate_parser.add_argument(
        "--from",
        dest="source_backend",
        type=str,
        choices=["sqlite", "neo4j", "memgraph", "falkordb", "falkordblite", "turso", "cloud"],
        help="Source backend type (defaults to current MEMORY_BACKEND)"
    )
    migrate_parser.add_argument(
        "--from-path",
        type=str,
        help="Source database path (for sqlite/falkordblite/turso)"
    )
    migrate_parser.add_argument(
        "--from-uri",
        type=str,
        help="Source database URI (for neo4j/memgraph/falkordb/turso/cloud)"
    )
    migrate_parser.add_argument(
        "--to",
        dest="target_backend",
        type=str,
        required=True,
        choices=["sqlite", "neo4j", "memgraph", "falkordb", "falkordblite", "turso", "cloud"],
        help="Target backend type"
    )
    migrate_parser.add_argument(
        "--to-path",
        type=str,
        help="Target database path (for sqlite/falkordblite/turso)"
    )
    migrate_parser.add_argument(
        "--to-uri",
        type=str,
        help="Target database URI (for neo4j/memgraph/falkordb/turso/cloud)"
    )
    migrate_parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Validate migration without making changes"
    )
    migrate_parser.add_argument(
        "--verbose",
        action="store_true",
        help="Show detailed progress information"
    )
    migrate_parser.add_argument(
        "--skip-duplicates",
        action="store_true",
        default=True,
        help="Skip memories that already exist in target"
    )
    migrate_parser.add_argument(
        "--no-verify",
        action="store_true",
        help="Skip post-migration verification (faster but less safe)"
    )

    # Migrate to multi-tenant command
    multitenant_parser = subparsers.add_parser(
        "migrate-to-multitenant",
        help="Migrate existing single-tenant database to multi-tenant mode"
    )
    multitenant_parser.add_argument(
        "--tenant-id",
        type=str,
        default="default",
        help="Tenant ID to assign to existing memories (default: default)"
    )
    multitenant_parser.add_argument(
        "--visibility",
        type=str,
        choices=["private", "project", "team", "public"],
        default="team",
        help="Visibility level to set for existing memories (default: team)"
    )
    multitenant_parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Show what would be changed without making changes"
    )
    multitenant_parser.add_argument(
        "--rollback",
        action="store_true",
        help="Rollback multi-tenancy migration (clear tenant_id fields)"
    )

    args = parser.parse_args()

    # Apply CLI arguments to environment variables
    if args.backend:
        validate_backend(args.backend)
        os.environ["MEMORY_BACKEND"] = args.backend

    if args.profile:
        validate_profile(args.profile)
        os.environ["MEMORY_TOOL_PROFILE"] = args.profile

    if args.log_level:
        os.environ["MEMORY_LOG_LEVEL"] = args.log_level

    # Configure logging
    logging.basicConfig(
        level=getattr(logging, Config.LOG_LEVEL),
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )

    # Handle special commands
    if args.show_config:
        print(f"MemoryGraph MCP Server v{__version__}")
        print_config_summary()
        sys.exit(0)

    if args.health:
        # Perform health check
        result = asyncio.run(perform_health_check(timeout=args.health_timeout))

        # Output in JSON format if requested
        if args.health_json:
            print(json.dumps(result, indent=2))
        else:
            # Human-readable format
            print(f"MemoryGraph MCP Server v{__version__}")
            print("\n🏥 Health Check Results\n")
            print(f"Status: {'✅ Healthy' if result['status'] == 'healthy' else '❌ Unhealthy'}")
            print(f"Backend: {result.get('backend_type', 'unknown')}")
            print(f"Connected: {'Yes' if result.get('connected') else 'No'}")

            if result.get('version'):
                print(f"Version: {result['version']}")

            if result.get('db_path'):
                print(f"Database: {result['db_path']}")

            if result.get('statistics'):
                stats = result['statistics']
                print(f"\nStatistics:")
                if 'memory_count' in stats:
                    print(f"  Memories: {stats['memory_count']}")
                for key, value in stats.items():
                    if key != 'memory_count':
                        print(f"  {key.replace('_', ' ').title()}: {value}")

            if result.get('database_size_bytes'):
                size_mb = result['database_size_bytes'] / (1024 * 1024)
                print(f"  Database Size: {size_mb:.2f} MB")

            if result.get('error'):
                print(f"\nError: {result['error']}")

            print(f"\nTimestamp: {result['timestamp']}")

        # Exit with appropriate status code
        sys.exit(0 if result['status'] == 'healthy' else 1)

    # Handle export/import/migrate commands
    if args.command == "export":
        asyncio.run(handle_export(args))
        sys.exit(0)

    if args.command == "import":
        asyncio.run(handle_import(args))
        sys.exit(0)

    if args.command == "migrate":
        asyncio.run(handle_migrate(args))
        sys.exit(0)

    if args.command == "migrate-to-multitenant":
        asyncio.run(handle_migrate_multitenant(args))
        sys.exit(0)

    # Start the server
    print(f"🚀 Starting MemoryGraph MCP Server v{__version__}")
    print(f"Backend: {Config.BACKEND}")
    print(f"Profile: {Config.TOOL_PROFILE}")
    print(f"Log Level: {Config.LOG_LEVEL}")
    print("\nPress Ctrl+C to stop the server\n")

    try:
        asyncio.run(server_main())
    except KeyboardInterrupt:
        print("\n\n👋 Server stopped gracefully")
        sys.exit(0)
    except Exception as e:
        print(f"\n❌ Server error: {e}")
        logger.error(f"Server error: {e}", exc_info=True)
        sys.exit(1)


if __name__ == "__main__":
    main()
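Usage note: the sketch below is illustrative only and is not part of the package. It shows one way to drive the CLI above from another script, assuming the `memorygraph` console script declared in entry_points.txt is installed on PATH; the flags and JSON fields follow the `--health`/`--health-json` handling and the perform_health_check docstring in cli.py, while the helper name itself is hypothetical.

# Hypothetical helper, not shipped with memorygraphMCP: run the CLI's health
# check in JSON mode and parse the report it prints to stdout.
import json
import subprocess

def check_memorygraph_health(timeout_s: float = 5.0) -> dict:
    # --health --health-json prints a JSON report; the process exits 0 when
    # status == "healthy" and 1 otherwise (see main() in cli.py).
    proc = subprocess.run(
        ["memorygraph", "--health", "--health-json", "--health-timeout", str(timeout_s)],
        capture_output=True,
        text=True,
    )
    report = json.loads(proc.stdout)
    # Keys documented in perform_health_check(): status, connected, backend_type, timestamp, ...
    print(f"{report['backend_type']}: {report['status']} (exit code {proc.returncode})")
    return report

if __name__ == "__main__":
    check_memorygraph_health()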