memorygraphMCP 0.11.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. memorygraph/__init__.py +50 -0
  2. memorygraph/__main__.py +12 -0
  3. memorygraph/advanced_tools.py +509 -0
  4. memorygraph/analytics/__init__.py +46 -0
  5. memorygraph/analytics/advanced_queries.py +727 -0
  6. memorygraph/backends/__init__.py +21 -0
  7. memorygraph/backends/base.py +179 -0
  8. memorygraph/backends/cloud.py +75 -0
  9. memorygraph/backends/cloud_backend.py +858 -0
  10. memorygraph/backends/factory.py +577 -0
  11. memorygraph/backends/falkordb_backend.py +749 -0
  12. memorygraph/backends/falkordblite_backend.py +746 -0
  13. memorygraph/backends/ladybugdb_backend.py +242 -0
  14. memorygraph/backends/memgraph_backend.py +327 -0
  15. memorygraph/backends/neo4j_backend.py +298 -0
  16. memorygraph/backends/sqlite_fallback.py +463 -0
  17. memorygraph/backends/turso.py +448 -0
  18. memorygraph/cli.py +743 -0
  19. memorygraph/cloud_database.py +297 -0
  20. memorygraph/config.py +295 -0
  21. memorygraph/database.py +933 -0
  22. memorygraph/graph_analytics.py +631 -0
  23. memorygraph/integration/__init__.py +69 -0
  24. memorygraph/integration/context_capture.py +426 -0
  25. memorygraph/integration/project_analysis.py +583 -0
  26. memorygraph/integration/workflow_tracking.py +492 -0
  27. memorygraph/intelligence/__init__.py +59 -0
  28. memorygraph/intelligence/context_retrieval.py +447 -0
  29. memorygraph/intelligence/entity_extraction.py +386 -0
  30. memorygraph/intelligence/pattern_recognition.py +420 -0
  31. memorygraph/intelligence/temporal.py +374 -0
  32. memorygraph/migration/__init__.py +27 -0
  33. memorygraph/migration/manager.py +579 -0
  34. memorygraph/migration/models.py +142 -0
  35. memorygraph/migration/scripts/__init__.py +17 -0
  36. memorygraph/migration/scripts/bitemporal_migration.py +595 -0
  37. memorygraph/migration/scripts/multitenancy_migration.py +452 -0
  38. memorygraph/migration_tools_module.py +146 -0
  39. memorygraph/models.py +684 -0
  40. memorygraph/proactive/__init__.py +46 -0
  41. memorygraph/proactive/outcome_learning.py +444 -0
  42. memorygraph/proactive/predictive.py +410 -0
  43. memorygraph/proactive/session_briefing.py +399 -0
  44. memorygraph/relationships.py +668 -0
  45. memorygraph/server.py +883 -0
  46. memorygraph/sqlite_database.py +1876 -0
  47. memorygraph/tools/__init__.py +59 -0
  48. memorygraph/tools/activity_tools.py +262 -0
  49. memorygraph/tools/memory_tools.py +315 -0
  50. memorygraph/tools/migration_tools.py +181 -0
  51. memorygraph/tools/relationship_tools.py +147 -0
  52. memorygraph/tools/search_tools.py +406 -0
  53. memorygraph/tools/temporal_tools.py +339 -0
  54. memorygraph/utils/__init__.py +10 -0
  55. memorygraph/utils/context_extractor.py +429 -0
  56. memorygraph/utils/error_handling.py +151 -0
  57. memorygraph/utils/export_import.py +425 -0
  58. memorygraph/utils/graph_algorithms.py +200 -0
  59. memorygraph/utils/pagination.py +149 -0
  60. memorygraph/utils/project_detection.py +133 -0
  61. memorygraphmcp-0.11.7.dist-info/METADATA +970 -0
  62. memorygraphmcp-0.11.7.dist-info/RECORD +65 -0
  63. memorygraphmcp-0.11.7.dist-info/WHEEL +4 -0
  64. memorygraphmcp-0.11.7.dist-info/entry_points.txt +2 -0
  65. memorygraphmcp-0.11.7.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,463 @@
1
+ """
2
+ SQLite fallback backend implementation for MemoryGraph.
3
+
4
+ This module provides a zero-dependency fallback using SQLite for persistence
5
+ and NetworkX for graph operations. This enables the memory server to work
6
+ without requiring Neo4j or Memgraph installation.
7
+ """
8
+
9
+ import logging
10
+ import os
11
+ import json
12
+ import sqlite3
13
+ import uuid
14
+ from typing import Any, Optional
15
+ from pathlib import Path
16
+
17
+ try:
18
+ import networkx as nx
19
+ except ImportError:
20
+ nx = None
21
+
22
+ from .base import GraphBackend
23
+ from ..models import DatabaseConnectionError, SchemaError
24
+ from ..config import Config
25
+
26
+ logger = logging.getLogger(__name__)
27
+
28
+
29
class SQLiteFallbackBackend(GraphBackend):
    """SQLite + NetworkX fallback implementation of the GraphBackend interface."""

    def __init__(
        self,
        db_path: Optional[str] = None
    ):
        """
        Initialize SQLite fallback backend.

        Args:
            db_path: Path to SQLite database file (defaults to ~/.memorygraph/memory.db)

        Raises:
            DatabaseConnectionError: If NetworkX is not installed
        """
        # NetworkX provides the in-memory graph; without it this backend is
        # unusable, so fail fast at construction time.
        if nx is None:
            raise DatabaseConnectionError(
                "NetworkX is required for SQLite fallback backend. "
                "Install with: pip install networkx"
            )

        # Path resolution order: explicit argument > MEMORY_SQLITE_PATH env
        # var > ~/.memorygraph/memory.db. The trailing `or fallback` also
        # covers the env var being set to an empty string.
        fallback = os.path.expanduser("~/.memorygraph/memory.db")
        candidate = db_path or os.getenv("MEMORY_SQLITE_PATH", fallback)
        self.db_path: str = candidate or fallback
        self.conn: Optional[sqlite3.Connection] = None
        self.graph: Optional[nx.DiGraph] = None  # type: ignore[misc,no-any-unimported]
        self._connected = False

        # Make sure the parent directory exists before connect() is called.
        Path(self.db_path).parent.mkdir(parents=True, exist_ok=True)
60
+
61
+ async def connect(self) -> bool:
62
+ """
63
+ Establish connection to SQLite database and initialize graph.
64
+
65
+ Returns:
66
+ True if connection successful
67
+
68
+ Raises:
69
+ DatabaseConnectionError: If connection fails
70
+ """
71
+ try:
72
+ self.conn = sqlite3.connect(self.db_path, check_same_thread=False)
73
+ self.conn.row_factory = sqlite3.Row # Enable column access by name
74
+ self.graph = nx.DiGraph()
75
+ self._connected = True
76
+
77
+ # Load existing graph into memory
78
+ await self._load_graph_to_memory()
79
+
80
+ logger.info(f"Successfully connected to SQLite database at {self.db_path}")
81
+ return True
82
+
83
+ except Exception as e:
84
+ logger.error(f"Failed to connect to SQLite: {e}")
85
+ raise DatabaseConnectionError(f"Failed to connect to SQLite: {e}")
86
+
87
+ async def disconnect(self) -> None:
88
+ """Close the database connection."""
89
+ if self.conn:
90
+ # Sync graph to SQLite before closing
91
+ await self._sync_to_sqlite()
92
+ self.conn.close()
93
+ self.conn = None
94
+ self.graph = None
95
+ self._connected = False
96
+ logger.info("SQLite connection closed")
97
+
98
+ async def execute_query(
99
+ self,
100
+ query: str,
101
+ parameters: Optional[dict[str, Any]] = None,
102
+ write: bool = False
103
+ ) -> list[dict[str, Any]]:
104
+ """
105
+ Execute a Cypher-like query translated to SQLite/NetworkX operations.
106
+
107
+ Args:
108
+ query: Cypher-style query string
109
+ parameters: Query parameters
110
+ write: Whether this is a write operation
111
+
112
+ Returns:
113
+ List of result records as dictionaries
114
+
115
+ Raises:
116
+ DatabaseConnectionError: If not connected
117
+ NotImplementedError: For complex Cypher queries
118
+
119
+ Note:
120
+ This is a simplified implementation that supports basic operations.
121
+ Complex Cypher queries will raise NotImplementedError.
122
+ """
123
+ if not self._connected or not self.conn:
124
+ raise DatabaseConnectionError("Not connected to SQLite. Call connect() first.")
125
+
126
+ params = parameters or {}
127
+
128
+ # For schema operations, we can execute directly
129
+ if query.strip().upper().startswith(("CREATE", "DROP", "ALTER")):
130
+ try:
131
+ cursor = self.conn.cursor()
132
+ # SQLite doesn't support Cypher, so we'll handle schema separately
133
+ return []
134
+ except sqlite3.Error as e:
135
+ raise DatabaseConnectionError(f"SQLite query failed: {e}")
136
+
137
+ # For data operations, translate to SQLite/NetworkX
138
+ # This is a simplified implementation - full Cypher translation would be complex
139
+ logger.warning("Direct Cypher execution not supported in SQLite backend. Use database.py methods.")
140
+ return []
141
+
142
+ async def initialize_schema(self) -> None:
143
+ """
144
+ Initialize database schema including indexes.
145
+
146
+ Raises:
147
+ SchemaError: If schema initialization fails
148
+ """
149
+ logger.info("Initializing SQLite schema for Claude Memory...")
150
+
151
+ if not self.conn:
152
+ raise SchemaError("Not connected to database")
153
+
154
+ cursor = self.conn.cursor()
155
+
156
+ try:
157
+ # Create nodes table
158
+ cursor.execute("""
159
+ CREATE TABLE IF NOT EXISTS nodes (
160
+ id TEXT PRIMARY KEY,
161
+ label TEXT NOT NULL,
162
+ properties TEXT NOT NULL,
163
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
164
+ updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
165
+ )
166
+ """)
167
+
168
+ # Create relationships table (with bi-temporal fields)
169
+ cursor.execute("""
170
+ CREATE TABLE IF NOT EXISTS relationships (
171
+ id TEXT PRIMARY KEY,
172
+ from_id TEXT NOT NULL,
173
+ to_id TEXT NOT NULL,
174
+ rel_type TEXT NOT NULL,
175
+ properties TEXT NOT NULL,
176
+ created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
177
+
178
+ -- Bi-temporal tracking fields (Phase 2.2)
179
+ valid_from TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
180
+ valid_until TIMESTAMP,
181
+ recorded_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
182
+ invalidated_by TEXT,
183
+
184
+ FOREIGN KEY (from_id) REFERENCES nodes(id) ON DELETE CASCADE,
185
+ FOREIGN KEY (to_id) REFERENCES nodes(id) ON DELETE CASCADE,
186
+ FOREIGN KEY (invalidated_by) REFERENCES relationships(id) ON DELETE SET NULL
187
+ )
188
+ """)
189
+
190
+ # Create indexes
191
+ cursor.execute("CREATE INDEX IF NOT EXISTS idx_nodes_label ON nodes(label)")
192
+ cursor.execute("CREATE INDEX IF NOT EXISTS idx_nodes_created ON nodes(created_at)")
193
+ cursor.execute("CREATE INDEX IF NOT EXISTS idx_rel_from ON relationships(from_id)")
194
+ cursor.execute("CREATE INDEX IF NOT EXISTS idx_rel_to ON relationships(to_id)")
195
+ cursor.execute("CREATE INDEX IF NOT EXISTS idx_rel_type ON relationships(rel_type)")
196
+
197
+ # Temporal indexes (Phase 2.2)
198
+ cursor.execute("""
199
+ CREATE INDEX IF NOT EXISTS idx_relationships_temporal
200
+ ON relationships(valid_from, valid_until)
201
+ """)
202
+ cursor.execute("""
203
+ CREATE INDEX IF NOT EXISTS idx_relationships_current
204
+ ON relationships(valid_until)
205
+ WHERE valid_until IS NULL
206
+ """)
207
+ cursor.execute("""
208
+ CREATE INDEX IF NOT EXISTS idx_relationships_recorded
209
+ ON relationships(recorded_at)
210
+ """)
211
+
212
+ # Conditional multi-tenant indexes (Phase 1)
213
+ if Config.is_multi_tenant_mode():
214
+ self._create_multitenant_indexes(cursor)
215
+
216
+ # Create FTS5 virtual table for full-text search
217
+ try:
218
+ cursor.execute("""
219
+ CREATE VIRTUAL TABLE IF NOT EXISTS nodes_fts USING fts5(
220
+ id,
221
+ title,
222
+ content,
223
+ summary,
224
+ content='nodes',
225
+ content_rowid='rowid'
226
+ )
227
+ """)
228
+ logger.debug("Created FTS5 table for full-text search")
229
+ except sqlite3.Error as e:
230
+ logger.warning(f"Could not create FTS5 table (may not be available): {e}")
231
+
232
+ self.conn.commit()
233
+ logger.info("Schema initialization completed")
234
+
235
+ except sqlite3.Error as e:
236
+ self.conn.rollback()
237
+ raise SchemaError(f"Failed to initialize schema: {e}")
238
+
239
+ def _create_multitenant_indexes(self, cursor: sqlite3.Cursor) -> None:
240
+ """
241
+ Create indexes for multi-tenant queries.
242
+
243
+ Only called when MEMORY_MULTI_TENANT_MODE=true. These indexes optimize
244
+ queries filtering by tenant_id, team_id, visibility, and created_by.
245
+
246
+ Args:
247
+ cursor: SQLite cursor for executing index creation
248
+
249
+ Note:
250
+ Context fields are stored as JSON in properties column, so we use
251
+ JSON extraction for indexing (requires SQLite 3.9.0+)
252
+ """
253
+ logger.info("Creating multi-tenant indexes...")
254
+
255
+ try:
256
+ # Tenant index - for tenant isolation queries
257
+ cursor.execute("""
258
+ CREATE INDEX IF NOT EXISTS idx_memory_tenant
259
+ ON nodes(json_extract(properties, '$.context.tenant_id'))
260
+ WHERE label = 'Memory'
261
+ """)
262
+
263
+ # Team index - for team-scoped queries
264
+ cursor.execute("""
265
+ CREATE INDEX IF NOT EXISTS idx_memory_team
266
+ ON nodes(json_extract(properties, '$.context.team_id'))
267
+ WHERE label = 'Memory'
268
+ """)
269
+
270
+ # Visibility index - for access control filtering
271
+ cursor.execute("""
272
+ CREATE INDEX IF NOT EXISTS idx_memory_visibility
273
+ ON nodes(json_extract(properties, '$.context.visibility'))
274
+ WHERE label = 'Memory'
275
+ """)
276
+
277
+ # Created_by index - for user-specific queries
278
+ cursor.execute("""
279
+ CREATE INDEX IF NOT EXISTS idx_memory_created_by
280
+ ON nodes(json_extract(properties, '$.context.created_by'))
281
+ WHERE label = 'Memory'
282
+ """)
283
+
284
+ # Composite index for common query pattern (tenant + visibility)
285
+ cursor.execute("""
286
+ CREATE INDEX IF NOT EXISTS idx_memory_tenant_visibility
287
+ ON nodes(
288
+ json_extract(properties, '$.context.tenant_id'),
289
+ json_extract(properties, '$.context.visibility')
290
+ )
291
+ WHERE label = 'Memory'
292
+ """)
293
+
294
+ # Version index for optimistic locking
295
+ cursor.execute("""
296
+ CREATE INDEX IF NOT EXISTS idx_memory_version
297
+ ON nodes(json_extract(properties, '$.version'))
298
+ WHERE label = 'Memory'
299
+ """)
300
+
301
+ logger.info("Multi-tenant indexes created successfully")
302
+
303
+ except sqlite3.Error as e:
304
+ logger.warning(f"Could not create some multi-tenant indexes: {e}")
305
+ # Don't fail schema initialization if indexes fail
306
+ # (e.g., older SQLite versions without JSON support)
307
+
308
+ async def _load_graph_to_memory(self) -> None:
309
+ """Load graph data from SQLite into NetworkX graph."""
310
+ if not self.conn or not self.graph:
311
+ return
312
+
313
+ cursor = self.conn.cursor()
314
+
315
+ # Load nodes
316
+ cursor.execute("SELECT id, label, properties FROM nodes")
317
+ for row in cursor.fetchall():
318
+ node_id = row[0]
319
+ label = row[1]
320
+ properties = json.loads(row[2])
321
+ self.graph.add_node(node_id, label=label, **properties)
322
+
323
+ # Load relationships
324
+ cursor.execute("SELECT id, from_id, to_id, rel_type, properties FROM relationships")
325
+ for row in cursor.fetchall():
326
+ rel_id = row[0]
327
+ from_id = row[1]
328
+ to_id = row[2]
329
+ rel_type = row[3]
330
+ properties = json.loads(row[4])
331
+ self.graph.add_edge(from_id, to_id, id=rel_id, type=rel_type, **properties)
332
+
333
+ logger.debug(f"Loaded {self.graph.number_of_nodes()} nodes and {self.graph.number_of_edges()} edges into memory")
334
+
335
+ async def _sync_to_sqlite(self) -> None:
336
+ """Sync in-memory NetworkX graph to SQLite database."""
337
+ if not self.conn or not self.graph:
338
+ return
339
+
340
+ # This is a simplified sync - in production, we'd track changes
341
+ # For now, we'll rely on direct SQLite operations for writes
342
+ logger.debug("Graph sync to SQLite (using direct operations)")
343
+
344
+ async def health_check(self) -> dict[str, Any]:
345
+ """
346
+ Check backend health and return status information.
347
+
348
+ Returns:
349
+ Dictionary with health check results
350
+ """
351
+ health_info = {
352
+ "connected": self._connected,
353
+ "backend_type": "sqlite",
354
+ "db_path": self.db_path
355
+ }
356
+
357
+ if self._connected and self.conn:
358
+ try:
359
+ cursor = self.conn.cursor()
360
+ cursor.execute("SELECT COUNT(*) FROM nodes WHERE label = 'Memory'")
361
+ count = cursor.fetchone()[0]
362
+
363
+ health_info["statistics"] = {
364
+ "memory_count": count
365
+ }
366
+
367
+ # Get SQLite version
368
+ cursor.execute("SELECT sqlite_version()")
369
+ health_info["version"] = cursor.fetchone()[0]
370
+
371
+ # Get database size
372
+ db_size = os.path.getsize(self.db_path) if os.path.exists(self.db_path) else 0
373
+ health_info["database_size_bytes"] = db_size
374
+
375
+ except Exception as e:
376
+ logger.warning(f"Could not get detailed health info: {e}")
377
+ health_info["warning"] = str(e)
378
+
379
+ return health_info
380
+
381
+ def backend_name(self) -> str:
382
+ """Return the name of this backend implementation."""
383
+ return "sqlite"
384
+
385
+ def supports_fulltext_search(self) -> bool:
386
+ """
387
+ Check if this backend supports full-text search.
388
+
389
+ Returns:
390
+ True if FTS5 is available in SQLite
391
+ """
392
+ if not self.conn:
393
+ return False
394
+
395
+ try:
396
+ cursor = self.conn.cursor()
397
+ cursor.execute("SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name='nodes_fts'")
398
+ result = cursor.fetchone()
399
+ return bool(result[0] > 0) if result else False
400
+ except Exception:
401
+ return False
402
+
403
+ def supports_transactions(self) -> bool:
404
+ """Check if this backend supports ACID transactions."""
405
+ return True # SQLite supports transactions
406
+
407
+ @classmethod
408
+ async def create(cls, db_path: Optional[str] = None) -> "SQLiteFallbackBackend":
409
+ """
410
+ Factory method to create and connect to a SQLite backend.
411
+
412
+ Args:
413
+ db_path: Path to SQLite database file
414
+
415
+ Returns:
416
+ Connected SQLiteFallbackBackend instance
417
+
418
+ Raises:
419
+ DatabaseConnectionError: If connection fails
420
+ """
421
+ backend = cls(db_path)
422
+ await backend.connect()
423
+ return backend
424
+
425
+ # Helper methods for direct database operations (used by MemoryDatabase)
426
+
427
+ def execute_sync(self, query: str, parameters: Optional[tuple[Any, ...]] = None) -> list[dict[str, Any]]:
428
+ """
429
+ Execute a synchronous SQL query (for internal use).
430
+
431
+ Args:
432
+ query: SQL query string
433
+ parameters: Query parameters as tuple
434
+
435
+ Returns:
436
+ List of result rows as dictionaries
437
+ """
438
+ if not self.conn:
439
+ raise DatabaseConnectionError("Not connected to SQLite")
440
+
441
+ cursor = self.conn.cursor()
442
+ if parameters:
443
+ cursor.execute(query, parameters)
444
+ else:
445
+ cursor.execute(query)
446
+
447
+ # Convert rows to dictionaries
448
+ columns = [desc[0] for desc in cursor.description] if cursor.description else []
449
+ results = []
450
+ for row in cursor.fetchall():
451
+ results.append(dict(zip(columns, row)))
452
+
453
+ return results
454
+
455
+ def commit(self) -> None:
456
+ """Commit current transaction."""
457
+ if self.conn:
458
+ self.conn.commit()
459
+
460
+ def rollback(self) -> None:
461
+ """Rollback current transaction."""
462
+ if self.conn:
463
+ self.conn.rollback()