memorygraphMCP 0.11.7 (memorygraphmcp-0.11.7-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. memorygraph/__init__.py +50 -0
  2. memorygraph/__main__.py +12 -0
  3. memorygraph/advanced_tools.py +509 -0
  4. memorygraph/analytics/__init__.py +46 -0
  5. memorygraph/analytics/advanced_queries.py +727 -0
  6. memorygraph/backends/__init__.py +21 -0
  7. memorygraph/backends/base.py +179 -0
  8. memorygraph/backends/cloud.py +75 -0
  9. memorygraph/backends/cloud_backend.py +858 -0
  10. memorygraph/backends/factory.py +577 -0
  11. memorygraph/backends/falkordb_backend.py +749 -0
  12. memorygraph/backends/falkordblite_backend.py +746 -0
  13. memorygraph/backends/ladybugdb_backend.py +242 -0
  14. memorygraph/backends/memgraph_backend.py +327 -0
  15. memorygraph/backends/neo4j_backend.py +298 -0
  16. memorygraph/backends/sqlite_fallback.py +463 -0
  17. memorygraph/backends/turso.py +448 -0
  18. memorygraph/cli.py +743 -0
  19. memorygraph/cloud_database.py +297 -0
  20. memorygraph/config.py +295 -0
  21. memorygraph/database.py +933 -0
  22. memorygraph/graph_analytics.py +631 -0
  23. memorygraph/integration/__init__.py +69 -0
  24. memorygraph/integration/context_capture.py +426 -0
  25. memorygraph/integration/project_analysis.py +583 -0
  26. memorygraph/integration/workflow_tracking.py +492 -0
  27. memorygraph/intelligence/__init__.py +59 -0
  28. memorygraph/intelligence/context_retrieval.py +447 -0
  29. memorygraph/intelligence/entity_extraction.py +386 -0
  30. memorygraph/intelligence/pattern_recognition.py +420 -0
  31. memorygraph/intelligence/temporal.py +374 -0
  32. memorygraph/migration/__init__.py +27 -0
  33. memorygraph/migration/manager.py +579 -0
  34. memorygraph/migration/models.py +142 -0
  35. memorygraph/migration/scripts/__init__.py +17 -0
  36. memorygraph/migration/scripts/bitemporal_migration.py +595 -0
  37. memorygraph/migration/scripts/multitenancy_migration.py +452 -0
  38. memorygraph/migration_tools_module.py +146 -0
  39. memorygraph/models.py +684 -0
  40. memorygraph/proactive/__init__.py +46 -0
  41. memorygraph/proactive/outcome_learning.py +444 -0
  42. memorygraph/proactive/predictive.py +410 -0
  43. memorygraph/proactive/session_briefing.py +399 -0
  44. memorygraph/relationships.py +668 -0
  45. memorygraph/server.py +883 -0
  46. memorygraph/sqlite_database.py +1876 -0
  47. memorygraph/tools/__init__.py +59 -0
  48. memorygraph/tools/activity_tools.py +262 -0
  49. memorygraph/tools/memory_tools.py +315 -0
  50. memorygraph/tools/migration_tools.py +181 -0
  51. memorygraph/tools/relationship_tools.py +147 -0
  52. memorygraph/tools/search_tools.py +406 -0
  53. memorygraph/tools/temporal_tools.py +339 -0
  54. memorygraph/utils/__init__.py +10 -0
  55. memorygraph/utils/context_extractor.py +429 -0
  56. memorygraph/utils/error_handling.py +151 -0
  57. memorygraph/utils/export_import.py +425 -0
  58. memorygraph/utils/graph_algorithms.py +200 -0
  59. memorygraph/utils/pagination.py +149 -0
  60. memorygraph/utils/project_detection.py +133 -0
  61. memorygraphmcp-0.11.7.dist-info/METADATA +970 -0
  62. memorygraphmcp-0.11.7.dist-info/RECORD +65 -0
  63. memorygraphmcp-0.11.7.dist-info/WHEEL +4 -0
  64. memorygraphmcp-0.11.7.dist-info/entry_points.txt +2 -0
  65. memorygraphmcp-0.11.7.dist-info/licenses/LICENSE +21 -0
memorygraph/sqlite_database.py
@@ -0,0 +1,1876 @@
+ """
+ SQLite-specific database implementation for MemoryGraph.
+
+ This module provides a SQLiteMemoryDatabase class that uses SQL queries
+ instead of Cypher. It works with the SQLiteFallbackBackend to provide
+ memory storage without requiring Neo4j.
+ """
+
+ import logging
+ import json
+ import uuid
+ from typing import Dict, List, Optional, Any, Tuple
+ from datetime import datetime, timedelta, timezone
+
+ from .models import (
+     Memory, MemoryType, MemoryNode, Relationship, RelationshipType,
+     RelationshipProperties, SearchQuery, MemoryContext,
+     MemoryError, MemoryNotFoundError, RelationshipError,
+     ValidationError, DatabaseConnectionError, SchemaError, PaginatedResult
+ )
+ from .backends.sqlite_fallback import SQLiteFallbackBackend
+ from .config import Config
+ from .utils.graph_algorithms import has_cycle
+
+ logger = logging.getLogger(__name__)
+
+
+ def _simple_stem(word: str) -> str:
+     """
+     Simple word stemming for fuzzy search.
+
+     Handles common English plurals and verb tenses.
+     This is a lightweight alternative to full NLP stemming.
+
+     Args:
+         word: Word to stem
+
+     Returns:
+         Stemmed word
+     """
+     word = word.lower().strip()
+
+     if len(word) <= 3:
+         return word
+
+     # Handle 'ied' suffix specially (retried -> retry, not retri)
+     if word.endswith('ied') and len(word) > 4:
+         # Remove 'ied' and add 'y' back
+         stem = word[:-3] + 'y'
+         if len(stem) >= 3:
+             return stem
+
+     # Handle 'ies' suffix specially (retries -> retry, not retr)
+     if word.endswith('ies') and len(word) > 4:
+         # Remove 'ies' and add 'y' back
+         stem = word[:-3] + 'y'
+         if len(stem) >= 3:
+             return stem
+
+     # Remove common suffixes (ordered by specificity)
+     suffixes = [
+         'es',   # boxes -> box
+         'ing',  # retrying -> retry
+         'ed',   # timed -> tim
+         's',    # errors -> error
+     ]
+
+     for suffix in suffixes:
+         if word.endswith(suffix):
+             stem = word[:-len(suffix)]
+             # Don't stem too aggressively (keep at least 3 chars)
+             if len(stem) >= 3:
+                 return stem
+
+     return word
+
+
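# Illustration (not from the package): behavior implied by the suffix rules above.
assert _simple_stem("retried") == "retry"    # 'ied' -> 'y'
assert _simple_stem("retries") == "retry"    # 'ies' -> 'y'
assert _simple_stem("boxes") == "box"        # 'es' stripped
assert _simple_stem("retrying") == "retry"   # 'ing' stripped
assert _simple_stem("timed") == "tim"        # 'ed' stripped; mild over-stemming, as the comment notes
assert _simple_stem("cat") == "cat"          # words of <= 3 chars are returned unchanged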
+ def _generate_fuzzy_patterns(query: str) -> list:
+     """
+     Generate fuzzy search patterns from a query string.
+
+     Creates multiple patterns to match variations of words.
+
+     Args:
+         query: Search query string
+
+     Returns:
+         List of (pattern, weight) tuples for matching
+     """
+     patterns = []
+     query_lower = query.lower().strip()
+
+     # Exact match pattern (highest priority)
+     patterns.append((f"%{query_lower}%", 1.0))
+
+     # Split into words for multi-word queries
+     words = query_lower.split()
+
+     for word in words:
+         if len(word) <= 2:
+             continue
+
+         # Stem the word
+         stem = _simple_stem(word)
+
+         # Add stemmed pattern if different from original
+         if stem != word and len(stem) >= 3:
+             patterns.append((f"%{stem}%", 0.8))
+
+         # Also add patterns for common variations that would stem to this word
+         # This helps match: "retry" -> "retries", "retrying", "retried"
+         if len(word) >= 4:
+             # Add common suffixes
+             variations = []
+
+             # Handle words ending in 'y' specially (retry -> retries, not retrys)
+             if word.endswith('y'):
+                 variations.extend([
+                     word[:-1] + "ies",  # retry -> retries
+                     word + "ing",       # retry -> retrying
+                     word[:-1] + "ied",  # retry -> retried
+                 ])
+             else:
+                 variations.extend([
+                     word + "s",    # cache -> caches
+                     word + "es",   # box -> boxes
+                     word + "ing",  # cache -> caching
+                     word + "ed",   # cache -> cached
+                 ])
+
+             for var in variations:
+                 var_stem = _simple_stem(var)
+                 # Only add if it stems back to our word's stem
+                 if var_stem == stem and len(var_stem) >= 3:
+                     patterns.append((f"%{var}%", 0.9))
+
+     # Remove duplicates while preserving order
+     seen = set()
+     unique_patterns = []
+     for pattern, weight in patterns:
+         if pattern not in seen:
+             seen.add(pattern)
+             unique_patterns.append((pattern, weight))
+
+     return unique_patterns
+
+
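# Illustration (not from the package): for the single word "retry" the
# generator emits the exact pattern plus only those variations that stem
# back to the same root:
#
#     _generate_fuzzy_patterns("retry")
#     # -> [("%retry%", 1.0), ("%retries%", 0.9),
#     #     ("%retrying%", 0.9), ("%retried%", 0.9)]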
+ class SQLiteMemoryDatabase:
+     """SQLite-specific implementation of memory database operations."""
+
+     def __init__(self, backend: SQLiteFallbackBackend):
+         """
+         Initialize with a SQLite backend connection.
+
+         Args:
+             backend: SQLiteFallbackBackend instance
+         """
+         self.backend = backend
+
+     async def initialize_schema(self) -> None:
+         """
+         Create database schema, constraints, and indexes.
+
+         This method ensures the SQLite backend has the proper schema
+         for storing Memory objects as nodes.
+
+         Raises:
+             SchemaError: If schema creation fails
+         """
+         logger.info("Initializing SQLite schema for Memory storage...")
+
+         try:
+             # The backend already creates basic tables, but we may need additional indexes
+             # for Memory-specific queries
+
+             # Create index on properties for common queries
+             # These are in addition to the basic indexes created by the backend
+             try:
+                 self.backend.execute_sync(
+                     "CREATE INDEX IF NOT EXISTS idx_nodes_memory ON nodes(label) WHERE label = 'Memory'"
+                 )
+             except Exception as e:
+                 logger.debug(f"Index creation skipped (may already exist): {e}")
+
+             logger.info("Memory schema initialization completed")
+
+         except Exception as e:
+             logger.error(f"Failed to initialize schema: {e}")
+             raise SchemaError(f"Failed to initialize schema: {e}")
+
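# Sketch of the table shapes this module appears to assume from the
# SQLiteFallbackBackend (reconstructed from the queries below; the actual
# DDL lives in memorygraph/backends/sqlite_fallback.py and may differ):
#
#     CREATE TABLE IF NOT EXISTS nodes (
#         id TEXT PRIMARY KEY,
#         label TEXT NOT NULL,
#         properties TEXT,              -- JSON blob
#         created_at TEXT,
#         updated_at TEXT
#     );
#
#     CREATE TABLE IF NOT EXISTS relationships (
#         id TEXT PRIMARY KEY,
#         from_id TEXT,
#         to_id TEXT,
#         rel_type TEXT,
#         properties TEXT,              -- JSON blob
#         created_at TEXT,
#         valid_from TEXT,
#         valid_until TEXT,
#         recorded_at TEXT,
#         invalidated_by TEXT
#     );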
+     async def store_memory(self, memory: Memory) -> str:
+         """
+         Store a memory in the database and return its ID.
+
+         Args:
+             memory: Memory object to store
+
+         Returns:
+             ID of the stored memory
+
+         Raises:
+             ValidationError: If memory data is invalid
+             DatabaseConnectionError: If storage fails
+         """
+         try:
+             if not memory.id:
+                 memory.id = str(uuid.uuid4())
+
+             memory.updated_at = datetime.now(timezone.utc)
+
+             # Convert memory to properties dict
+             memory_node = MemoryNode(memory=memory)
+             properties = memory_node.to_neo4j_properties()
+
+             # Serialize properties as JSON
+             properties_json = json.dumps(properties)
+
+             # Check if memory already exists (MERGE behavior)
+             existing = self.backend.execute_sync(
+                 "SELECT id FROM nodes WHERE id = ? AND label = 'Memory'",
+                 (memory.id,)
+             )
+
+             if existing:
+                 # Update existing
+                 self.backend.execute_sync(
+                     """
+                     UPDATE nodes
+                     SET properties = ?, updated_at = CURRENT_TIMESTAMP
+                     WHERE id = ? AND label = 'Memory'
+                     """,
+                     (properties_json, memory.id)
+                 )
+             else:
+                 # Insert new
+                 self.backend.execute_sync(
+                     """
+                     INSERT INTO nodes (id, label, properties, created_at, updated_at)
+                     VALUES (?, 'Memory', ?, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP)
+                     """,
+                     (memory.id, properties_json)
+                 )
+
+             self.backend.commit()
+             logger.info(f"Stored memory: {memory.id} ({memory.type})")
+             return memory.id
+
+         except Exception as e:
+             self.backend.rollback()
+             if isinstance(e, (DatabaseConnectionError, ValidationError)):
+                 raise
+             logger.error(f"Failed to store memory: {e}")
+             raise DatabaseConnectionError(f"Failed to store memory: {e}")
+
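# Usage sketch (not from the package; Memory's keyword constructor and the
# MemoryType member shown are assumptions based on this module's imports):
#
#     db = SQLiteMemoryDatabase(backend)
#     memory_id = await db.store_memory(
#         Memory(type=MemoryType.SOLUTION,
#                title="Retry with backoff",
#                content="Wrap flaky HTTP calls in exponential backoff.")
#     )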
+     async def get_memory(self, memory_id: str, include_relationships: bool = True) -> Optional[Memory]:
+         """
+         Retrieve a memory by ID.
+
+         Args:
+             memory_id: ID of the memory to retrieve
+             include_relationships: Whether to include relationships (not currently used)
+
+         Returns:
+             Memory object if found, None otherwise
+
+         Raises:
+             DatabaseConnectionError: If query fails
+         """
+         try:
+             result = self.backend.execute_sync(
+                 "SELECT properties FROM nodes WHERE id = ? AND label = 'Memory'",
+                 (memory_id,)
+             )
+
+             if not result:
+                 return None
+
+             properties_json = result[0]['properties']
+             properties = json.loads(properties_json)
+
+             return self._properties_to_memory(properties)
+
+         except Exception as e:
+             if isinstance(e, DatabaseConnectionError):
+                 raise
+             logger.error(f"Failed to get memory {memory_id}: {e}")
+             raise DatabaseConnectionError(f"Failed to get memory: {e}")
+
+     async def search_memories(self, search_query: SearchQuery) -> List[Memory]:
+         """
+         Search for memories based on query parameters.
+
+         Args:
+             search_query: SearchQuery object with filter criteria
+
+         Returns:
+             List of Memory objects matching the search criteria
+
+         Raises:
+             DatabaseConnectionError: If search fails
+         """
+         try:
+             # Build SQL WHERE conditions
+             where_conditions = ["label = 'Memory'"]
+             params = []
+
+             # Multi-term search (takes precedence over single query)
+             if search_query.terms:
+                 tolerance = search_query.search_tolerance or "normal"
+                 match_mode = search_query.match_mode or "any"
+
+                 term_conditions = []
+                 for term in search_query.terms:
+                     if tolerance == "strict":
+                         # Strict mode: exact substring match only
+                         pattern = f"%{term.lower()}%"
+                         term_conditions.append(
+                             "(json_extract(properties, '$.title') LIKE ? OR "
+                             "json_extract(properties, '$.content') LIKE ? OR "
+                             "json_extract(properties, '$.summary') LIKE ?)"
+                         )
+                         params.extend([pattern, pattern, pattern])
+                     else:
+                         # Normal/fuzzy mode: use stemming
+                         patterns = _generate_fuzzy_patterns(term)
+                         pattern_conditions = []
+                         for pattern, weight in patterns:
+                             pattern_conditions.append(
+                                 "(json_extract(properties, '$.title') LIKE ? OR "
+                                 "json_extract(properties, '$.content') LIKE ? OR "
+                                 "json_extract(properties, '$.summary') LIKE ?)"
+                             )
+                             params.extend([pattern, pattern, pattern])
+                         if pattern_conditions:
+                             term_conditions.append(f"({' OR '.join(pattern_conditions)})")
+
+                 # Combine term conditions based on match_mode
+                 if term_conditions:
+                     if match_mode == "all":
+                         # AND: all terms must match
+                         where_conditions.append(f"({' AND '.join(term_conditions)})")
+                     else:
+                         # OR: any term matches (default)
+                         where_conditions.append(f"({' OR '.join(term_conditions)})")
+
+             # Text search with tolerance-based matching (single query)
+             elif search_query.query:
+                 tolerance = search_query.search_tolerance or "normal"
+
+                 if tolerance == "strict":
+                     # Strict mode: exact substring match only (no stemming)
+                     pattern = f"%{search_query.query.lower()}%"
+                     pattern_conditions = [
+                         "(json_extract(properties, '$.title') LIKE ? OR "
+                         "json_extract(properties, '$.content') LIKE ? OR "
+                         "json_extract(properties, '$.summary') LIKE ?)"
+                     ]
+                     params.extend([pattern, pattern, pattern])
+                     where_conditions.append(f"({' OR '.join(pattern_conditions)})")
+
+                 elif tolerance == "fuzzy":
+                     # Fuzzy mode: same as normal for now (future: trigram similarity)
+                     # Generate fuzzy patterns (exact match + stemmed variations)
+                     patterns = _generate_fuzzy_patterns(search_query.query)
+
+                     # Build OR condition for all patterns across all text fields
+                     pattern_conditions = []
+                     for pattern, weight in patterns:
+                         # Each pattern matches against title, content, or summary
+                         pattern_conditions.append(
+                             "(json_extract(properties, '$.title') LIKE ? OR "
+                             "json_extract(properties, '$.content') LIKE ? OR "
+                             "json_extract(properties, '$.summary') LIKE ?)"
+                         )
+                         # Add pattern three times (once for each field)
+                         params.extend([pattern, pattern, pattern])
+
+                     # Combine all pattern conditions with OR
+                     if pattern_conditions:
+                         where_conditions.append(f"({' OR '.join(pattern_conditions)})")
+
+                 else:  # tolerance == "normal" (default)
+                     # Normal mode: fuzzy matching with stemming
+                     patterns = _generate_fuzzy_patterns(search_query.query)
+
+                     # Build OR condition for all patterns across all text fields
+                     pattern_conditions = []
+                     for pattern, weight in patterns:
+                         # Each pattern matches against title, content, or summary
+                         pattern_conditions.append(
+                             "(json_extract(properties, '$.title') LIKE ? OR "
+                             "json_extract(properties, '$.content') LIKE ? OR "
+                             "json_extract(properties, '$.summary') LIKE ?)"
+                         )
+                         # Add pattern three times (once for each field)
+                         params.extend([pattern, pattern, pattern])
+
+                     # Combine all pattern conditions with OR
+                     if pattern_conditions:
+                         where_conditions.append(f"({' OR '.join(pattern_conditions)})")
+
+             # Memory type filter
+             if search_query.memory_types:
+                 type_placeholders = ','.join('?' * len(search_query.memory_types))
+                 where_conditions.append(f"json_extract(properties, '$.type') IN ({type_placeholders})")
+                 params.extend([t.value for t in search_query.memory_types])
+
+             # Tags filter (check if any tag matches)
+             if search_query.tags:
+                 # For SQLite, we need to check the JSON array
+                 tag_conditions = []
+                 for tag in search_query.tags:
+                     tag_conditions.append("json_extract(properties, '$.tags') LIKE ?")
+                     params.append(f'%"{tag}"%')
+                 where_conditions.append(f"({' OR '.join(tag_conditions)})")
+
+             # Project path filter
+             if search_query.project_path:
+                 where_conditions.append("json_extract(properties, '$.context_project_path') = ?")
+                 params.append(search_query.project_path)
+
+             # Importance filter
+             if search_query.min_importance is not None:
+                 where_conditions.append("CAST(json_extract(properties, '$.importance') AS REAL) >= ?")
+                 params.append(search_query.min_importance)
+
+             # Confidence filter
+             if search_query.min_confidence is not None:
+                 where_conditions.append("CAST(json_extract(properties, '$.confidence') AS REAL) >= ?")
+                 params.append(search_query.min_confidence)
+
+             # Date filters
+             if search_query.created_after:
+                 where_conditions.append("json_extract(properties, '$.created_at') >= ?")
+                 params.append(search_query.created_after.isoformat())
+
+             if search_query.created_before:
+                 where_conditions.append("json_extract(properties, '$.created_at') <= ?")
+                 params.append(search_query.created_before.isoformat())
+
+             # Build complete query
+             where_clause = " AND ".join(where_conditions)
+             query = f"""
+                 SELECT properties FROM nodes
+                 WHERE {where_clause}
+                 ORDER BY
+                     CAST(json_extract(properties, '$.importance') AS REAL) DESC,
+                     json_extract(properties, '$.created_at') DESC
+                 LIMIT ?
+             """
+             params.append(search_query.limit)
+
+             result = self.backend.execute_sync(query, tuple(params))
+
+             memories = []
+             for row in result:
+                 properties = json.loads(row['properties'])
+                 memory = self._properties_to_memory(properties)
+                 if memory:
+                     memories.append(memory)
+
+             # Enrich results with relationships and match info if requested
+             if search_query.include_relationships:
+                 # Use terms for enrichment if provided, otherwise use query
+                 search_text = (search_query.terms[0] if search_query.terms
+                                else search_query.query)
+                 memories = await self._enrich_search_results(
+                     memories,
+                     search_text
+                 )
+
+             # Apply relationship filter if specified
+             if search_query.relationship_filter:
+                 filtered_memories = []
+                 for memory in memories:
+                     # Check if memory has any of the specified relationship types
+                     if hasattr(memory, 'relationships') and memory.relationships:
+                         # relationships is a dict like {"SOLVES": ["title1", "title2"], ...}
+                         has_matching_relationship = any(
+                             rel_type in search_query.relationship_filter
+                             for rel_type in memory.relationships.keys()
+                         )
+                         if has_matching_relationship:
+                             filtered_memories.append(memory)
+                     else:
+                         # If relationship_filter is specified but memory has no relationships,
+                         # we need to query relationships manually
+                         query_rels = """
+                             SELECT rel_type FROM relationships
+                             WHERE from_id = ? OR to_id = ?
+                         """
+                         rel_result = self.backend.execute_sync(
+                             query_rels,
+                             (memory.id, memory.id)
+                         )
+                         rel_types = {row['rel_type'] for row in rel_result}
+                         if any(rel_type in search_query.relationship_filter for rel_type in rel_types):
+                             filtered_memories.append(memory)
+                 memories = filtered_memories
+
+             logger.info(f"Found {len(memories)} memories for search query")
+             return memories
+
+         except Exception as e:
+             if isinstance(e, DatabaseConnectionError):
+                 raise
+             logger.error(f"Failed to search memories: {e}")
+             raise DatabaseConnectionError(f"Failed to search memories: {e}")
+
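# Usage sketch (not from the package): SearchQuery field names are taken
# from the attribute accesses above; the keyword-constructor style is an
# assumption.
#
#     results = await db.search_memories(SearchQuery(
#         query="retry backoff",
#         search_tolerance="normal",           # "strict" disables stemming
#         memory_types=[MemoryType.SOLUTION],  # member name assumed
#         min_importance=0.5,
#         limit=10,
#     ))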
+     async def search_memories_paginated(self, search_query: SearchQuery) -> PaginatedResult:
+         """
+         Search for memories with pagination support.
+
+         Args:
+             search_query: SearchQuery object with filter criteria, limit, and offset
+
+         Returns:
+             PaginatedResult with memories and pagination metadata
+
+         Raises:
+             DatabaseConnectionError: If search fails
+         """
+         try:
+             # Build SQL WHERE conditions (same logic as search_memories)
+             where_conditions = ["label = 'Memory'"]
+             params = []
+
+             # Multi-term search (takes precedence over single query)
+             if search_query.terms:
+                 tolerance = search_query.search_tolerance or "normal"
+                 match_mode = search_query.match_mode or "any"
+
+                 term_conditions = []
+                 for term in search_query.terms:
+                     if tolerance == "strict":
+                         pattern = f"%{term.lower()}%"
+                         term_conditions.append(
+                             "(json_extract(properties, '$.title') LIKE ? OR "
+                             "json_extract(properties, '$.content') LIKE ? OR "
+                             "json_extract(properties, '$.summary') LIKE ?)"
+                         )
+                         params.extend([pattern, pattern, pattern])
+                     else:
+                         patterns = _generate_fuzzy_patterns(term)
+                         pattern_conditions = []
+                         for pattern, weight in patterns:
+                             pattern_conditions.append(
+                                 "(json_extract(properties, '$.title') LIKE ? OR "
+                                 "json_extract(properties, '$.content') LIKE ? OR "
+                                 "json_extract(properties, '$.summary') LIKE ?)"
+                             )
+                             params.extend([pattern, pattern, pattern])
+                         if pattern_conditions:
+                             term_conditions.append(f"({' OR '.join(pattern_conditions)})")
+
+                 if term_conditions:
+                     if match_mode == "all":
+                         where_conditions.append(f"({' AND '.join(term_conditions)})")
+                     else:
+                         where_conditions.append(f"({' OR '.join(term_conditions)})")
+
+             elif search_query.query:
+                 tolerance = search_query.search_tolerance or "normal"
+
+                 if tolerance == "strict":
+                     pattern = f"%{search_query.query.lower()}%"
+                     pattern_conditions = [
+                         "(json_extract(properties, '$.title') LIKE ? OR "
+                         "json_extract(properties, '$.content') LIKE ? OR "
+                         "json_extract(properties, '$.summary') LIKE ?)"
+                     ]
+                     params.extend([pattern, pattern, pattern])
+                     where_conditions.append(f"({' OR '.join(pattern_conditions)})")
+                 else:
+                     patterns = _generate_fuzzy_patterns(search_query.query)
+                     pattern_conditions = []
+                     for pattern, weight in patterns:
+                         pattern_conditions.append(
+                             "(json_extract(properties, '$.title') LIKE ? OR "
+                             "json_extract(properties, '$.content') LIKE ? OR "
+                             "json_extract(properties, '$.summary') LIKE ?)"
+                         )
+                         params.extend([pattern, pattern, pattern])
+                     if pattern_conditions:
+                         where_conditions.append(f"({' OR '.join(pattern_conditions)})")
+
+             # Memory type filter
+             if search_query.memory_types:
+                 type_placeholders = ','.join('?' * len(search_query.memory_types))
+                 where_conditions.append(f"json_extract(properties, '$.type') IN ({type_placeholders})")
+                 params.extend([t.value for t in search_query.memory_types])
+
+             # Tags filter
+             if search_query.tags:
+                 tag_conditions = []
+                 for tag in search_query.tags:
+                     tag_conditions.append("json_extract(properties, '$.tags') LIKE ?")
+                     params.append(f'%"{tag}"%')
+                 where_conditions.append(f"({' OR '.join(tag_conditions)})")
+
+             # Project path filter
+             if search_query.project_path:
+                 where_conditions.append("json_extract(properties, '$.context_project_path') = ?")
+                 params.append(search_query.project_path)
+
+             # Importance filter
+             if search_query.min_importance is not None:
+                 where_conditions.append("CAST(json_extract(properties, '$.importance') AS REAL) >= ?")
+                 params.append(search_query.min_importance)
+
+             # Confidence filter
+             if search_query.min_confidence is not None:
+                 where_conditions.append("CAST(json_extract(properties, '$.confidence') AS REAL) >= ?")
+                 params.append(search_query.min_confidence)
+
+             # Date filters
+             if search_query.created_after:
+                 where_conditions.append("json_extract(properties, '$.created_at') >= ?")
+                 params.append(search_query.created_after.isoformat())
+
+             if search_query.created_before:
+                 where_conditions.append("json_extract(properties, '$.created_at') <= ?")
+                 params.append(search_query.created_before.isoformat())
+
+             # Build where clause
+             where_clause = " AND ".join(where_conditions)
+
+             # First, get total count
+             count_query = f"SELECT COUNT(*) as total FROM nodes WHERE {where_clause}"
+             count_result = self.backend.execute_sync(count_query, tuple(params))
+             total_count = count_result[0]['total'] if count_result else 0
+
+             # Then get paginated results
+             results_query = f"""
+                 SELECT properties FROM nodes
+                 WHERE {where_clause}
+                 ORDER BY
+                     CAST(json_extract(properties, '$.importance') AS REAL) DESC,
+                     json_extract(properties, '$.created_at') DESC
+                 LIMIT ? OFFSET ?
+             """
+             results_params = params + [search_query.limit, search_query.offset]
+
+             result = self.backend.execute_sync(results_query, tuple(results_params))
+
+             memories = []
+             for row in result:
+                 properties = json.loads(row['properties'])
+                 memory = self._properties_to_memory(properties)
+                 if memory:
+                     memories.append(memory)
+
+             # Calculate pagination metadata
+             has_more = (search_query.offset + search_query.limit) < total_count
+             next_offset = (search_query.offset + search_query.limit) if has_more else None
+
+             logger.info(f"Found {len(memories)} memories (page {search_query.offset}-{search_query.offset + len(memories)} of {total_count})")
+
+             return PaginatedResult(
+                 results=memories,
+                 total_count=total_count,
+                 limit=search_query.limit,
+                 offset=search_query.offset,
+                 has_more=has_more,
+                 next_offset=next_offset
+             )
+
+         except Exception as e:
+             if isinstance(e, DatabaseConnectionError):
+                 raise
+             logger.error(f"Failed to search memories (paginated): {e}")
+             raise DatabaseConnectionError(f"Failed to search memories (paginated): {e}")
+
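# Paging sketch (not from the package): walking a large result set with the
# has_more/next_offset metadata returned above.
#
#     offset = 0
#     while True:
#         page = await db.search_memories_paginated(
#             SearchQuery(query="timeout", limit=50, offset=offset))
#         for memory in page.results:
#             ...  # process each memory
#         if not page.has_more:
#             break
#         offset = page.next_offset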
+     async def _enrich_search_results(
+         self,
+         memories: List[Memory],
+         query: Optional[str] = None
+     ) -> List[Memory]:
+         """
+         Enrich search results with relationship context and match quality hints.
+
+         Args:
+             memories: List of memories to enrich
+             query: Original search query for match analysis
+
+         Returns:
+             List of enriched Memory objects with relationships, match_info, and context_summary
+         """
+         try:
+             enriched_memories = []
+
+             for memory in memories:
+                 # Get immediate relationships for this memory
+                 related = await self.get_related_memories(
+                     memory.id,
+                     relationship_types=None,  # Get all types
+                     max_depth=1  # Only immediate relationships
+                 )
+
+                 # Group relationships by type
+                 relationships_by_type = {}
+                 for related_memory, relationship in related:
+                     rel_type_key = relationship.type.value.lower()
+
+                     if rel_type_key not in relationships_by_type:
+                         relationships_by_type[rel_type_key] = []
+
+                     # Add related memory title to the list
+                     relationships_by_type[rel_type_key].append(related_memory.title)
+
+                 # Add match quality hints
+                 match_info = self._generate_match_info(memory, query)
+
+                 # Generate context summary
+                 context_summary = self._generate_context_summary(
+                     memory,
+                     relationships_by_type
+                 )
+
+                 # Update memory with enriched data
+                 memory.relationships = relationships_by_type if relationships_by_type else {}
+                 memory.match_info = match_info
+                 memory.context_summary = context_summary
+
+                 enriched_memories.append(memory)
+
+             return enriched_memories
+
+         except Exception as e:
+             # If enrichment fails, log warning and return original memories
+             logger.warning(f"Failed to enrich search results: {e}")
+             return memories
+
+     def _generate_match_info(
+         self,
+         memory: Memory,
+         query: Optional[str] = None
+     ) -> Dict[str, Any]:
+         """
+         Generate match quality hints for a search result.
+
+         Args:
+             memory: Memory object
+             query: Search query string
+
+         Returns:
+             Dictionary with match information
+         """
+         matched_fields = []
+         matched_terms = []
+         match_quality = "low"
+
+         if query:
+             query_lower = query.lower()
+             query_terms = query_lower.split()
+
+             # Check which fields matched
+             if memory.title and query_lower in memory.title.lower():
+                 matched_fields.append("title")
+                 match_quality = "high"  # Title matches are high quality
+
+             if memory.content and query_lower in memory.content.lower():
+                 matched_fields.append("content")
+                 if match_quality == "low":
+                     match_quality = "medium"
+
+             if memory.summary and query_lower in memory.summary.lower():
+                 matched_fields.append("summary")
+                 if match_quality == "low":
+                     match_quality = "medium"
+
+             # Check tags
+             for tag in memory.tags:
+                 if any(term in tag.lower() for term in query_terms):
+                     matched_fields.append("tags")
+                     break
+
+             # Identify which terms matched
+             for term in query_terms:
+                 term_found = False
+                 if memory.title and term in memory.title.lower():
+                     term_found = True
+                 elif memory.content and term in memory.content.lower():
+                     term_found = True
+                 elif memory.summary and term in memory.summary.lower():
+                     term_found = True
+
+                 if term_found:
+                     matched_terms.append(term)
+
+         return {
+             "matched_fields": matched_fields,
+             "matched_terms": matched_terms,
+             "match_quality": match_quality
+         }
+
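# Example output (derived from the logic above, not from the package docs):
# a single-word query "timeout" against a memory titled "Retry on timeout"
# yields a full-query title hit:
#
#     {"matched_fields": ["title"],
#      "matched_terms": ["timeout"],
#      "match_quality": "high"}
#
# Note that a multi-word query only counts a field in matched_fields when
# the whole query appears as a substring; individual words still show up
# in matched_terms.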
+     def _generate_context_summary(
+         self,
+         memory: Memory,
+         relationships: Dict[str, List[str]]
+     ) -> str:
+         """
+         Generate a natural language context summary for a memory.
+
+         Args:
+             memory: Memory object
+             relationships: Dict of relationship types to related memory titles
+
+         Returns:
+             Concise natural language summary (<100 chars)
+         """
+         summary_parts = []
+
+         # Start with memory type
+         summary_parts.append(memory.type.value.replace('_', ' ').capitalize())
+
+         # Add key relationship information
+         if 'solves' in relationships and relationships['solves']:
+             problems = relationships['solves'][:2]  # Limit to 2
+             summary_parts.append(f"solves {', '.join(problems)}")
+         elif 'solved_by' in relationships and relationships['solved_by']:
+             solutions = relationships['solved_by'][:1]
+             summary_parts.append(f"solved by {solutions[0]}")
+
+         if 'used_in' in relationships and relationships['used_in']:
+             projects = relationships['used_in'][:1]
+             summary_parts.append(f"in {projects[0]}")
+
+         # Join parts with appropriate separators
+         if len(summary_parts) == 1:
+             return summary_parts[0]
+         elif len(summary_parts) == 2:
+             return f"{summary_parts[0]} {summary_parts[1]}"
+         else:
+             return f"{summary_parts[0]} {summary_parts[1]}, {summary_parts[2]}"
+
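# Example output (derived from the logic above): a solution memory that
# solves "Flaky API calls" and is used in "billing-service" summarizes as:
#
#     "Solution solves Flaky API calls, in billing-service"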
+     async def update_memory(self, memory: Memory) -> bool:
+         """
+         Update an existing memory.
+
+         Args:
+             memory: Memory object with updated fields
+
+         Returns:
+             True if update succeeded, False otherwise
+
+         Raises:
+             ValidationError: If memory ID is missing
+             DatabaseConnectionError: If update fails
+         """
+         try:
+             if not memory.id:
+                 raise ValidationError("Memory must have an ID to update")
+
+             memory.updated_at = datetime.now(timezone.utc)
+
+             # Convert memory to properties dict
+             memory_node = MemoryNode(memory=memory)
+             properties = memory_node.to_neo4j_properties()
+             properties_json = json.dumps(properties)
+
+             result = self.backend.execute_sync(
+                 """
+                 UPDATE nodes
+                 SET properties = ?, updated_at = CURRENT_TIMESTAMP
+                 WHERE id = ? AND label = 'Memory'
+                 """,
+                 (properties_json, memory.id)
+             )
+
+             self.backend.commit()
+
+             # Check if any rows were updated
+             # SQLite doesn't return affected rows in execute_sync result,
+             # so we need to check if the memory exists
+             check = self.backend.execute_sync(
+                 "SELECT id FROM nodes WHERE id = ? AND label = 'Memory'",
+                 (memory.id,)
+             )
+
+             success = len(check) > 0
+             if success:
+                 logger.info(f"Updated memory: {memory.id}")
+
+             return success
+
+         except Exception as e:
+             self.backend.rollback()
+             if isinstance(e, (ValidationError, DatabaseConnectionError)):
+                 raise
+             logger.error(f"Failed to update memory {memory.id}: {e}")
+             raise DatabaseConnectionError(f"Failed to update memory: {e}")
+
+     async def delete_memory(self, memory_id: str) -> bool:
+         """
+         Delete a memory and all its relationships.
+
+         Args:
+             memory_id: ID of the memory to delete
+
+         Returns:
+             True if deletion succeeded, False otherwise
+
+         Raises:
+             DatabaseConnectionError: If deletion fails
+         """
+         try:
+             # Check if memory exists
+             existing = self.backend.execute_sync(
+                 "SELECT id FROM nodes WHERE id = ? AND label = 'Memory'",
+                 (memory_id,)
+             )
+
+             if not existing:
+                 return False
+
+             # Delete relationships (CASCADE should handle this, but let's be explicit)
+             self.backend.execute_sync(
+                 "DELETE FROM relationships WHERE from_id = ? OR to_id = ?",
+                 (memory_id, memory_id)
+             )
+
+             # Delete the memory node
+             self.backend.execute_sync(
+                 "DELETE FROM nodes WHERE id = ? AND label = 'Memory'",
+                 (memory_id,)
+             )
+
+             self.backend.commit()
+             logger.info(f"Deleted memory: {memory_id}")
+             return True
+
+         except Exception as e:
+             self.backend.rollback()
+             if isinstance(e, DatabaseConnectionError):
+                 raise
+             logger.error(f"Failed to delete memory {memory_id}: {e}")
+             raise DatabaseConnectionError(f"Failed to delete memory: {e}")
+
+     async def create_relationship(
+         self,
+         from_memory_id: str,
+         to_memory_id: str,
+         relationship_type: RelationshipType,
+         properties: RelationshipProperties = None,
+         **kwargs
+     ) -> str:
+         """
+         Create a relationship between two memories with bi-temporal tracking.
+
+         Args:
+             from_memory_id: Source memory ID
+             to_memory_id: Target memory ID
+             relationship_type: Type of relationship
+             properties: Relationship properties (optional)
+             **kwargs: Additional parameters including:
+                 - valid_from: When the fact became true (defaults to now)
+                 - strength: Relationship strength (0.0-1.0)
+                 - confidence: Confidence level (0.0-1.0)
+                 - context: Optional context string
+
+         Returns:
+             ID of the created relationship
+
+         Raises:
+             RelationshipError: If relationship creation fails
+             DatabaseConnectionError: If database operation fails
+         """
+         try:
+             relationship_id = str(uuid.uuid4())
+
+             if properties is None:
+                 properties = RelationshipProperties()
+
+             # Override property fields from kwargs if provided
+             if 'strength' in kwargs:
+                 properties.strength = kwargs['strength']
+             if 'confidence' in kwargs:
+                 properties.confidence = kwargs['confidence']
+             if 'context' in kwargs:
+                 properties.context = kwargs['context']
+             if 'valid_from' in kwargs:
+                 valid_from_value = kwargs['valid_from']
+                 if not isinstance(valid_from_value, datetime):
+                     raise ValidationError(
+                         "valid_from must be a datetime object",
+                         {"provided": type(valid_from_value).__name__}
+                     )
+                 if valid_from_value > datetime.now(timezone.utc):
+                     logger.warning(f"valid_from is in the future: {valid_from_value.isoformat()}")
+                 properties.valid_from = valid_from_value
+
+             # Convert properties to dict
+             props_dict = properties.model_dump()
+             props_dict['id'] = relationship_id
+             props_dict['created_at'] = props_dict['created_at'].isoformat()
+             props_dict['last_validated'] = props_dict['last_validated'].isoformat()
+
+             # Handle temporal fields
+             props_dict['valid_from'] = props_dict['valid_from'].isoformat()
+             props_dict['recorded_at'] = props_dict['recorded_at'].isoformat()
+             if props_dict.get('valid_until'):
+                 props_dict['valid_until'] = props_dict['valid_until'].isoformat()
+
+             # Serialize properties as JSON
+             properties_json = json.dumps(props_dict)
+
+             # Verify both memories exist
+             from_exists = self.backend.execute_sync(
+                 "SELECT id FROM nodes WHERE id = ? AND label = 'Memory'",
+                 (from_memory_id,)
+             )
+             to_exists = self.backend.execute_sync(
+                 "SELECT id FROM nodes WHERE id = ? AND label = 'Memory'",
+                 (to_memory_id,)
+             )
+
+             if not from_exists or not to_exists:
+                 raise RelationshipError(
+                     f"One or both memories not found: {from_memory_id}, {to_memory_id}",
+                     {"from_id": from_memory_id, "to_id": to_memory_id}
+                 )
+
+             # Check for cycles (unless explicitly allowed by configuration)
+             if not Config.ALLOW_RELATIONSHIP_CYCLES:
+                 cycle_detected = await has_cycle(
+                     self,
+                     from_memory_id,
+                     to_memory_id,
+                     relationship_type
+                 )
+                 if cycle_detected:
+                     raise ValidationError(
+                         f"Cannot create relationship {from_memory_id} → {to_memory_id}: "
+                         f"Would create a cycle in the {relationship_type.value} relationship graph",
+                         {
+                             "from_id": from_memory_id,
+                             "to_id": to_memory_id,
+                             "relationship_type": relationship_type.value,
+                             "suggestion": "Check your relationship chain before creating, or enable cycles with MEMORY_ALLOW_CYCLES=true"
+                         }
+                     )
+
+             # Insert relationship with temporal fields
+             self.backend.execute_sync(
+                 """
+                 INSERT INTO relationships (
+                     id, from_id, to_id, rel_type, properties, created_at,
+                     valid_from, valid_until, recorded_at, invalidated_by
+                 )
+                 VALUES (?, ?, ?, ?, ?, CURRENT_TIMESTAMP, ?, ?, ?, ?)
+                 """,
+                 (
+                     relationship_id, from_memory_id, to_memory_id,
+                     relationship_type.value, properties_json,
+                     props_dict['valid_from'],
+                     props_dict.get('valid_until'),
+                     props_dict['recorded_at'],
+                     props_dict.get('invalidated_by')
+                 )
+             )
+
+             self.backend.commit()
+             logger.info(f"Created relationship: {relationship_type.value} between {from_memory_id} and {to_memory_id}")
+             return relationship_id
+
+         except Exception as e:
+             self.backend.rollback()
+             if isinstance(e, (RelationshipError, DatabaseConnectionError, ValidationError)):
+                 raise
+             logger.error(f"Failed to create relationship: {e}")
+             raise RelationshipError(f"Failed to create relationship: {e}")
+
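# Usage sketch (not from the package; RelationshipType.SOLVES is assumed
# from the 'solves' keys used in the enrichment code above, and the IDs
# are placeholders):
#
#     rel_id = await db.create_relationship(
#         from_memory_id=solution_id,
#         to_memory_id=problem_id,
#         relationship_type=RelationshipType.SOLVES,
#         strength=0.9,
#         confidence=0.8,
#         valid_from=datetime(2024, 1, 15, tzinfo=timezone.utc),
#     )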
+     async def get_related_memories(
+         self,
+         memory_id: str,
+         relationship_types: List[RelationshipType] = None,
+         max_depth: int = 2,
+         as_of: datetime = None
+     ) -> List[Tuple[Memory, Relationship]]:
+         """
+         Get memories related to a specific memory, with optional point-in-time query.
+
+         Args:
+             memory_id: ID of the memory to find relations for
+             relationship_types: Filter by specific relationship types (optional)
+             max_depth: Maximum depth for graph traversal (currently only supports depth 1)
+             as_of: Optional datetime for point-in-time query (defaults to current time)
+
+         Returns:
+             List of tuples containing (Memory, Relationship)
+
+         Raises:
+             DatabaseConnectionError: If query fails
+         """
+         try:
+             # Build relationship type filter
+             where_conditions = ["(r.from_id = ? OR r.to_id = ?)"]
+             params = [memory_id, memory_id]
+
+             # Add temporal filter for current or point-in-time query
+             if as_of is None:
+                 # Default: only current relationships (valid_until IS NULL)
+                 where_conditions.append("r.valid_until IS NULL")
+             else:
+                 # Point-in-time query
+                 where_conditions.append("r.valid_from <= ?")
+                 where_conditions.append("(r.valid_until IS NULL OR r.valid_until > ?)")
+                 as_of_str = as_of.isoformat()
+                 params.extend([as_of_str, as_of_str])
+
+             if relationship_types:
+                 type_placeholders = ','.join('?' * len(relationship_types))
+                 where_conditions.append(f"r.rel_type IN ({type_placeholders})")
+                 params.extend([rt.value for rt in relationship_types])
+
+             where_clause = " AND ".join(where_conditions)
+
+             # Query for relationships and related nodes
+             # For simplicity, we only do depth 1 (direct relationships)
+             query = f"""
+                 SELECT
+                     n.id as related_id,
+                     n.properties as related_props,
+                     r.id as rel_id,
+                     r.rel_type as rel_type,
+                     r.properties as rel_props,
+                     r.from_id as rel_from,
+                     r.to_id as rel_to
+                 FROM relationships r
+                 JOIN nodes n ON (
+                     CASE
+                         WHEN r.from_id = ? THEN n.id = r.to_id
+                         WHEN r.to_id = ? THEN n.id = r.from_id
+                     END
+                 )
+                 WHERE {where_clause}
+                     AND n.label = 'Memory'
+                     AND n.id != ?
+                 ORDER BY
+                     CAST(json_extract(r.properties, '$.strength') AS REAL) DESC,
+                     CAST(json_extract(n.properties, '$.importance') AS REAL) DESC
+                 LIMIT 20
+             """
+
+             # Add memory_id params for the JOIN conditions and final filter
+             query_params = [memory_id, memory_id] + params + [memory_id]
+
+             result = self.backend.execute_sync(query, tuple(query_params))
+
+             related_memories = []
+             for row in result:
+                 # Parse related memory
+                 related_props = json.loads(row['related_props'])
+                 memory = self._properties_to_memory(related_props)
+
+                 if memory:
+                     # Parse relationship properties
+                     rel_props = json.loads(row['rel_props'])
+                     rel_type_str = row['rel_type']
+
+                     try:
+                         rel_type = RelationshipType(rel_type_str)
+                     except ValueError:
+                         rel_type = RelationshipType.RELATED_TO
+
+                     relationship = Relationship(
+                         id=row['rel_id'],
+                         from_memory_id=row['rel_from'],
+                         to_memory_id=row['rel_to'],
+                         type=rel_type,
+                         properties=RelationshipProperties(
+                             strength=rel_props.get("strength", 0.5),
+                             confidence=rel_props.get("confidence", 0.8),
+                             context=rel_props.get("context"),
+                             evidence_count=rel_props.get("evidence_count", 1)
+                         )
+                     )
+                     related_memories.append((memory, relationship))
+
+             logger.info(f"Found {len(related_memories)} related memories for {memory_id}")
+             return related_memories
+
+         except Exception as e:
+             if isinstance(e, DatabaseConnectionError):
+                 raise
+             logger.error(f"Failed to get related memories for {memory_id}: {e}")
+             raise DatabaseConnectionError(f"Failed to get related memories: {e}")
+
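# Point-in-time sketch (not from the package): ask what was related to a
# memory at the start of the year, including edges that have since been
# invalidated but were valid then.
#
#     then = datetime(2024, 1, 1, tzinfo=timezone.utc)
#     for related, rel in await db.get_related_memories(memory_id, as_of=then):
#         print(rel.type.value, "->", related.title)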
+     async def invalidate_relationship(
+         self,
+         relationship_id: str,
+         invalidated_by: str = None
+     ) -> None:
+         """
+         Invalidate a relationship by setting valid_until to now.
+
+         Args:
+             relationship_id: ID of the relationship to invalidate
+             invalidated_by: Optional ID of relationship that supersedes this one
+
+         Raises:
+             RelationshipError: If relationship not found
+             DatabaseConnectionError: If database operation fails
+         """
+         try:
+             # Check if relationship exists
+             result = self.backend.execute_sync(
+                 "SELECT id FROM relationships WHERE id = ?",
+                 (relationship_id,)
+             )
+
+             if not result:
+                 raise RelationshipError(
+                     f"Relationship not found: {relationship_id}",
+                     {"relationship_id": relationship_id}
+                 )
+
+             # Set valid_until to now
+             now = datetime.now(timezone.utc).isoformat()
+             self.backend.execute_sync(
+                 """
+                 UPDATE relationships
+                 SET valid_until = ?, invalidated_by = ?
+                 WHERE id = ?
+                 """,
+                 (now, invalidated_by, relationship_id)
+             )
+
+             self.backend.commit()
+             logger.info(f"Invalidated relationship: {relationship_id}")
+
+         except Exception as e:
+             self.backend.rollback()
+             if isinstance(e, (RelationshipError, DatabaseConnectionError)):
+                 raise
+             logger.error(f"Failed to invalidate relationship {relationship_id}: {e}")
+             raise DatabaseConnectionError(f"Failed to invalidate relationship: {e}")
+
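# Supersession sketch (not from the package): close out an old edge and
# point it at its replacement, preserving the bi-temporal history.
#
#     new_rel_id = await db.create_relationship(a_id, b_id, RelationshipType.SOLVES)
#     await db.invalidate_relationship(old_rel_id, invalidated_by=new_rel_id)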
+     async def get_relationship_history(
+         self,
+         memory_id: str,
+         relationship_types: List[RelationshipType] = None,
+         limit: int = 100,
+         offset: int = 0
+     ) -> List[Relationship]:
+         """
+         Get full history of relationships for a memory, including invalidated ones.
+
+         Args:
+             memory_id: ID of the memory to get history for
+             relationship_types: Optional filter by relationship types
+             limit: Maximum number of results to return (default: 100)
+             offset: Number of results to skip for pagination (default: 0)
+
+         Returns:
+             List of Relationship objects, ordered chronologically by valid_from
+
+         Raises:
+             DatabaseConnectionError: If query fails
+         """
+         try:
+             # Build query
+             where_conditions = ["(r.from_id = ? OR r.to_id = ?)"]
+             params = [memory_id, memory_id]
+
+             if relationship_types:
+                 type_placeholders = ','.join('?' * len(relationship_types))
+                 where_conditions.append(f"r.rel_type IN ({type_placeholders})")
+                 params.extend([rt.value for rt in relationship_types])
+
+             where_clause = " AND ".join(where_conditions)
+
+             # Query for all relationships (including invalidated ones)
+             query = f"""
+                 SELECT
+                     r.id as rel_id,
+                     r.from_id as rel_from,
+                     r.to_id as rel_to,
+                     r.rel_type as rel_type,
+                     r.properties as rel_props,
+                     r.valid_from,
+                     r.valid_until,
+                     r.recorded_at,
+                     r.invalidated_by
+                 FROM relationships r
+                 WHERE {where_clause}
+                 ORDER BY r.valid_from ASC
+                 LIMIT ? OFFSET ?
+             """
+
+             params_query = [memory_id, memory_id] + params[2:] + [limit, offset]
+             result = self.backend.execute_sync(query, tuple(params_query))
+
+             relationships = []
+             for row in result:
+                 rel_props = json.loads(row['rel_props'])
+                 rel_type_str = row['rel_type']
+
+                 try:
+                     rel_type = RelationshipType(rel_type_str)
+                 except ValueError:
+                     rel_type = RelationshipType.RELATED_TO
+
+                 # Parse temporal fields
+                 valid_from = datetime.fromisoformat(row['valid_from']) if row['valid_from'] else None
+                 valid_until = datetime.fromisoformat(row['valid_until']) if row['valid_until'] else None
+                 recorded_at = datetime.fromisoformat(row['recorded_at']) if row['recorded_at'] else None
+
+                 relationship = Relationship(
+                     id=row['rel_id'],
+                     from_memory_id=row['rel_from'],
+                     to_memory_id=row['rel_to'],
+                     type=rel_type,
+                     properties=RelationshipProperties(
+                         strength=rel_props.get("strength", 0.5),
+                         confidence=rel_props.get("confidence", 0.8),
+                         context=rel_props.get("context"),
+                         evidence_count=rel_props.get("evidence_count", 1),
+                         valid_from=valid_from,
+                         valid_until=valid_until,
+                         recorded_at=recorded_at,
+                         invalidated_by=row['invalidated_by']
+                     )
+                 )
+                 relationships.append(relationship)
+
+             logger.info(f"Found {len(relationships)} relationships in history for {memory_id}")
+             return relationships
+
+         except Exception as e:
+             if isinstance(e, DatabaseConnectionError):
+                 raise
+             logger.error(f"Failed to get relationship history for {memory_id}: {e}")
+             raise DatabaseConnectionError(f"Failed to get relationship history: {e}")
+
+     async def what_changed(
+         self,
+         since: datetime
+     ) -> Dict[str, List[Relationship]]:
+         """
+         Get all relationship changes since a given time.
+
+         Args:
+             since: DateTime to query changes from
+
+         Returns:
+             Dictionary with "new_relationships" and "invalidated_relationships" lists
+
+         Raises:
+             DatabaseConnectionError: If query fails
+         """
+         try:
+             since_str = since.isoformat()
+
+             # Query for new relationships (recorded_at >= since)
+             new_query = """
+                 SELECT
+                     r.id as rel_id,
+                     r.from_id as rel_from,
+                     r.to_id as rel_to,
+                     r.rel_type as rel_type,
+                     r.properties as rel_props,
+                     r.valid_from,
+                     r.valid_until,
+                     r.recorded_at,
+                     r.invalidated_by
+                 FROM relationships r
+                 WHERE r.recorded_at >= ?
+                 ORDER BY r.recorded_at DESC
+             """
+
+             new_result = self.backend.execute_sync(new_query, (since_str,))
+
+             # Query for invalidated relationships (valid_until set since)
+             invalidated_query = """
+                 SELECT
+                     r.id as rel_id,
+                     r.from_id as rel_from,
+                     r.to_id as rel_to,
+                     r.rel_type as rel_type,
+                     r.properties as rel_props,
+                     r.valid_from,
+                     r.valid_until,
+                     r.recorded_at,
+                     r.invalidated_by
+                 FROM relationships r
+                 WHERE r.valid_until IS NOT NULL AND r.valid_until >= ?
+                 ORDER BY r.valid_until DESC
+             """
+
+             invalidated_result = self.backend.execute_sync(invalidated_query, (since_str,))
+
+             # Parse results
+             new_relationships = []
+             for row in new_result:
+                 rel_props = json.loads(row['rel_props'])
+                 rel_type_str = row['rel_type']
+
+                 try:
+                     rel_type = RelationshipType(rel_type_str)
+                 except ValueError:
+                     rel_type = RelationshipType.RELATED_TO
+
+                 valid_from = datetime.fromisoformat(row['valid_from']) if row['valid_from'] else None
+                 valid_until = datetime.fromisoformat(row['valid_until']) if row['valid_until'] else None
+                 recorded_at = datetime.fromisoformat(row['recorded_at']) if row['recorded_at'] else None
+
+                 relationship = Relationship(
+                     id=row['rel_id'],
+                     from_memory_id=row['rel_from'],
+                     to_memory_id=row['rel_to'],
+                     type=rel_type,
+                     properties=RelationshipProperties(
+                         strength=rel_props.get("strength", 0.5),
+                         confidence=rel_props.get("confidence", 0.8),
+                         context=rel_props.get("context"),
+                         valid_from=valid_from,
+                         valid_until=valid_until,
+                         recorded_at=recorded_at,
+                         invalidated_by=row['invalidated_by']
+                     )
+                 )
+                 new_relationships.append(relationship)
+
+             invalidated_relationships = []
+             for row in invalidated_result:
+                 rel_props = json.loads(row['rel_props'])
+                 rel_type_str = row['rel_type']
+
+                 try:
+                     rel_type = RelationshipType(rel_type_str)
+                 except ValueError:
+                     rel_type = RelationshipType.RELATED_TO
+
+                 valid_from = datetime.fromisoformat(row['valid_from']) if row['valid_from'] else None
+                 valid_until = datetime.fromisoformat(row['valid_until']) if row['valid_until'] else None
+                 recorded_at = datetime.fromisoformat(row['recorded_at']) if row['recorded_at'] else None
+
+                 relationship = Relationship(
+                     id=row['rel_id'],
+                     from_memory_id=row['rel_from'],
+                     to_memory_id=row['rel_to'],
+                     type=rel_type,
+                     properties=RelationshipProperties(
+                         strength=rel_props.get("strength", 0.5),
+                         confidence=rel_props.get("confidence", 0.8),
+                         context=rel_props.get("context"),
+                         valid_from=valid_from,
+                         valid_until=valid_until,
+                         recorded_at=recorded_at,
+                         invalidated_by=row['invalidated_by']
+                     )
+                 )
+                 invalidated_relationships.append(relationship)
+
+             logger.info(f"Found {len(new_relationships)} new and {len(invalidated_relationships)} invalidated relationships since {since}")
+             return {
+                 "new_relationships": new_relationships,
+                 "invalidated_relationships": invalidated_relationships
+             }
+
+         except Exception as e:
+             if isinstance(e, DatabaseConnectionError):
+                 raise
+             logger.error(f"Failed to get changes since {since}: {e}")
+             raise DatabaseConnectionError(f"Failed to get changes: {e}")
+
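# Usage sketch (not from the package): review the last 24 hours of
# relationship churn using the module's timedelta import.
#
#     changes = await db.what_changed(datetime.now(timezone.utc) - timedelta(days=1))
#     for rel in changes["new_relationships"]:
#         ...  # newly recorded edges
#     for rel in changes["invalidated_relationships"]:
#         ...  # edges closed out since then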
+     async def search_relationships_by_context(
+         self,
+         scope: Optional[str] = None,
+         conditions: Optional[List[str]] = None,
+         has_evidence: Optional[bool] = None,
+         evidence: Optional[List[str]] = None,
+         components: Optional[List[str]] = None,
+         temporal: Optional[str] = None,
+         limit: int = 20
+     ) -> List[Relationship]:
+         """
+         Search relationships by structured context fields.
+
+         This method queries relationships based on their extracted context structure
+         (scope, conditions, evidence, components, temporal). It parses the context
+         JSON from each relationship and filters based on the provided criteria.
+
+         Args:
+             scope: Filter by scope (partial, full, conditional)
+             conditions: Filter by conditions (OR logic - matches any)
+             has_evidence: Filter by presence/absence of evidence
+             evidence: Filter by specific evidence mentions (OR logic - matches any)
+             components: Filter by components mentioned (OR logic - matches any)
+             temporal: Filter by temporal information
+             limit: Maximum number of results to return (default: 20)
+
+         Returns:
+             List of Relationship objects matching the criteria, ordered by strength
+
+         Raises:
+             DatabaseConnectionError: If query fails
+
+         Examples:
+             # Find all partial implementations
+             await db.search_relationships_by_context(scope="partial")
+
+             # Find relationships verified by tests
+             await db.search_relationships_by_context(has_evidence=True)
+
+             # Find production-only relationships
+             await db.search_relationships_by_context(conditions=["production"])
+
+             # Combined filters: partial scope AND production condition
+             await db.search_relationships_by_context(
+                 scope="partial",
+                 conditions=["production"]
+             )
+         """
+         from .utils.context_extractor import parse_context
+
+         try:
+             # Get all relationships
+             query = """
+                 SELECT
+                     r.id as rel_id,
+                     r.from_id as rel_from,
+                     r.to_id as rel_to,
+                     r.rel_type as rel_type,
+                     r.properties as rel_props
+                 FROM relationships r
+             """
+
+             result = self.backend.execute_sync(query)
+
+             # Filter relationships in Python by parsing context
+             matching_relationships = []
+
+             for row in result:
+                 # Parse relationship properties
+                 rel_props = json.loads(row['rel_props'])
+                 context_text = rel_props.get("context")
+
+                 # Parse context to get structure
+                 context_struct = parse_context(context_text)
+
+                 # Apply filters
+                 matches = True
+
+                 # Filter by scope
+                 if scope is not None:
+                     if context_struct.get("scope") != scope:
+                         matches = False
+
+                 # Filter by conditions (OR logic - match any)
+                 if conditions is not None and matches:
+                     if not context_struct.get("conditions"):
+                         matches = False
+                     else:
+                         # Check if any provided condition matches any extracted condition
+                         extracted_conditions = context_struct.get("conditions", [])
+                         condition_match = any(
+                             any(cond.lower() in extracted.lower() for extracted in extracted_conditions)
+                             for cond in conditions
+                         )
+                         if not condition_match:
+                             matches = False
+
+                 # Filter by evidence presence
+                 if has_evidence is not None and matches:
+                     has_extracted_evidence = bool(context_struct.get("evidence"))
+                     if has_evidence != has_extracted_evidence:
+                         matches = False
+
+                 # Filter by specific evidence (OR logic - match any)
+                 if evidence is not None and matches:
+                     if not context_struct.get("evidence"):
+                         matches = False
+                     else:
+                         # Check if any provided evidence matches any extracted evidence
+                         extracted_evidence = context_struct.get("evidence", [])
+                         evidence_match = any(
+                             any(ev.lower() in extracted.lower() for extracted in extracted_evidence)
+                             for ev in evidence
+                         )
+                         if not evidence_match:
+                             matches = False
+
+                 # Filter by components (OR logic - match any)
+                 if components is not None and matches:
+                     if not context_struct.get("components"):
+                         matches = False
+                     else:
+                         # Check if any provided component matches any extracted component
+                         extracted_components = context_struct.get("components", [])
+                         component_match = any(
+                             any(comp.lower() in extracted.lower() for extracted in extracted_components)
+                             for comp in components
+                         )
+                         if not component_match:
+                             matches = False
+
+                 # Filter by temporal
+                 if temporal is not None and matches:
+                     extracted_temporal = context_struct.get("temporal")
+                     if not extracted_temporal or temporal.lower() not in extracted_temporal.lower():
+                         matches = False
+
+                 # If all filters match, add to results
+                 if matches:
+                     try:
+                         rel_type = RelationshipType(row['rel_type'])
+                     except ValueError:
+                         rel_type = RelationshipType.RELATED_TO
+
+                     relationship = Relationship(
+                         id=row['rel_id'],
+                         from_memory_id=row['rel_from'],
+                         to_memory_id=row['rel_to'],
+                         type=rel_type,
+                         properties=RelationshipProperties(
+                             strength=rel_props.get("strength", 0.5),
+                             confidence=rel_props.get("confidence", 0.8),
+                             context=rel_props.get("context"),
+                             evidence_count=rel_props.get("evidence_count", 1)
+                         )
+                     )
+                     matching_relationships.append(relationship)
+
+             # Sort by strength (descending) and limit
+             matching_relationships.sort(key=lambda r: r.properties.strength, reverse=True)
+             matching_relationships = matching_relationships[:limit]
+
+             logger.info(f"Found {len(matching_relationships)} relationships matching context filters")
+             return matching_relationships
+
+         except Exception as e:
+             if isinstance(e, DatabaseConnectionError):
+                 raise
+             logger.error(f"Failed to search relationships by context: {e}")
+             raise DatabaseConnectionError(f"Failed to search relationships by context: {e}")
+
1640
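A minimal usage sketch of the method above, assuming a connected database instance `db` and the driver coroutine around it (both are assumptions for illustration, not part of this file). Filters compose with AND semantics across parameters and OR semantics within each list parameter, as the docstring describes.

    import asyncio

    async def find_risky_partials(db):
        # Partial-scope relationships that apply only in production and carry
        # no supporting evidence - plausible candidates for review.
        rels = await db.search_relationships_by_context(
            scope="partial",
            conditions=["production"],
            has_evidence=False,
            limit=10,
        )
        for rel in rels:
            # Results arrive sorted by strength, strongest first.
            print(rel.type, rel.properties.strength, rel.properties.context)

    # asyncio.run(find_risky_partials(db))
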
+    async def get_memory_statistics(self) -> Dict[str, Any]:
+        """
+        Get database statistics and metrics.
+
+        Returns:
+            Dictionary containing various database statistics
+
+        Raises:
+            DatabaseConnectionError: If query fails
+        """
+        try:
+            stats = {}
+
+            # Total memories
+            result = self.backend.execute_sync(
+                "SELECT COUNT(*) as count FROM nodes WHERE label = 'Memory'"
+            )
+            stats['total_memories'] = result[0]['count'] if result else 0
+
+            # Memories by type
+            result = self.backend.execute_sync(
+                """
+                SELECT
+                    json_extract(properties, '$.type') as type,
+                    COUNT(*) as count
+                FROM nodes
+                WHERE label = 'Memory'
+                GROUP BY json_extract(properties, '$.type')
+                ORDER BY count DESC
+                """
+            )
+            stats['memories_by_type'] = {row['type']: row['count'] for row in result} if result else {}
+
+            # Total relationships
+            result = self.backend.execute_sync(
+                "SELECT COUNT(*) as count FROM relationships"
+            )
+            stats['total_relationships'] = result[0]['count'] if result else 0
+
+            # Average importance (AVG returns NULL when no rows match)
+            result = self.backend.execute_sync(
+                """
+                SELECT AVG(CAST(json_extract(properties, '$.importance') AS REAL)) as avg_importance
+                FROM nodes
+                WHERE label = 'Memory'
+                """
+            )
+            stats['avg_importance'] = (result[0]['avg_importance'] or 0.0) if result else 0.0
+
+            # Average confidence (AVG returns NULL when no rows match)
+            result = self.backend.execute_sync(
+                """
+                SELECT AVG(CAST(json_extract(properties, '$.confidence') AS REAL)) as avg_confidence
+                FROM nodes
+                WHERE label = 'Memory'
+                """
+            )
+            stats['avg_confidence'] = (result[0]['avg_confidence'] or 0.0) if result else 0.0
+
+            return stats
+
+        except Exception as e:
+            logger.error(f"Failed to get statistics: {e}")
+            raise DatabaseConnectionError(f"Failed to get statistics: {e}")
+
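A sketch of consuming the statistics, assuming a connected instance `db` (the name and the example values in the comment are assumptions). With the scalar extraction above, every value in the returned dictionary is a plain number or a flat mapping, so it serializes directly.

    import asyncio
    import json

    async def print_stats(db):
        stats = await db.get_memory_statistics()
        # Shape, illustratively: {"total_memories": 42,
        #   "memories_by_type": {"solution": 20, "problem": 12},
        #   "total_relationships": 57, "avg_importance": 0.61,
        #   "avg_confidence": 0.82}
        print(json.dumps(stats, indent=2))

    # asyncio.run(print_stats(db))
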
+    async def get_recent_activity(
+        self,
+        days: int = 7,
+        project: Optional[str] = None
+    ) -> Dict[str, Any]:
+        """
+        Get recent activity summary for session briefing.
+
+        Args:
+            days: Number of days to look back (default: 7)
+            project: Optional project path filter
+
+        Returns:
+            Dictionary containing:
+            - total_count: Total number of memories in timeframe
+            - memories_by_type: Count of memories grouped by type
+            - recent_memories: List of recent memories (limited to 20)
+            - unresolved_problems: List of problems with no SOLVES relationship
+            - days: Number of days queried
+            - project: Project filter applied (if any)
+
+        Raises:
+            DatabaseConnectionError: If query fails
+        """
+        try:
+            # Calculate cutoff date
+            cutoff_date = datetime.now(timezone.utc) - timedelta(days=days)
+            cutoff_iso = cutoff_date.isoformat()
+
+            # Build WHERE conditions
+            where_conditions = [
+                "label = 'Memory'",
+                "json_extract(properties, '$.created_at') >= ?"
+            ]
+            params = [cutoff_iso]
+
+            # Add project filter if specified
+            if project:
+                where_conditions.append("json_extract(properties, '$.context_project_path') = ?")
+                params.append(project)
+
+            where_clause = " AND ".join(where_conditions)
+
+            # Get total count
+            count_query = f"SELECT COUNT(*) as count FROM nodes WHERE {where_clause}"
+            count_result = self.backend.execute_sync(count_query, tuple(params))
+            total_count = count_result[0]['count'] if count_result else 0
+
+            # Get memories by type
+            type_query = f"""
+                SELECT
+                    json_extract(properties, '$.type') as type,
+                    COUNT(*) as count
+                FROM nodes
+                WHERE {where_clause}
+                GROUP BY json_extract(properties, '$.type')
+            """
+            type_result = self.backend.execute_sync(type_query, tuple(params))
+            memories_by_type = {row['type']: row['count'] for row in type_result} if type_result else {}
+
+            # Get recent memories (limited to 20)
+            recent_query = f"""
+                SELECT properties
+                FROM nodes
+                WHERE {where_clause}
+                ORDER BY json_extract(properties, '$.created_at') DESC
+                LIMIT 20
+            """
+            recent_result = self.backend.execute_sync(recent_query, tuple(params))
+
+            recent_memories = []
+            for row in recent_result:
+                properties = json.loads(row['properties'])
+                memory = self._properties_to_memory(properties)
+                if memory:
+                    recent_memories.append(memory)
+
+            # Find unresolved problems (problems with no incoming SOLVES relationships)
+            unresolved_query = f"""
+                SELECT n.properties
+                FROM nodes n
+                WHERE {where_clause}
+                AND json_extract(properties, '$.type') IN ('problem', 'error')
+                AND NOT EXISTS (
+                    SELECT 1
+                    FROM relationships r
+                    WHERE r.to_id = n.id
+                    AND r.rel_type IN ('SOLVES', 'FIXES', 'ADDRESSES')
+                )
+                ORDER BY CAST(json_extract(properties, '$.importance') AS REAL) DESC
+                LIMIT 10
+            """
+            unresolved_result = self.backend.execute_sync(unresolved_query, tuple(params))
+
+            unresolved_problems = []
+            for row in unresolved_result:
+                properties = json.loads(row['properties'])
+                memory = self._properties_to_memory(properties)
+                if memory:
+                    unresolved_problems.append(memory)
+
+            return {
+                "total_count": total_count,
+                "memories_by_type": memories_by_type,
+                "recent_memories": recent_memories,
+                "unresolved_problems": unresolved_problems,
+                "days": days,
+                "project": project
+            }
+
+        except Exception as e:
+            logger.error(f"Failed to get recent activity: {e}")
+            raise DatabaseConnectionError(f"Failed to get recent activity: {e}")
+
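A briefing-style usage sketch, assuming a connected instance `db` and a placeholder project path (both assumptions for illustration). The unresolved problems come back already sorted by importance, so a caller can surface them first.

    import asyncio

    async def briefing(db):
        activity = await db.get_recent_activity(days=7, project="/path/to/project")
        print(f"{activity['total_count']} memories in the last {activity['days']} days")
        for memory in activity["unresolved_problems"]:
            # Memory objects expose title and importance, per _properties_to_memory below.
            print(f"OPEN: {memory.title} (importance={memory.importance})")

    # asyncio.run(briefing(db))
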
+    def _properties_to_memory(self, properties: Dict[str, Any]) -> Optional[Memory]:
+        """
+        Convert properties dictionary to Memory object.
+
+        Args:
+            properties: Dictionary of memory properties
+
+        Returns:
+            Memory object or None if conversion fails
+        """
+        try:
+            # Extract basic memory fields
+            memory_data = {
+                "id": properties.get("id"),
+                "type": MemoryType(properties.get("type")),
+                "title": properties.get("title"),
+                "content": properties.get("content"),
+                "summary": properties.get("summary"),
+                "tags": properties.get("tags", []),
+                "importance": properties.get("importance", 0.5),
+                "confidence": properties.get("confidence", 0.8),
+                "effectiveness": properties.get("effectiveness"),
+                "usage_count": properties.get("usage_count", 0),
+                "created_at": datetime.fromisoformat(properties.get("created_at")),
+                "updated_at": datetime.fromisoformat(properties.get("updated_at")),
+            }
+
+            # Handle optional last_accessed field
+            if properties.get("last_accessed"):
+                memory_data["last_accessed"] = datetime.fromisoformat(properties["last_accessed"])
+
+            # Extract context information
+            context_data = {}
+            for key, value in properties.items():
+                if key.startswith("context_") and value is not None:
+                    context_key = key[8:]  # Remove "context_" prefix
+
+                    # Deserialize JSON strings back to Python objects
+                    if isinstance(value, str) and context_key in ["additional_metadata", "files_involved", "languages", "frameworks", "technologies"]:
+                        try:
+                            context_data[context_key] = json.loads(value)
+                        except json.JSONDecodeError:
+                            context_data[context_key] = value
+                    else:
+                        context_data[context_key] = value
+
+            if context_data:
+                # Handle timestamp fields in context
+                if "timestamp" in context_data and isinstance(context_data["timestamp"], str):
+                    context_data["timestamp"] = datetime.fromisoformat(context_data["timestamp"])
+
+                memory_data["context"] = MemoryContext(**context_data)
+
+            return Memory(**memory_data)
+
+        except Exception as e:
+            logger.error(f"Failed to convert properties to Memory: {e}")
+            return None
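A sketch of the flattened property shape this helper expects, inferred from the code above: context fields live under "context_"-prefixed keys, and list-valued context fields are JSON-encoded strings. All concrete values here (the id, the "solution" type, paths, timestamps) are assumed example data, not values taken from this package.

    flat = {
        "id": "mem-123",
        "type": "solution",                   # must be a valid MemoryType value
        "title": "Fix race in job queue",
        "content": "Serialize access with a lock.",
        "tags": ["concurrency"],
        "importance": 0.8,
        "confidence": 0.9,
        "usage_count": 3,
        "created_at": "2024-01-01T00:00:00+00:00",
        "updated_at": "2024-01-02T00:00:00+00:00",
        "context_project_path": "/repos/example",
        "context_languages": '["python"]',    # JSON string, decoded by the helper
    }
    # memory = db._properties_to_memory(flat)  # -> Memory, or None if conversion fails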