memorygraphmcp-0.11.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. memorygraph/__init__.py +50 -0
  2. memorygraph/__main__.py +12 -0
  3. memorygraph/advanced_tools.py +509 -0
  4. memorygraph/analytics/__init__.py +46 -0
  5. memorygraph/analytics/advanced_queries.py +727 -0
  6. memorygraph/backends/__init__.py +21 -0
  7. memorygraph/backends/base.py +179 -0
  8. memorygraph/backends/cloud.py +75 -0
  9. memorygraph/backends/cloud_backend.py +858 -0
  10. memorygraph/backends/factory.py +577 -0
  11. memorygraph/backends/falkordb_backend.py +749 -0
  12. memorygraph/backends/falkordblite_backend.py +746 -0
  13. memorygraph/backends/ladybugdb_backend.py +242 -0
  14. memorygraph/backends/memgraph_backend.py +327 -0
  15. memorygraph/backends/neo4j_backend.py +298 -0
  16. memorygraph/backends/sqlite_fallback.py +463 -0
  17. memorygraph/backends/turso.py +448 -0
  18. memorygraph/cli.py +743 -0
  19. memorygraph/cloud_database.py +297 -0
  20. memorygraph/config.py +295 -0
  21. memorygraph/database.py +933 -0
  22. memorygraph/graph_analytics.py +631 -0
  23. memorygraph/integration/__init__.py +69 -0
  24. memorygraph/integration/context_capture.py +426 -0
  25. memorygraph/integration/project_analysis.py +583 -0
  26. memorygraph/integration/workflow_tracking.py +492 -0
  27. memorygraph/intelligence/__init__.py +59 -0
  28. memorygraph/intelligence/context_retrieval.py +447 -0
  29. memorygraph/intelligence/entity_extraction.py +386 -0
  30. memorygraph/intelligence/pattern_recognition.py +420 -0
  31. memorygraph/intelligence/temporal.py +374 -0
  32. memorygraph/migration/__init__.py +27 -0
  33. memorygraph/migration/manager.py +579 -0
  34. memorygraph/migration/models.py +142 -0
  35. memorygraph/migration/scripts/__init__.py +17 -0
  36. memorygraph/migration/scripts/bitemporal_migration.py +595 -0
  37. memorygraph/migration/scripts/multitenancy_migration.py +452 -0
  38. memorygraph/migration_tools_module.py +146 -0
  39. memorygraph/models.py +684 -0
  40. memorygraph/proactive/__init__.py +46 -0
  41. memorygraph/proactive/outcome_learning.py +444 -0
  42. memorygraph/proactive/predictive.py +410 -0
  43. memorygraph/proactive/session_briefing.py +399 -0
  44. memorygraph/relationships.py +668 -0
  45. memorygraph/server.py +883 -0
  46. memorygraph/sqlite_database.py +1876 -0
  47. memorygraph/tools/__init__.py +59 -0
  48. memorygraph/tools/activity_tools.py +262 -0
  49. memorygraph/tools/memory_tools.py +315 -0
  50. memorygraph/tools/migration_tools.py +181 -0
  51. memorygraph/tools/relationship_tools.py +147 -0
  52. memorygraph/tools/search_tools.py +406 -0
  53. memorygraph/tools/temporal_tools.py +339 -0
  54. memorygraph/utils/__init__.py +10 -0
  55. memorygraph/utils/context_extractor.py +429 -0
  56. memorygraph/utils/error_handling.py +151 -0
  57. memorygraph/utils/export_import.py +425 -0
  58. memorygraph/utils/graph_algorithms.py +200 -0
  59. memorygraph/utils/pagination.py +149 -0
  60. memorygraph/utils/project_detection.py +133 -0
  61. memorygraphmcp-0.11.7.dist-info/METADATA +970 -0
  62. memorygraphmcp-0.11.7.dist-info/RECORD +65 -0
  63. memorygraphmcp-0.11.7.dist-info/WHEEL +4 -0
  64. memorygraphmcp-0.11.7.dist-info/entry_points.txt +2 -0
  65. memorygraphmcp-0.11.7.dist-info/licenses/LICENSE +21 -0
memorygraph/migration/scripts/bitemporal_migration.py
@@ -0,0 +1,595 @@
"""
002_add_bitemporal - Migration to add bi-temporal tracking to relationships.

This migration:
1. Adds temporal columns (valid_from, valid_until, recorded_at, invalidated_by)
2. Sets default values for existing relationships
3. Creates temporal indexes if not already present
4. Supports rollback (WARNING: loses temporal data)

Usage:
    from memorygraph.migration.scripts import migrate_to_bitemporal

    # Migrate existing database
    await migrate_to_bitemporal(backend)

    # Rollback (WARNING: loses temporal data)
    await rollback_from_bitemporal(backend)
"""

import json
import logging
import sqlite3
from typing import Optional
from ...backends.base import GraphBackend
from ...backends.sqlite_fallback import SQLiteFallbackBackend
from ...models import DatabaseConnectionError

logger = logging.getLogger(__name__)


async def migrate_to_bitemporal(
    backend: GraphBackend,
    dry_run: bool = False
) -> dict:
    """
    Migrate existing database to bi-temporal schema.

    This function adds temporal fields to the relationships table and sets
    sensible defaults for existing relationships:
    - valid_from = created_at (when the fact became true)
    - valid_until = NULL (still valid)
    - recorded_at = created_at (when we learned it)
    - invalidated_by = NULL (not superseded)

    Args:
        backend: Backend instance (must be connected)
        dry_run: If True, only report what would be changed without making changes

    Returns:
        Dictionary with migration statistics:
        {
            "success": bool,
            "dry_run": bool,
            "relationships_updated": int,
            "indexes_created": int,
            "errors": list
        }

    Raises:
        DatabaseConnectionError: If backend is not connected

    Example:
        >>> backend = SQLiteFallbackBackend()
        >>> await backend.connect()
        >>> result = await migrate_to_bitemporal(backend)
        >>> print(f"Updated {result['relationships_updated']} relationships")
    """
    # Check backend connection (use _connected attribute for SQLite backends)
    is_connected = getattr(backend, '_connected', False)
    if not backend or not is_connected:
        raise DatabaseConnectionError("Backend must be connected before migration")

    logger.info(f"Starting bi-temporal migration (dry_run={dry_run})")

    errors = []
    relationships_updated = 0
    indexes_created = 0

    try:
        # SQLite-based backends (SQLite, Turso) - use duck typing to avoid
        # isinstance issues when modules are reloaded during testing.
        # SQLite backends have a 'conn' attribute for the database connection.
        if hasattr(backend, 'conn') and backend.conn is not None:
            relationships_updated, indexes_created = await _migrate_sqlite_backend(
                backend, dry_run
            )

        # Neo4j/Memgraph backends - use execute_query method
        elif hasattr(backend, 'execute_query'):
            relationships_updated, indexes_created = await _migrate_graph_backend(
                backend, dry_run
            )

        else:
            raise ValueError(f"Unsupported backend type: {type(backend).__name__}")

        logger.info(
            f"Migration completed: {relationships_updated} relationships updated, "
            f"{indexes_created} indexes created"
        )

        return {
            "success": True,
            "dry_run": dry_run,
            "relationships_updated": relationships_updated,
            "indexes_created": indexes_created,
            "errors": errors
        }

    except Exception as e:
        logger.error(f"Migration failed: {e}")
        errors.append(str(e))
        return {
            "success": False,
            "dry_run": dry_run,
            "relationships_updated": relationships_updated,
            "indexes_created": indexes_created,
            "errors": errors
        }


async def _migrate_sqlite_backend(
    backend: GraphBackend,
    dry_run: bool
) -> tuple[int, int]:
    """
    Migrate SQLite-based backend to bi-temporal schema.

    Args:
        backend: SQLite backend instance
        dry_run: If True, only count without updating

    Returns:
        Tuple of (relationships_updated, indexes_created)
    """
    # Runtime check for conn attribute (duck typing for SQLite backends)
    if not hasattr(backend, 'conn') or backend.conn is None:
        raise ValueError("Backend must have a 'conn' attribute for SQLite operations")

    cursor = backend.conn.cursor()

    # Check if temporal columns already exist
    cursor.execute("PRAGMA table_info(relationships)")
    columns = {row[1] for row in cursor.fetchall()}

    temporal_columns = {'valid_from', 'valid_until', 'recorded_at', 'invalidated_by'}
    existing_temporal = temporal_columns & columns
    missing_temporal = temporal_columns - columns

    if not missing_temporal:
        logger.info("Bi-temporal schema already exists, no migration needed")
        return 0, 0

    if existing_temporal and missing_temporal:
        logger.warning(
            f"Partial temporal schema detected. "
            f"Existing: {existing_temporal}, Missing: {missing_temporal}"
        )

    # Count relationships that need migration
    cursor.execute("SELECT COUNT(*) FROM relationships")
    count = cursor.fetchone()[0]
    logger.info(f"Found {count} relationships to migrate")

    if dry_run:
        logger.info(f"DRY RUN: Would update {count} relationships and create 3 indexes")
        return count, 3

    # Add temporal columns if missing
    for column in missing_temporal:
        try:
            if column == 'valid_from':
                cursor.execute("""
                    ALTER TABLE relationships
                    ADD COLUMN valid_from TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
                """)
                logger.info("Added valid_from column")

            elif column == 'valid_until':
                cursor.execute("""
                    ALTER TABLE relationships
                    ADD COLUMN valid_until TIMESTAMP
                """)
                logger.info("Added valid_until column")

            elif column == 'recorded_at':
                cursor.execute("""
                    ALTER TABLE relationships
                    ADD COLUMN recorded_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
                """)
                logger.info("Added recorded_at column")

            elif column == 'invalidated_by':
                cursor.execute("""
                    ALTER TABLE relationships
                    ADD COLUMN invalidated_by TEXT
                """)
                logger.info("Added invalidated_by column")

        except sqlite3.Error as e:
            # Column might already exist from a previous partial migration
            logger.warning(f"Could not add column {column}: {e}")
        except Exception as e:
            # Unexpected error, re-raise
            logger.error(f"Unexpected error adding column {column}: {e}")
            raise

    # Set defaults for existing relationships using created_at
    # valid_from = created_at, recorded_at = created_at, valid_until = NULL
    cursor.execute("""
        UPDATE relationships
        SET valid_from = COALESCE(valid_from, created_at, CURRENT_TIMESTAMP),
            recorded_at = COALESCE(recorded_at, created_at, CURRENT_TIMESTAMP),
            valid_until = NULL,
            invalidated_by = NULL
        WHERE valid_from IS NULL OR recorded_at IS NULL
    """)

    updated = cursor.rowcount
    logger.info(f"Set temporal defaults for {updated} relationships")

    # Create temporal indexes
    indexes_created = 0

    try:
        cursor.execute("""
            CREATE INDEX IF NOT EXISTS idx_relationships_temporal
            ON relationships(valid_from, valid_until)
        """)
        indexes_created += 1
        logger.info("Created idx_relationships_temporal")
    except sqlite3.Error as e:
        logger.warning(f"Could not create temporal index: {e}")

    try:
        cursor.execute("""
            CREATE INDEX IF NOT EXISTS idx_relationships_current
            ON relationships(valid_until)
            WHERE valid_until IS NULL
        """)
        indexes_created += 1
        logger.info("Created idx_relationships_current (partial index)")
    except sqlite3.Error as e:
        logger.warning(f"Could not create current index: {e}")

    try:
        cursor.execute("""
            CREATE INDEX IF NOT EXISTS idx_relationships_recorded
            ON relationships(recorded_at)
        """)
        indexes_created += 1
        logger.info("Created idx_relationships_recorded")
    except sqlite3.Error as e:
        logger.warning(f"Could not create recorded index: {e}")

    backend.conn.commit()
    logger.info(
        f"SQLite migration complete: {updated} relationships, "
        f"{indexes_created} indexes"
    )

    return updated, indexes_created


async def _migrate_graph_backend(
    backend: GraphBackend,
    dry_run: bool
) -> tuple[int, int]:
    """
    Migrate graph-based backend (Neo4j/Memgraph) to bi-temporal schema.

    For Neo4j/Memgraph, temporal fields are properties on relationships.
    We update all relationships to have the temporal properties.

    Args:
        backend: Graph backend instance
        dry_run: If True, only count without updating

    Returns:
        Tuple of (relationships_updated, indexes_created)
    """
    # Count relationships without temporal properties
    count_query = """
        MATCH ()-[r]->()
        WHERE r.valid_from IS NULL
        RETURN count(r) as count
    """

    count_result = await backend.execute_query(count_query)
    count = count_result[0]['count'] if count_result else 0

    logger.info(f"Found {count} relationships without temporal properties")

    if dry_run:
        logger.info(f"DRY RUN: Would update {count} relationships and create 3 indexes")
        return count, 3

    # Update relationships with temporal properties
    # Set valid_from = created_at (or now if created_at missing)
    # Set recorded_at = created_at (or now if created_at missing)
    # Set valid_until = NULL, invalidated_by = NULL
    update_query = """
        MATCH ()-[r]->()
        WHERE r.valid_from IS NULL
        SET r.valid_from = COALESCE(r.created_at, datetime()),
            r.recorded_at = COALESCE(r.created_at, datetime()),
            r.valid_until = NULL,
            r.invalidated_by = NULL
        RETURN count(r) as updated
    """

    result = await backend.execute_query(update_query, write=True)
    updated = result[0]['updated'] if result else 0

    logger.info(f"Updated {updated} relationships with temporal properties")

    # Create indexes for temporal queries
    indexes_created = 0

    try:
        # Index on valid_from for point-in-time queries
        await backend.execute_query(
            "CREATE INDEX rel_valid_from IF NOT EXISTS FOR ()-[r]-() ON (r.valid_from)",
            write=True
        )
        indexes_created += 1
        logger.info("Created index on valid_from")
    except Exception as e:
        logger.warning(f"Could not create valid_from index: {e}")

    try:
        # Index on valid_until for current relationship queries
        await backend.execute_query(
            "CREATE INDEX rel_valid_until IF NOT EXISTS FOR ()-[r]-() ON (r.valid_until)",
            write=True
        )
        indexes_created += 1
        logger.info("Created index on valid_until")
    except Exception as e:
        logger.warning(f"Could not create valid_until index: {e}")

    try:
        # Index on recorded_at for "what changed" queries
        await backend.execute_query(
            "CREATE INDEX rel_recorded_at IF NOT EXISTS FOR ()-[r]-() ON (r.recorded_at)",
            write=True
        )
        indexes_created += 1
        logger.info("Created index on recorded_at")
    except Exception as e:
        logger.warning(f"Could not create recorded_at index: {e}")

    logger.info(
        f"Graph migration complete: {updated} relationships, "
        f"{indexes_created} indexes"
    )

    return updated, indexes_created


async def rollback_from_bitemporal(
    backend: GraphBackend,
    dry_run: bool = False
) -> dict:
    """
    Rollback bi-temporal migration by removing temporal fields.

    WARNING: This operation loses all temporal data (valid_from, valid_until,
    recorded_at, invalidated_by). Use with caution!

    Args:
        backend: Backend instance (must be connected)
        dry_run: If True, only report what would be changed

    Returns:
        Dictionary with rollback statistics

    Example:
        >>> result = await rollback_from_bitemporal(backend)
        >>> print(f"Rolled back {result['relationships_updated']} relationships")
    """
    # Check backend connection (use _connected attribute for SQLite backends)
    is_connected = getattr(backend, '_connected', False)
    if not backend or not is_connected:
        raise DatabaseConnectionError("Backend must be connected before rollback")

    logger.warning("Starting bi-temporal rollback - THIS WILL LOSE TEMPORAL DATA")

    errors = []
    relationships_updated = 0
    indexes_dropped = 0

    try:
        # SQLite-based backends - use duck typing (check for conn attribute)
        if hasattr(backend, 'conn') and backend.conn is not None:
            relationships_updated, indexes_dropped = await _rollback_sqlite_backend(
                backend, dry_run
            )

        # Graph backends - use execute_query method
        elif hasattr(backend, 'execute_query'):
            relationships_updated, indexes_dropped = await _rollback_graph_backend(
                backend, dry_run
            )

        else:
            raise ValueError(f"Unsupported backend type: {type(backend).__name__}")

        logger.info(
            f"Rollback completed: {relationships_updated} relationships updated, "
            f"{indexes_dropped} indexes dropped"
        )

        return {
            "success": True,
            "dry_run": dry_run,
            "relationships_updated": relationships_updated,
            "indexes_dropped": indexes_dropped,
            "errors": errors
        }

    except Exception as e:
        logger.error(f"Rollback failed: {e}")
        errors.append(str(e))
        return {
            "success": False,
            "dry_run": dry_run,
            "relationships_updated": relationships_updated,
            "indexes_dropped": indexes_dropped,
            "errors": errors
        }


async def _rollback_sqlite_backend(
    backend: GraphBackend,
    dry_run: bool
) -> tuple[int, int]:
    """
    Rollback SQLite backend from bi-temporal schema.

    NOTE: SQLite does not support DROP COLUMN easily, so we:
    1. Drop temporal indexes
    2. Set temporal columns to NULL (preserves schema but clears data)

    Args:
        backend: SQLite backend instance
        dry_run: If True, only count without updating

    Returns:
        Tuple of (relationships_updated, indexes_dropped)
    """
    # Runtime check for conn attribute (duck typing for SQLite backends)
    if not hasattr(backend, 'conn') or backend.conn is None:
        raise ValueError("Backend must have a 'conn' attribute for SQLite operations")

    cursor = backend.conn.cursor()

    # Count relationships with temporal data
    cursor.execute("""
        SELECT COUNT(*) FROM relationships
        WHERE valid_from IS NOT NULL OR recorded_at IS NOT NULL
    """)

    count = cursor.fetchone()[0]
    logger.info(f"Found {count} relationships with temporal data")

    if dry_run:
        logger.info(
            f"DRY RUN: Would clear temporal data from {count} relationships "
            f"and drop 3 indexes"
        )
        return count, 3

    # Clear temporal data (set to NULL)
    cursor.execute("""
        UPDATE relationships
        SET valid_from = NULL,
            valid_until = NULL,
            recorded_at = NULL,
            invalidated_by = NULL
        WHERE valid_from IS NOT NULL OR recorded_at IS NOT NULL
    """)

    updated = cursor.rowcount
    logger.info(f"Cleared temporal data from {updated} relationships")

    # Drop temporal indexes
    indexes_dropped = 0

    try:
        cursor.execute("DROP INDEX IF EXISTS idx_relationships_temporal")
        indexes_dropped += 1
        logger.info("Dropped idx_relationships_temporal")
    except sqlite3.Error as e:
        logger.warning(f"Could not drop temporal index: {e}")

    try:
        cursor.execute("DROP INDEX IF EXISTS idx_relationships_current")
        indexes_dropped += 1
        logger.info("Dropped idx_relationships_current")
    except sqlite3.Error as e:
        logger.warning(f"Could not drop current index: {e}")

    try:
        cursor.execute("DROP INDEX IF EXISTS idx_relationships_recorded")
        indexes_dropped += 1
        logger.info("Dropped idx_relationships_recorded")
    except sqlite3.Error as e:
        logger.warning(f"Could not drop recorded index: {e}")

    backend.conn.commit()
    logger.info(
        f"SQLite rollback complete: {updated} relationships, "
        f"{indexes_dropped} indexes dropped"
    )

    return updated, indexes_dropped


async def _rollback_graph_backend(
    backend: GraphBackend,
    dry_run: bool
) -> tuple[int, int]:
    """
    Rollback graph backend from bi-temporal schema.

    Args:
        backend: Graph backend instance
        dry_run: If True, only count without updating

    Returns:
        Tuple of (relationships_updated, indexes_dropped)
    """
    # Count relationships with temporal properties
    count_query = """
        MATCH ()-[r]->()
        WHERE r.valid_from IS NOT NULL
        RETURN count(r) as count
    """

    count_result = await backend.execute_query(count_query)
    count = count_result[0]['count'] if count_result else 0

    logger.info(f"Found {count} relationships with temporal properties")

    if dry_run:
        logger.info(
            f"DRY RUN: Would remove temporal properties from {count} relationships "
            f"and drop 3 indexes"
        )
        return count, 3

    # Remove temporal properties
    update_query = """
        MATCH ()-[r]->()
        WHERE r.valid_from IS NOT NULL
        REMOVE r.valid_from, r.valid_until, r.recorded_at, r.invalidated_by
        RETURN count(r) as updated
    """

    result = await backend.execute_query(update_query, write=True)
    updated = result[0]['updated'] if result else 0

    logger.info(f"Removed temporal properties from {updated} relationships")

    # Drop temporal indexes
    indexes_dropped = 0

    try:
        await backend.execute_query("DROP INDEX rel_valid_from IF EXISTS", write=True)
        indexes_dropped += 1
        logger.info("Dropped valid_from index")
    except Exception as e:
        logger.warning(f"Could not drop valid_from index: {e}")

    try:
        await backend.execute_query("DROP INDEX rel_valid_until IF EXISTS", write=True)
        indexes_dropped += 1
        logger.info("Dropped valid_until index")
    except Exception as e:
        logger.warning(f"Could not drop valid_until index: {e}")

    try:
        await backend.execute_query("DROP INDEX rel_recorded_at IF EXISTS", write=True)
        indexes_dropped += 1
        logger.info("Dropped recorded_at index")
    except Exception as e:
        logger.warning(f"Could not drop recorded_at index: {e}")

    logger.info(
        f"Graph rollback complete: {updated} relationships, "
        f"{indexes_dropped} indexes dropped"
    )

    return updated, indexes_dropped
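
For reference, a minimal driver sketch built only from the module's own docstring and signatures: the SQLiteFallbackBackend constructor, connect() call, dry_run flag, and result keys all come from the examples above, while the asyncio entry point and the main() name are illustrative additions; error handling and backend teardown are omitted.

import asyncio

from memorygraph.backends.sqlite_fallback import SQLiteFallbackBackend
from memorygraph.migration.scripts import migrate_to_bitemporal


async def main() -> None:
    # Connect the SQLite fallback backend, as in the docstring example.
    backend = SQLiteFallbackBackend()
    await backend.connect()

    # Preview first: dry_run=True only reports what would change.
    preview = await migrate_to_bitemporal(backend, dry_run=True)
    print(f"Would update {preview['relationships_updated']} relationships")

    # Apply the migration and report the outcome.
    result = await migrate_to_bitemporal(backend)
    if result["success"]:
        print(
            f"Updated {result['relationships_updated']} relationships, "
            f"created {result['indexes_created']} indexes"
        )
    else:
        print(f"Migration reported errors: {result['errors']}")


if __name__ == "__main__":
    asyncio.run(main())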