mcp-code-indexer 2.0.1__tar.gz → 2.1.0__tar.gz

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (48)
  1. {mcp_code_indexer-2.0.1/src/mcp_code_indexer.egg-info → mcp_code_indexer-2.1.0}/PKG-INFO +3 -3
  2. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/README.md +2 -2
  3. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/pyproject.toml +1 -1
  4. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/src/mcp_code_indexer/database/connection_health.py +187 -3
  5. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/src/mcp_code_indexer/database/database.py +94 -70
  6. mcp_code_indexer-2.1.0/src/mcp_code_indexer/database/exceptions.py +303 -0
  7. mcp_code_indexer-2.1.0/src/mcp_code_indexer/database/retry_executor.py +359 -0
  8. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/src/mcp_code_indexer/server/mcp_server.py +79 -12
  9. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0/src/mcp_code_indexer.egg-info}/PKG-INFO +3 -3
  10. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/src/mcp_code_indexer.egg-info/SOURCES.txt +2 -1
  11. mcp_code_indexer-2.0.1/src/mcp_code_indexer/database/retry_handler.py +0 -344
  12. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/LICENSE +0 -0
  13. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/MANIFEST.in +0 -0
  14. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/docs/api-reference.md +0 -0
  15. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/docs/architecture.md +0 -0
  16. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/docs/configuration.md +0 -0
  17. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/docs/contributing.md +0 -0
  18. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/docs/database-resilience.md +0 -0
  19. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/docs/git-hook-setup.md +0 -0
  20. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/docs/monitoring.md +0 -0
  21. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/docs/performance-tuning.md +0 -0
  22. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/migrations/001_initial.sql +0 -0
  23. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/migrations/002_performance_indexes.sql +0 -0
  24. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/migrations/003_project_overviews.sql +0 -0
  25. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/requirements.txt +0 -0
  26. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/setup.cfg +0 -0
  27. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/setup.py +0 -0
  28. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/src/mcp_code_indexer/__init__.py +0 -0
  29. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/src/mcp_code_indexer/__main__.py +0 -0
  30. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/src/mcp_code_indexer/data/stop_words_english.txt +0 -0
  31. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/src/mcp_code_indexer/database/__init__.py +0 -0
  32. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/src/mcp_code_indexer/database/models.py +0 -0
  33. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/src/mcp_code_indexer/error_handler.py +0 -0
  34. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/src/mcp_code_indexer/file_scanner.py +0 -0
  35. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/src/mcp_code_indexer/git_hook_handler.py +0 -0
  36. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/src/mcp_code_indexer/logging_config.py +0 -0
  37. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/src/mcp_code_indexer/main.py +0 -0
  38. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/src/mcp_code_indexer/merge_handler.py +0 -0
  39. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/src/mcp_code_indexer/middleware/__init__.py +0 -0
  40. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/src/mcp_code_indexer/middleware/error_middleware.py +0 -0
  41. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/src/mcp_code_indexer/server/__init__.py +0 -0
  42. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/src/mcp_code_indexer/tiktoken_cache/9b5ad71b2ce5302211f9c61530b329a4922fc6a4 +0 -0
  43. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/src/mcp_code_indexer/token_counter.py +0 -0
  44. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/src/mcp_code_indexer/tools/__init__.py +0 -0
  45. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/src/mcp_code_indexer.egg-info/dependency_links.txt +0 -0
  46. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/src/mcp_code_indexer.egg-info/entry_points.txt +0 -0
  47. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/src/mcp_code_indexer.egg-info/requires.txt +0 -0
  48. {mcp_code_indexer-2.0.1 → mcp_code_indexer-2.1.0}/src/mcp_code_indexer.egg-info/top_level.txt +0 -0
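Taken together, the list tells one story: the legacy retry layer (src/mcp_code_indexer/database/retry_handler.py, 344 lines) is deleted and replaced by two new modules, retry_executor.py (+359) and exceptions.py (+303), with database.py, connection_health.py, and mcp_server.py reworked on top of them. Everything else is the version bump and metadata churn.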
--- mcp_code_indexer-2.0.1/src/mcp_code_indexer.egg-info/PKG-INFO
+++ mcp_code_indexer-2.1.0/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mcp-code-indexer
-Version: 2.0.1
+Version: 2.1.0
 Summary: MCP server that tracks file descriptions across codebases, enabling AI agents to efficiently navigate and understand code through searchable summaries and token-aware overviews.
 Author: MCP Code Indexer Contributors
 Maintainer: MCP Code Indexer Contributors
@@ -59,8 +59,8 @@ Dynamic: requires-python
 
 # MCP Code Indexer 🚀
 
-[![PyPI version](https://badge.fury.io/py/mcp-code-indexer.svg?12)](https://badge.fury.io/py/mcp-code-indexer)
-[![Python](https://img.shields.io/pypi/pyversions/mcp-code-indexer.svg?12)](https://pypi.org/project/mcp-code-indexer/)
+[![PyPI version](https://badge.fury.io/py/mcp-code-indexer.svg?14)](https://badge.fury.io/py/mcp-code-indexer)
+[![Python](https://img.shields.io/pypi/pyversions/mcp-code-indexer.svg?14)](https://pypi.org/project/mcp-code-indexer/)
 [![License](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
 
 A production-ready **Model Context Protocol (MCP) server** that revolutionizes how AI agents navigate and understand codebases. Built for high-concurrency environments with advanced database resilience, the server provides instant access to intelligent descriptions, semantic search, and context-aware recommendations while maintaining 800+ writes/sec throughput.
--- mcp_code_indexer-2.0.1/README.md
+++ mcp_code_indexer-2.1.0/README.md
@@ -1,7 +1,7 @@
 # MCP Code Indexer 🚀
 
-[![PyPI version](https://badge.fury.io/py/mcp-code-indexer.svg?12)](https://badge.fury.io/py/mcp-code-indexer)
-[![Python](https://img.shields.io/pypi/pyversions/mcp-code-indexer.svg?12)](https://pypi.org/project/mcp-code-indexer/)
+[![PyPI version](https://badge.fury.io/py/mcp-code-indexer.svg?14)](https://badge.fury.io/py/mcp-code-indexer)
+[![Python](https://img.shields.io/pypi/pyversions/mcp-code-indexer.svg?14)](https://pypi.org/project/mcp-code-indexer/)
 [![License](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
 
 A production-ready **Model Context Protocol (MCP) server** that revolutionizes how AI agents navigate and understand codebases. Built for high-concurrency environments with advanced database resilience, the server provides instant access to intelligent descriptions, semantic search, and context-aware recommendations while maintaining 800+ writes/sec throughput.
--- mcp_code_indexer-2.0.1/pyproject.toml
+++ mcp_code_indexer-2.1.0/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "mcp-code-indexer"
-version = "2.0.1"
+version = "2.1.0"
 description = "MCP server that tracks file descriptions across codebases, enabling AI agents to efficiently navigate and understand code through searchable summaries and token-aware overviews."
 readme = "README.md"
 license = {text = "MIT"}
--- mcp_code_indexer-2.0.1/src/mcp_code_indexer/database/connection_health.py
+++ mcp_code_indexer-2.1.0/src/mcp_code_indexer/database/connection_health.py
@@ -262,12 +262,15 @@ class ConnectionHealthMonitor:
             }
         )
 
-    def get_health_status(self) -> Dict:
+    def get_health_status(self, include_retry_stats: bool = True) -> Dict:
         """
        Get current health status and metrics.
 
+        Args:
+            include_retry_stats: Whether to include retry executor statistics
+
         Returns:
-            Dictionary with health status, metrics, and recent history
+            Dictionary with health status, metrics, recent history, and retry stats
         """
         # Get recent health status (last 5 checks)
         recent_checks = self._health_history[-5:] if self._health_history else []
@@ -276,7 +279,7 @@ class ConnectionHealthMonitor:
             if recent_checks else 0
         )
 
-        return {
+        health_status = {
             "is_monitoring": self._is_monitoring,
             "current_status": {
                 "is_healthy": (
@@ -301,6 +304,22 @@ class ConnectionHealthMonitor:
                 "timeout_seconds": self.timeout_seconds
             }
         }
+
+        # Include retry executor statistics if available
+        if include_retry_stats and hasattr(self.database_manager, '_retry_executor'):
+            retry_executor = self.database_manager._retry_executor
+            if retry_executor:
+                health_status["retry_statistics"] = retry_executor.get_retry_stats()
+
+        # Include database-level statistics if available
+        if hasattr(self.database_manager, 'get_database_stats'):
+            try:
+                db_stats = self.database_manager.get_database_stats()
+                health_status["database_statistics"] = db_stats
+            except Exception as e:
+                logger.warning(f"Failed to get database statistics: {e}")
+
+        return health_status
 
     def get_recent_history(self, count: int = 10) -> List[Dict]:
         """
@@ -322,6 +341,171 @@ class ConnectionHealthMonitor:
             }
             for check in recent_checks
         ]
+
+    def get_comprehensive_diagnostics(self) -> Dict:
+        """
+        Get comprehensive database health diagnostics for monitoring.
+
+        This method provides detailed diagnostics suitable for the
+        check_database_health MCP tool.
+
+        Returns:
+            Comprehensive health diagnostics including retry metrics,
+            performance data, and resilience statistics
+        """
+        # Get base health status with retry stats
+        base_status = self.get_health_status(include_retry_stats=True)
+
+        # Add detailed performance analysis
+        diagnostics = {
+            **base_status,
+            "performance_analysis": {
+                "health_check_performance": {
+                    "avg_response_time_ms": self.metrics.avg_response_time_ms,
+                    "response_time_threshold_exceeded": self.metrics.avg_response_time_ms > 100,
+                    "recent_performance_trend": self._get_performance_trend()
+                },
+                "failure_analysis": {
+                    "failure_rate_percent": (
+                        (self.metrics.failed_checks / self.metrics.total_checks * 100)
+                        if self.metrics.total_checks > 0 else 0
+                    ),
+                    "consecutive_failures": self.metrics.consecutive_failures,
+                    "approaching_failure_threshold": (
+                        self.metrics.consecutive_failures >= self.failure_threshold - 1
+                    ),
+                    "pool_refresh_frequency": self.metrics.pool_refreshes
+                }
+            },
+            "resilience_indicators": {
+                "overall_health_score": self._calculate_health_score(),
+                "retry_effectiveness": self._analyze_retry_effectiveness(),
+                "connection_stability": self._assess_connection_stability(),
+                "recommendations": self._generate_health_recommendations()
+            },
+            "recent_history": self.get_recent_history(count=5)
+        }
+
+        return diagnostics
+
+    def _get_performance_trend(self) -> str:
+        """Analyze recent performance trend."""
+        if len(self._health_history) < 5:
+            return "insufficient_data"
+
+        recent_times = [
+            check.response_time_ms for check in self._health_history[-5:]
+            if check.is_healthy
+        ]
+
+        if len(recent_times) < 2:
+            return "insufficient_healthy_checks"
+
+        # Simple trend analysis
+        if recent_times[-1] > recent_times[0] * 1.5:
+            return "degrading"
+        elif recent_times[-1] < recent_times[0] * 0.7:
+            return "improving"
+        else:
+            return "stable"
+
+    def _calculate_health_score(self) -> float:
+        """Calculate overall health score (0-100)."""
+        if self.metrics.total_checks == 0:
+            return 100.0
+
+        # Base score from success rate
+        success_rate = (self.metrics.successful_checks / self.metrics.total_checks) * 100
+
+        # Penalize consecutive failures
+        failure_penalty = min(self.metrics.consecutive_failures * 10, 50)
+
+        # Penalize high response times
+        response_penalty = min(max(0, self.metrics.avg_response_time_ms - 50) / 10, 20)
+
+        # Calculate final score
+        score = success_rate - failure_penalty - response_penalty
+        return max(0.0, min(100.0, score))
+
+    def _analyze_retry_effectiveness(self) -> Dict:
+        """Analyze retry mechanism effectiveness."""
+        if not hasattr(self.database_manager, '_retry_executor'):
+            return {"status": "no_retry_executor"}
+
+        retry_executor = self.database_manager._retry_executor
+        if not retry_executor:
+            return {"status": "retry_executor_not_initialized"}
+
+        retry_stats = retry_executor.get_retry_stats()
+
+        return {
+            "status": "active",
+            "effectiveness_score": retry_stats.get("success_rate_percent", 0),
+            "retry_frequency": retry_stats.get("retry_rate_percent", 0),
+            "avg_attempts_per_operation": retry_stats.get("average_attempts_per_operation", 0),
+            "is_effective": retry_stats.get("success_rate_percent", 0) > 85
+        }
+
+    def _assess_connection_stability(self) -> Dict:
+        """Assess connection stability."""
+        stability_score = 100.0
+
+        # Penalize pool refreshes
+        if self.metrics.pool_refreshes > 0:
+            stability_score -= min(self.metrics.pool_refreshes * 15, 60)
+
+        # Penalize consecutive failures
+        if self.metrics.consecutive_failures > 0:
+            stability_score -= min(self.metrics.consecutive_failures * 20, 80)
+
+        return {
+            "stability_score": max(0.0, stability_score),
+            "pool_refreshes": self.metrics.pool_refreshes,
+            "consecutive_failures": self.metrics.consecutive_failures,
+            "is_stable": stability_score > 70
+        }
+
+    def _generate_health_recommendations(self) -> List[str]:
+        """Generate health recommendations based on current metrics."""
+        recommendations = []
+
+        # High failure rate
+        if self.metrics.total_checks > 0:
+            failure_rate = (self.metrics.failed_checks / self.metrics.total_checks) * 100
+            if failure_rate > 20:
+                recommendations.append(
+                    f"High failure rate ({failure_rate:.1f}%) - check database configuration"
+                )
+
+        # High response times
+        if self.metrics.avg_response_time_ms > 100:
+            recommendations.append(
+                f"High response times ({self.metrics.avg_response_time_ms:.1f}ms) - consider optimizing queries"
+            )
+
+        # Approaching failure threshold
+        if self.metrics.consecutive_failures >= self.failure_threshold - 1:
+            recommendations.append(
+                "Approaching failure threshold - pool refresh imminent"
+            )
+
+        # Frequent pool refreshes
+        if self.metrics.pool_refreshes > 3:
+            recommendations.append(
+                "Frequent pool refreshes detected - investigate underlying connection issues"
+            )
+
+        # No recent successful checks
+        if (self.metrics.last_success_time and
+                datetime.utcnow() - self.metrics.last_success_time > timedelta(minutes=5)):
+            recommendations.append(
+                "No successful health checks in last 5 minutes - database may be unavailable"
+            )
+
+        if not recommendations:
+            recommendations.append("Database health is optimal")
+
+        return recommendations
 
 
 class DatabaseMetricsCollector:
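To make the scoring arithmetic concrete, here is `_calculate_health_score` restated as a standalone snippet with sample numbers (a re-statement for illustration, not an import from the package):

```python
def health_score(successful: int, total: int,
                 consecutive_failures: int, avg_ms: float) -> float:
    success_rate = (successful / total) * 100              # 90 of 100 checks -> 90.0
    failure_penalty = min(consecutive_failures * 10, 50)   # 1 failure -> 10
    response_penalty = min(max(0, avg_ms - 50) / 10, 20)   # 120 ms -> 7.0
    return max(0.0, min(100.0, success_rate - failure_penalty - response_penalty))

print(health_score(90, 100, 1, 120.0))  # 90.0 - 10 - 7.0 = 73.0
```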
--- mcp_code_indexer-2.0.1/src/mcp_code_indexer/database/database.py
+++ mcp_code_indexer-2.1.0/src/mcp_code_indexer/database/database.py
@@ -21,8 +21,11 @@ from mcp_code_indexer.database.models import (
     Project, FileDescription, MergeConflict, SearchResult,
     CodebaseSizeInfo, ProjectOverview, WordFrequencyResult, WordFrequencyTerm
 )
-from mcp_code_indexer.database.retry_handler import (
-    RetryHandler, ConnectionRecoveryManager, create_retry_handler
+from mcp_code_indexer.database.retry_executor import (
+    RetryExecutor, create_retry_executor
+)
+from mcp_code_indexer.database.exceptions import (
+    DatabaseError, DatabaseLockError, classify_sqlite_error, is_retryable_error
 )
 from mcp_code_indexer.database.connection_health import (
     ConnectionHealthMonitor, DatabaseMetricsCollector
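The bodies of the new `exceptions.py` and `retry_executor.py` modules are not part of this section (the file list records them as +303 and +359 lines). Below is a minimal sketch of the classification pattern these imports suggest; the internal logic is an assumption for illustration, not the package's actual code, though `.message` and `.to_dict()` are grounded in how `database.py` uses classified errors later in this diff:

```python
import sqlite3

class DatabaseError(Exception):
    """Base class for classified database failures (sketch)."""
    def __init__(self, message: str):
        super().__init__(message)
        self.message = message

    def to_dict(self) -> dict:
        # database.py logs classified errors via to_dict(); shape assumed here.
        return {"error_type": type(self).__name__, "message": self.message}

class DatabaseLockError(DatabaseError):
    """Transient lock/busy condition that is worth retrying (sketch)."""

def classify_sqlite_error(e: Exception, operation_name: str) -> DatabaseError:
    # SQLite reports transient contention as OperationalError with
    # "database is locked" / "database is busy" messages.
    if isinstance(e, sqlite3.OperationalError) and any(
        marker in str(e).lower() for marker in ("locked", "busy")
    ):
        return DatabaseLockError(f"{operation_name}: {e}")
    return DatabaseError(f"{operation_name}: {e}")

def is_retryable_error(e: Exception) -> bool:
    return isinstance(e, DatabaseLockError)
```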
@@ -45,7 +48,10 @@ class DatabaseManager:
                  retry_count: int = 5,
                  timeout: float = 10.0,
                  enable_wal_mode: bool = True,
-                 health_check_interval: float = 30.0):
+                 health_check_interval: float = 30.0,
+                 retry_min_wait: float = 0.1,
+                 retry_max_wait: float = 2.0,
+                 retry_jitter: float = 0.2):
         """Initialize database manager with path to SQLite database."""
         self.db_path = db_path
         self.pool_size = pool_size
@@ -53,15 +59,20 @@ class DatabaseManager:
         self.timeout = timeout
         self.enable_wal_mode = enable_wal_mode
         self.health_check_interval = health_check_interval
+        self.retry_min_wait = retry_min_wait
+        self.retry_max_wait = retry_max_wait
+        self.retry_jitter = retry_jitter
         self._connection_pool: List[aiosqlite.Connection] = []
         self._pool_lock = None  # Will be initialized in async context
         self._write_lock = None  # Write serialization lock, initialized in async context
 
         # Retry and recovery components - configure with provided settings
-        from .retry_handler import RetryConfig
-        retry_config = RetryConfig(max_attempts=retry_count)
-        self._retry_handler = create_retry_handler(retry_config)
-        self._recovery_manager = None  # Initialized in async context
+        self._retry_executor = create_retry_executor(
+            max_attempts=retry_count,
+            min_wait_seconds=retry_min_wait,
+            max_wait_seconds=retry_max_wait,
+            jitter_max_seconds=retry_jitter
+        )
 
         # Health monitoring and metrics
         self._health_monitor = None  # Initialized in async context
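`create_retry_executor` is also new in 2.1.0, and its implementation is not shown here. Its parameter names imply an exponential backoff that grows from `min_wait_seconds`, is capped at `max_wait_seconds`, and adds up to `jitter_max_seconds` of random jitter; sketched below under that assumption:

```python
import random

def backoff_delay(attempt: int, min_wait: float = 0.1,
                  max_wait: float = 2.0, jitter_max: float = 0.2) -> float:
    base = min(min_wait * (2 ** (attempt - 1)), max_wait)  # 0.1, 0.2, 0.4, ... capped at 2.0
    return base + random.uniform(0, jitter_max)            # jitter de-synchronizes writers

for attempt in range(1, 6):
    print(f"attempt {attempt}: wait ~{backoff_delay(attempt):.2f}s")
```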
@@ -75,8 +86,7 @@ class DatabaseManager:
         self._pool_lock = asyncio.Lock()
         self._write_lock = asyncio.Lock()
 
-        # Initialize connection recovery manager
-        self._recovery_manager = ConnectionRecoveryManager(self)
+        # Connection recovery is now handled by the retry executor
 
         # Initialize health monitoring with configured interval
         self._health_monitor = ConnectionHealthMonitor(
@@ -89,6 +99,8 @@ class DatabaseManager:
         # Ensure database directory exists
         self.db_path.parent.mkdir(parents=True, exist_ok=True)
 
+        # Database initialization now uses the modern retry executor directly
+
         # Apply migrations in order
         migrations_dir = Path(__file__).parent.parent.parent.parent / "migrations"
         migration_files = sorted(migrations_dir.glob("*.sql"))
@@ -219,30 +231,48 @@ class DatabaseManager:
         """
         Get a database connection with write serialization and automatic retry logic.
 
-        This combines write serialization with retry handling for maximum resilience
-        against database locking issues.
+        This uses the new RetryExecutor to properly handle retry logic without
+        the broken yield-in-retry-loop pattern that caused generator errors.
 
         Args:
             operation_name: Name of the operation for logging and monitoring
         """
-        if self._write_lock is None or self._retry_handler is None:
+        if self._write_lock is None:
             raise RuntimeError("DatabaseManager not initialized - call initialize() first")
 
-        async with self._retry_handler.with_retry(operation_name):
+        async def get_write_connection():
+            """Inner function to get connection - will be retried by executor."""
+            async with self._write_lock:
+                async with self.get_connection() as conn:
+                    return conn
+
+        try:
+            # Use retry executor to handle connection acquisition with retries
+            connection = await self._retry_executor.execute_with_retry(
+                get_write_connection,
+                operation_name
+            )
+
             try:
-                async with self._write_lock:
-                    async with self.get_connection() as conn:
-                        yield conn
-
-                # Reset failure count on success
-                if self._recovery_manager:
-                    self._recovery_manager.reset_failure_count()
+                yield connection
+
+                # Success - retry executor handles all failure tracking
 
             except Exception as e:
-                # Handle persistent failures
-                if self._recovery_manager:
-                    await self._recovery_manager.handle_persistent_failure(operation_name, e)
+                # Error handling is managed by the retry executor
                 raise
+
+        except DatabaseError:
+            # Re-raise our custom database errors as-is
+            raise
+        except Exception as e:
+            # Classify and wrap other exceptions
+            classified_error = classify_sqlite_error(e, operation_name)
+            logger.error(
+                f"Database operation '{operation_name}' failed: {classified_error.message}",
+                extra={"structured_data": classified_error.to_dict()}
+            )
+            raise classified_error
 
     def get_database_stats(self) -> Dict[str, Any]:
         """
@@ -255,14 +285,11 @@ class DatabaseManager:
             "connection_pool": {
                 "configured_size": self.pool_size,
                 "current_size": len(self._connection_pool)
-            }
+            },
+            "retry_executor": self._retry_executor.get_retry_stats() if self._retry_executor else {},
         }
 
-        if self._retry_handler:
-            stats["retry_stats"] = self._retry_handler.get_retry_stats()
-
-        if self._recovery_manager:
-            stats["recovery_stats"] = self._recovery_manager.get_recovery_stats()
+        # Legacy retry handler removed - retry executor stats are included above
 
         if self._health_monitor:
             stats["health_status"] = self._health_monitor.get_health_status()
@@ -349,10 +376,13 @@ class DatabaseManager:
         """
         Execute a database operation within a transaction with automatic retry.
 
+        Uses the new RetryExecutor for robust retry handling with proper error
+        classification and exponential backoff.
+
         Args:
             operation_func: Async function that takes a connection and performs the operation
             operation_name: Name of the operation for logging
-            max_retries: Maximum retry attempts
+            max_retries: Maximum retry attempts (overrides default retry executor config)
             timeout_seconds: Transaction timeout in seconds
 
         Returns:
@@ -365,9 +395,9 @@ class DatabaseManager:
 
             result = await db.execute_transaction_with_retry(my_operation, "insert_data")
         """
-        last_error = None
 
-        for attempt in range(1, max_retries + 1):
+        async def execute_transaction():
+            """Inner function to execute transaction - will be retried by executor."""
             try:
                 async with self.get_immediate_transaction(operation_name, timeout_seconds) as conn:
                     result = await operation_func(conn)
@@ -384,34 +414,15 @@ class DatabaseManager:
 
                     return result
             except (aiosqlite.OperationalError, asyncio.TimeoutError) as e:
-                last_error = e
-
                 # Record locking event for metrics
                 if self._metrics_collector and "locked" in str(e).lower():
                     self._metrics_collector.record_locking_event(operation_name, str(e))
 
-                if attempt < max_retries:
-                    # Exponential backoff with jitter
-                    delay = 0.1 * (2 ** (attempt - 1))
-                    jitter = delay * 0.1 * (2 * random.random() - 1)  # ±10% jitter
-                    wait_time = max(0.05, delay + jitter)
-
-                    logger.warning(
-                        f"Transaction attempt {attempt} failed for {operation_name}, retrying in {wait_time:.2f}s: {e}",
-                        extra={
-                            "structured_data": {
-                                "transaction_retry": {
-                                    "operation": operation_name,
-                                    "attempt": attempt,
-                                    "delay_seconds": wait_time,
-                                    "error": str(e)
-                                }
-                            }
-                        }
-                    )
-                    await asyncio.sleep(wait_time)
-                else:
-                    # Record failed operation metrics
+                # Classify the error for better handling
+                classified_error = classify_sqlite_error(e, operation_name)
+
+                # Record failed operation metrics for non-retryable errors
+                if not is_retryable_error(classified_error):
                     if self._metrics_collector:
                         self._metrics_collector.record_operation(
                             operation_name,
@@ -419,21 +430,34 @@ class DatabaseManager:
                             False,
                             len(self._connection_pool)
                         )
-
-                logger.error(
-                    f"Transaction failed after {max_retries} attempts for {operation_name}: {e}",
-                    extra={
-                        "structured_data": {
-                            "transaction_failure": {
-                                "operation": operation_name,
-                                "max_retries": max_retries,
-                                "final_error": str(e)
-                            }
-                        }
-                    }
-                )
+
+                raise classified_error
 
-        raise last_error
+        try:
+            # Create a temporary retry executor with custom max_retries if different from default
+            if max_retries != self._retry_executor.config.max_attempts:
+                from mcp_code_indexer.database.retry_executor import RetryConfig, RetryExecutor
+                temp_config = RetryConfig(
+                    max_attempts=max_retries,
+                    min_wait_seconds=self._retry_executor.config.min_wait_seconds,
+                    max_wait_seconds=self._retry_executor.config.max_wait_seconds,
+                    jitter_max_seconds=self._retry_executor.config.jitter_max_seconds
+                )
+                temp_executor = RetryExecutor(temp_config)
+                return await temp_executor.execute_with_retry(execute_transaction, operation_name)
+            else:
+                return await self._retry_executor.execute_with_retry(execute_transaction, operation_name)
+
+        except DatabaseError as e:
+            # Record failed operation metrics for final failure
+            if self._metrics_collector:
+                self._metrics_collector.record_operation(
+                    operation_name,
+                    timeout_seconds * 1000,
+                    False,
+                    len(self._connection_pool)
+                )
+            raise
 
     # Project operations
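Tying the pieces together, usage follows the docstring example shown in the hunk above. `db` is assumed to be an initialized `DatabaseManager`; the table and SQL are placeholders:

```python
async def my_operation(conn):
    # Everything in this function runs inside one BEGIN IMMEDIATE
    # transaction and is retried as a unit on transient lock errors.
    await conn.execute("UPDATE projects SET name = ? WHERE id = ?", ("renamed", 1))
    return True

result = await db.execute_transaction_with_retry(
    my_operation, "rename_project", max_retries=3
)
```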