mcp-code-indexer 2.0.2__py3-none-any.whl → 2.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -54,7 +54,10 @@ class MCPCodeIndexServer:
  db_retry_count: int = 5,
  db_timeout: float = 10.0,
  enable_wal_mode: bool = True,
- health_check_interval: float = 30.0
+ health_check_interval: float = 30.0,
+ retry_min_wait: float = 0.1,
+ retry_max_wait: float = 2.0,
+ retry_jitter: float = 0.2
  ):
  """
  Initialize the MCP Code Index Server.
@@ -68,6 +71,9 @@ class MCPCodeIndexServer:
  db_timeout: Database transaction timeout in seconds
  enable_wal_mode: Enable WAL mode for better concurrent access
  health_check_interval: Database health check interval in seconds
+ retry_min_wait: Minimum wait time between retries in seconds
+ retry_max_wait: Maximum wait time between retries in seconds
+ retry_jitter: Maximum jitter to add to retry delays in seconds
  """
  self.token_limit = token_limit
  self.db_path = db_path or Path.home() / ".mcp-code-index" / "tracker.db"
@@ -79,7 +85,10 @@ class MCPCodeIndexServer:
  "retry_count": db_retry_count,
  "timeout": db_timeout,
  "enable_wal_mode": enable_wal_mode,
- "health_check_interval": health_check_interval
+ "health_check_interval": health_check_interval,
+ "retry_min_wait": retry_min_wait,
+ "retry_max_wait": retry_max_wait,
+ "retry_jitter": retry_jitter
  }

  # Initialize components
@@ -89,7 +98,10 @@ class MCPCodeIndexServer:
  retry_count=db_retry_count,
  timeout=db_timeout,
  enable_wal_mode=enable_wal_mode,
- health_check_interval=health_check_interval
+ health_check_interval=health_check_interval,
+ retry_min_wait=retry_min_wait,
+ retry_max_wait=retry_max_wait,
+ retry_jitter=retry_jitter
  )
  self.token_counter = TokenCounter(token_limit)
  self.merge_handler = MergeHandler(self.db_manager)
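For orientation: the three new keyword arguments added in these hunks flow from the constructor into both the db_config dictionary and the DatabaseManager. A minimal sketch of constructing the server with them, assuming the module path listed in the wheel's RECORD; the token_limit value is purely illustrative:

```python
from pathlib import Path

from mcp_code_indexer.server.mcp_server import MCPCodeIndexServer

# Values mirror the defaults shown in the diff above; token_limit is a made-up example.
server = MCPCodeIndexServer(
    token_limit=32000,
    db_path=Path.home() / ".mcp-code-index" / "tracker.db",
    db_retry_count=5,
    db_timeout=10.0,
    enable_wal_mode=True,
    health_check_interval=30.0,
    retry_min_wait=0.1,   # new in 2.1.0: minimum wait between retries (seconds)
    retry_max_wait=2.0,   # new in 2.1.0: maximum wait between retries (seconds)
    retry_jitter=0.2,     # new in 2.1.0: maximum jitter added to retry delays (seconds)
)
```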
@@ -312,7 +324,7 @@ class MCPCodeIndexServer:
  ),
  types.Tool(
  name="search_descriptions",
- description="Searches through all file descriptions in a project to find files related to specific functionality. Use this for large codebases instead of loading the entire structure.",
+ description="Searches through all file descriptions in a project to find files related to specific functionality. Use this for large codebases instead of loading the entire structure. Always start with the fewest terms possible; if the tool returns a lot of results (more than 20) or the results are not relevant, then narrow it down by increasing the number of search terms. Start broad, then narrow the focus only if needed!",
  inputSchema={
  "type": "object",
  "properties": {
@@ -1194,21 +1206,76 @@ src/
  }

  async def _handle_check_database_health(self, arguments: Dict[str, Any]) -> Dict[str, Any]:
- """Handle check_database_health tool calls."""
- # Get comprehensive database health and statistics
- health_check = await self.db_manager.check_health()
+ """
+ Handle check_database_health tool calls with comprehensive diagnostics.
+
+ Returns detailed database health information including retry statistics,
+ performance analysis, and resilience indicators.
+ """
+ # Get comprehensive health diagnostics from the enhanced monitor
+ if hasattr(self.db_manager, '_health_monitor') and self.db_manager._health_monitor:
+ comprehensive_diagnostics = self.db_manager._health_monitor.get_comprehensive_diagnostics()
+ else:
+ # Fallback to basic health check if monitor not available
+ health_check = await self.db_manager.check_health()
+ comprehensive_diagnostics = {
+ "basic_health_check": health_check,
+ "note": "Enhanced health monitoring not available"
+ }
+
+ # Get additional database-level statistics
  database_stats = self.db_manager.get_database_stats()

  return {
- "health_check": health_check,
- "database_stats": database_stats,
- "configuration": self.db_config,
+ "comprehensive_diagnostics": comprehensive_diagnostics,
+ "database_statistics": database_stats,
+ "configuration": {
+ **self.db_config,
+ "retry_executor_config": (
+ self.db_manager._retry_executor.config.__dict__
+ if hasattr(self.db_manager, '_retry_executor') and self.db_manager._retry_executor
+ else {}
+ )
+ },
  "server_info": {
  "token_limit": self.token_limit,
  "db_path": str(self.db_path),
- "cache_dir": str(self.cache_dir)
+ "cache_dir": str(self.cache_dir),
+ "health_monitoring_enabled": (
+ hasattr(self.db_manager, '_health_monitor') and
+ self.db_manager._health_monitor is not None
+ )
  },
- "timestamp": datetime.utcnow().isoformat()
+ "timestamp": datetime.utcnow().isoformat(),
+ "status_summary": self._generate_health_summary(comprehensive_diagnostics)
+ }
+
+ def _generate_health_summary(self, diagnostics: Dict[str, Any]) -> Dict[str, Any]:
+ """Generate a concise health summary from comprehensive diagnostics."""
+ if "resilience_indicators" not in diagnostics:
+ return {"status": "limited_diagnostics_available"}
+
+ resilience = diagnostics["resilience_indicators"]
+ performance = diagnostics.get("performance_analysis", {})
+
+ # Overall status based on health score
+ health_score = resilience.get("overall_health_score", 0)
+ if health_score >= 90:
+ status = "excellent"
+ elif health_score >= 75:
+ status = "good"
+ elif health_score >= 50:
+ status = "fair"
+ else:
+ status = "poor"
+
+ return {
+ "overall_status": status,
+ "health_score": health_score,
+ "retry_effectiveness": resilience.get("retry_effectiveness", {}).get("is_effective", False),
+ "connection_stability": resilience.get("connection_stability", {}).get("is_stable", False),
+ "key_recommendations": resilience.get("recommendations", [])[:3], # Top 3 recommendations
+ "performance_trend": performance.get("health_check_performance", {}).get("recent_performance_trend", "unknown")
  }

  async def _run_session_with_retry(self, read_stream, write_stream, initialization_options) -> None:
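The new `_generate_health_summary` boils the monitor's diagnostics down to a coarse status keyed off `overall_health_score`. A standalone sketch of just that mapping; the example diagnostics payload is invented for illustration and is not part of the package:

```python
# Reproduces the score-to-status thresholds from _generate_health_summary above.
def score_to_status(health_score: float) -> str:
    if health_score >= 90:
        return "excellent"
    if health_score >= 75:
        return "good"
    if health_score >= 50:
        return "fair"
    return "poor"

example_diagnostics = {
    "resilience_indicators": {"overall_health_score": 82, "recommendations": []},
    "performance_analysis": {},
}
score = example_diagnostics["resilience_indicators"]["overall_health_score"]
print(score_to_status(score))  # -> "good"
```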
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: mcp-code-indexer
- Version: 2.0.2
+ Version: 2.1.0
  Summary: MCP server that tracks file descriptions across codebases, enabling AI agents to efficiently navigate and understand code through searchable summaries and token-aware overviews.
  Author: MCP Code Indexer Contributors
  Maintainer: MCP Code Indexer Contributors
@@ -59,8 +59,8 @@ Dynamic: requires-python
  # MCP Code Indexer 🚀

- [![PyPI version](https://badge.fury.io/py/mcp-code-indexer.svg?13)](https://badge.fury.io/py/mcp-code-indexer)
- [![Python](https://img.shields.io/pypi/pyversions/mcp-code-indexer.svg?13)](https://pypi.org/project/mcp-code-indexer/)
+ [![PyPI version](https://badge.fury.io/py/mcp-code-indexer.svg?14)](https://badge.fury.io/py/mcp-code-indexer)
+ [![Python](https://img.shields.io/pypi/pyversions/mcp-code-indexer.svg?14)](https://pypi.org/project/mcp-code-indexer/)
  [![License](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)

  A production-ready **Model Context Protocol (MCP) server** that revolutionizes how AI agents navigate and understand codebases. Built for high-concurrency environments with advanced database resilience, the server provides instant access to intelligent descriptions, semantic search, and context-aware recommendations while maintaining 800+ writes/sec throughput.
@@ -9,19 +9,20 @@ mcp_code_indexer/merge_handler.py,sha256=lJR8eVq2qSrF6MW9mR3Fy8UzrNAaQ7RsI2FMNXn
  mcp_code_indexer/token_counter.py,sha256=WrifOkbF99nWWHlRlhCHAB2KN7qr83GOHl7apE-hJcE,8460
  mcp_code_indexer/data/stop_words_english.txt,sha256=7Zdd9ameVgA6tN_zuXROvHXD4hkWeELVywPhb7FJEkw,6343
  mcp_code_indexer/database/__init__.py,sha256=aPq_aaRp0aSwOBIq9GkuMNjmLxA411zg2vhdrAuHm-w,38
- mcp_code_indexer/database/connection_health.py,sha256=XJvUrHRhIroZlIPScVGdKb69lNP67lT9ZTTO67cFSEs,16721
- mcp_code_indexer/database/database.py,sha256=5G_1E-jSVMDJuyFCVki9tbNmoyNvHJa3x7dRIBB2UQE,49603
+ mcp_code_indexer/database/connection_health.py,sha256=s2r9L_KipH5NlemAUDnhBQO90Dn4b_0Ht9UDs7F6QPk,24432
+ mcp_code_indexer/database/database.py,sha256=86XL1b49cTeTzkJ1mVbkYPq_QyQrVQOy8w_b1MxZR-E,50856
+ mcp_code_indexer/database/exceptions.py,sha256=AgpRA9Z5R-GoWYdQSPeSdYvAXDopFCQkLGN3jD7Ha4E,10215
  mcp_code_indexer/database/models.py,sha256=_vCmJnPXZSiInRzyvs4c7QUWuNNW8qsOoDlGX8J-Gnk,7124
- mcp_code_indexer/database/retry_handler.py,sha256=zwwZ0V1PzzS1rtcfVQOI-CXqWnPGF-KGH4L_3d-_h1Y,11932
+ mcp_code_indexer/database/retry_executor.py,sha256=QUayjkCk8OsckVMYiJ_HBQ9NTUss-H8GQeUIUbbw4_U,13419
  mcp_code_indexer/middleware/__init__.py,sha256=p-mP0pMsfiU2yajCPvokCUxUEkh_lu4XJP1LyyMW2ug,220
  mcp_code_indexer/middleware/error_middleware.py,sha256=5agJTAkkPogfPGnja1V9JtG9RG-BiOALIJYctK3byJQ,11730
  mcp_code_indexer/server/__init__.py,sha256=16xMcuriUOBlawRqWNBk6niwrvtv_JD5xvI36X1Vsmk,41
- mcp_code_indexer/server/mcp_server.py,sha256=tQTZFnAnASStkbnQOsj2-3eH1DYSmY97q1pwo7geFXI,66934
+ mcp_code_indexer/server/mcp_server.py,sha256=KJAGkhYIR3MVJZECKWL9rpMP3Yb8uO9k7gj5dQ3Wpbc,70436
  mcp_code_indexer/tiktoken_cache/9b5ad71b2ce5302211f9c61530b329a4922fc6a4,sha256=Ijkht27pm96ZW3_3OFE-7xAPtR0YyTWXoRO8_-hlsqc,1681126
  mcp_code_indexer/tools/__init__.py,sha256=m01mxML2UdD7y5rih_XNhNSCMzQTz7WQ_T1TeOcYlnE,49
- mcp_code_indexer-2.0.2.dist-info/licenses/LICENSE,sha256=JN9dyPPgYwH9C-UjYM7FLNZjQ6BF7kAzpF3_4PwY4rY,1086
- mcp_code_indexer-2.0.2.dist-info/METADATA,sha256=BsEhIKscok46a5pfTyigsaMIHjMHZgmg-7-O_OyTrZY,20165
- mcp_code_indexer-2.0.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- mcp_code_indexer-2.0.2.dist-info/entry_points.txt,sha256=8HqWOw1Is7jOP1bvIgaSwouvT9z_Boe-9hd4NzyJOhY,68
- mcp_code_indexer-2.0.2.dist-info/top_level.txt,sha256=yKYCM-gMGt-cnupGfAhnZaoEsROLB6DQ1KFUuyKx4rw,17
- mcp_code_indexer-2.0.2.dist-info/RECORD,,
+ mcp_code_indexer-2.1.0.dist-info/licenses/LICENSE,sha256=JN9dyPPgYwH9C-UjYM7FLNZjQ6BF7kAzpF3_4PwY4rY,1086
+ mcp_code_indexer-2.1.0.dist-info/METADATA,sha256=YA7MXrDtBNK3Hja15CNGJIx4HcwIti__ccb3V5as3So,20165
+ mcp_code_indexer-2.1.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ mcp_code_indexer-2.1.0.dist-info/entry_points.txt,sha256=8HqWOw1Is7jOP1bvIgaSwouvT9z_Boe-9hd4NzyJOhY,68
+ mcp_code_indexer-2.1.0.dist-info/top_level.txt,sha256=yKYCM-gMGt-cnupGfAhnZaoEsROLB6DQ1KFUuyKx4rw,17
+ mcp_code_indexer-2.1.0.dist-info/RECORD,,
@@ -1,344 +0,0 @@
- """
- Database retry handling for SQLite locking scenarios.
-
- This module provides specialized retry logic for database operations that may
- encounter locking issues in high-concurrency environments.
- """
-
- import asyncio
- import logging
- import random
- import time
- from contextlib import asynccontextmanager
- from dataclasses import dataclass
- from typing import Any, AsyncIterator, Callable, Optional, Type, Union
- from datetime import datetime
-
- import aiosqlite
-
- logger = logging.getLogger(__name__)
-
-
- @dataclass
- class RetryConfig:
- """Configuration for database retry logic."""
- max_attempts: int = 5
- initial_delay: float = 0.1 # seconds
- max_delay: float = 2.0 # seconds
- backoff_multiplier: float = 2.0
- jitter: bool = True
- retry_on_error_types: tuple = (aiosqlite.OperationalError,)
-
-
- class DatabaseLockError(Exception):
- """Exception for database locking issues."""
-
- def __init__(self, message: str, retry_count: int = 0, last_attempt: Optional[datetime] = None):
- self.message = message
- self.retry_count = retry_count
- self.last_attempt = last_attempt or datetime.utcnow()
- super().__init__(message)
-
-
- class RetryHandler:
- """Handles database operation retries with exponential backoff."""
-
- def __init__(self, config: Optional[RetryConfig] = None):
- """
- Initialize retry handler.
-
- Args:
- config: Retry configuration, uses defaults if None
- """
- self.config = config or RetryConfig()
- self._retry_stats = {
- "total_attempts": 0,
- "successful_retries": 0,
- "failed_operations": 0,
- "avg_retry_delay": 0.0
- }
-
- @asynccontextmanager
- async def with_retry(self, operation_name: str = "database_operation") -> AsyncIterator[None]:
- """
- Context manager that provides retry logic for database operations.
-
- Args:
- operation_name: Name of the operation for logging
-
- Usage:
- async with retry_handler.with_retry("create_project"):
- # Your database operation here
- await db.execute(...)
- """
- last_error = None
- total_delay = 0.0
-
- for attempt in range(1, self.config.max_attempts + 1):
- self._retry_stats["total_attempts"] += 1
-
- try:
- yield
-
- # Success - log if this was a retry
- if attempt > 1:
- self._retry_stats["successful_retries"] += 1
- logger.info(
- f"Database operation '{operation_name}' succeeded on attempt {attempt}",
- extra={
- "structured_data": {
- "retry_success": {
- "operation": operation_name,
- "attempt": attempt,
- "total_delay": total_delay
- }
- }
- }
- )
- return
-
- except Exception as e:
- last_error = e
-
- # Check if this is a retryable error
- if not self._is_retryable_error(e):
- logger.error(
- f"Non-retryable error in '{operation_name}': {e}",
- extra={
- "structured_data": {
- "non_retryable_error": {
- "operation": operation_name,
- "error_type": type(e).__name__,
- "error_message": str(e)
- }
- }
- }
- )
- raise
-
- # If this is the last attempt, give up
- if attempt >= self.config.max_attempts:
- self._retry_stats["failed_operations"] += 1
- logger.error(
- f"Database operation '{operation_name}' failed after {attempt} attempts",
- extra={
- "structured_data": {
- "retry_exhausted": {
- "operation": operation_name,
- "max_attempts": self.config.max_attempts,
- "total_delay": total_delay,
- "final_error": str(e)
- }
- }
- }
- )
- raise DatabaseLockError(
- f"Database operation failed after {attempt} attempts: {e}",
- retry_count=attempt,
- last_attempt=datetime.utcnow()
- )
-
- # Calculate delay for next attempt
- delay = self._calculate_delay(attempt)
- total_delay += delay
-
- logger.warning(
- f"Database operation '{operation_name}' failed on attempt {attempt}, retrying in {delay:.2f}s",
- extra={
- "structured_data": {
- "retry_attempt": {
- "operation": operation_name,
- "attempt": attempt,
- "delay_seconds": delay,
- "error_type": type(e).__name__,
- "error_message": str(e)
- }
- }
- }
- )
-
- # Wait before retry
- await asyncio.sleep(delay)
-
- def _is_retryable_error(self, error: Exception) -> bool:
- """
- Determine if an error is retryable.
-
- Args:
- error: Exception to check
-
- Returns:
- True if the error should trigger a retry
- """
- # Check error type
- if not isinstance(error, self.config.retry_on_error_types):
- return False
-
- # Check specific SQLite error messages
- error_message = str(error).lower()
- retryable_messages = [
- "database is locked",
- "database is busy",
- "cannot start a transaction within a transaction",
- "sqlite_busy",
- "sqlite_locked"
- ]
-
- return any(msg in error_message for msg in retryable_messages)
-
- def _calculate_delay(self, attempt: int) -> float:
- """
- Calculate delay for retry attempt with exponential backoff and jitter.
-
- Args:
- attempt: Current attempt number (1-based)
-
- Returns:
- Delay in seconds
- """
- # Exponential backoff: initial_delay * (multiplier ^ (attempt - 1))
- delay = self.config.initial_delay * (self.config.backoff_multiplier ** (attempt - 1))
-
- # Cap at max delay
- delay = min(delay, self.config.max_delay)
-
- # Add jitter to prevent thundering herd
- if self.config.jitter:
- jitter_range = delay * 0.1 # 10% jitter
- delay += random.uniform(-jitter_range, jitter_range)
-
- # Ensure delay is positive
- return max(0.0, delay)
-
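With the default RetryConfig above (initial_delay=0.1, backoff_multiplier=2.0, max_delay=2.0), the pre-jitter delays grow as 0.1, 0.2, 0.4, 0.8, 1.6 seconds before the ±10% jitter is applied. A small sketch that reproduces the removed _calculate_delay logic outside the class:

```python
import random

def calculate_delay(attempt: int, initial_delay: float = 0.1, multiplier: float = 2.0,
                    max_delay: float = 2.0, jitter: bool = True) -> float:
    delay = initial_delay * (multiplier ** (attempt - 1))  # exponential backoff
    delay = min(delay, max_delay)                          # cap at max_delay
    if jitter:
        jitter_range = delay * 0.1                         # +/-10% jitter
        delay += random.uniform(-jitter_range, jitter_range)
    return max(0.0, delay)                                 # never negative

print([round(calculate_delay(a, jitter=False), 2) for a in range(1, 6)])  # [0.1, 0.2, 0.4, 0.8, 1.6]
```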
- def get_retry_stats(self) -> dict:
- """
- Get retry statistics.
-
- Returns:
- Dictionary with retry statistics
- """
- if self._retry_stats["successful_retries"] > 0:
- self._retry_stats["avg_retry_delay"] = (
- self._retry_stats["total_attempts"] / self._retry_stats["successful_retries"]
- )
-
- return self._retry_stats.copy()
-
- def reset_stats(self) -> None:
- """Reset retry statistics."""
- self._retry_stats = {
- "total_attempts": 0,
- "successful_retries": 0,
- "failed_operations": 0,
- "avg_retry_delay": 0.0
- }
-
-
- class ConnectionRecoveryManager:
- """Manages database connection recovery for persistent failures."""
-
- def __init__(self, database_manager):
- """
- Initialize connection recovery manager.
-
- Args:
- database_manager: DatabaseManager instance to manage
- """
- self.database_manager = database_manager
- self._recovery_stats = {
- "pool_refreshes": 0,
- "last_refresh": None,
- "consecutive_failures": 0
- }
- self._failure_threshold = 3 # Refresh pool after 3 consecutive failures
-
- async def handle_persistent_failure(self, operation_name: str, error: Exception) -> bool:
- """
- Handle persistent database failures by attempting pool refresh.
-
- Args:
- operation_name: Name of the failing operation
- error: The persistent error
-
- Returns:
- True if pool refresh was attempted, False otherwise
- """
- self._recovery_stats["consecutive_failures"] += 1
-
- # Only refresh if we've hit the threshold
- if self._recovery_stats["consecutive_failures"] >= self._failure_threshold:
- logger.warning(
- f"Attempting connection pool refresh after {self._recovery_stats['consecutive_failures']} failures",
- extra={
- "structured_data": {
- "pool_recovery": {
- "operation": operation_name,
- "consecutive_failures": self._recovery_stats["consecutive_failures"],
- "trigger_error": str(error)
- }
- }
- }
- )
-
- await self._refresh_connection_pool()
- return True
-
- return False
-
- def reset_failure_count(self) -> None:
- """Reset consecutive failure count after successful operation."""
- self._recovery_stats["consecutive_failures"] = 0
-
- async def _refresh_connection_pool(self) -> None:
- """
- Refresh the database connection pool by closing all connections.
-
- This forces creation of new connections on next access.
- """
- try:
- # Close existing pool
- await self.database_manager.close_pool()
-
- # Update stats
- self._recovery_stats["pool_refreshes"] += 1
- self._recovery_stats["last_refresh"] = datetime.utcnow()
- self._recovery_stats["consecutive_failures"] = 0
-
- logger.info("Database connection pool refreshed successfully")
-
- except Exception as e:
- logger.error(f"Failed to refresh connection pool: {e}")
- raise
-
- def get_recovery_stats(self) -> dict:
- """
- Get connection recovery statistics.
-
- Returns:
- Dictionary with recovery statistics
- """
- return self._recovery_stats.copy()
-
-
- def create_retry_handler(
- max_attempts: int = 5,
- initial_delay: float = 0.1,
- max_delay: float = 2.0
- ) -> RetryHandler:
- """
- Create a configured retry handler for database operations.
-
- Args:
- max_attempts: Maximum retry attempts
- initial_delay: Initial delay in seconds
- max_delay: Maximum delay in seconds
-
- Returns:
- Configured RetryHandler instance
- """
- config = RetryConfig(
- max_attempts=max_attempts,
- initial_delay=initial_delay,
- max_delay=max_delay
- )
- return RetryHandler(config)
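For reference, the module removed above was driven through the create_retry_handler factory plus the with_retry context manager, as its own docstrings indicate. A minimal usage sketch against the 2.0.2 import path; the database file, SQL statement, and operation name are placeholders, and in 2.1.0 this role passes to mcp_code_indexer/database/retry_executor.py, whose API is not shown in this diff:

```python
import asyncio

import aiosqlite

# 2.0.2 import path; this module no longer exists in 2.1.0.
from mcp_code_indexer.database.retry_handler import create_retry_handler


async def main() -> None:
    handler = create_retry_handler(max_attempts=5, initial_delay=0.1, max_delay=2.0)
    async with aiosqlite.connect("example.db") as db:  # placeholder database file
        async with handler.with_retry("create_demo_table"):
            # Placeholder statement; retried on "database is locked" style errors.
            await db.execute("CREATE TABLE IF NOT EXISTS demo (path TEXT, description TEXT)")
            await db.commit()
    print(handler.get_retry_stats())


asyncio.run(main())
```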