mcp-code-indexer 2.0.2__py3-none-any.whl → 2.2.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- mcp_code_indexer/ask_handler.py +217 -0
- mcp_code_indexer/claude_api_handler.py +355 -0
- mcp_code_indexer/database/connection_health.py +187 -3
- mcp_code_indexer/database/database.py +94 -68
- mcp_code_indexer/database/exceptions.py +303 -0
- mcp_code_indexer/database/retry_executor.py +359 -0
- mcp_code_indexer/deepask_handler.py +465 -0
- mcp_code_indexer/server/mcp_server.py +79 -12
- {mcp_code_indexer-2.0.2.dist-info → mcp_code_indexer-2.2.0.dist-info}/METADATA +3 -3
- {mcp_code_indexer-2.0.2.dist-info → mcp_code_indexer-2.2.0.dist-info}/RECORD +14 -10
- mcp_code_indexer/database/retry_handler.py +0 -344
- {mcp_code_indexer-2.0.2.dist-info → mcp_code_indexer-2.2.0.dist-info}/WHEEL +0 -0
- {mcp_code_indexer-2.0.2.dist-info → mcp_code_indexer-2.2.0.dist-info}/entry_points.txt +0 -0
- {mcp_code_indexer-2.0.2.dist-info → mcp_code_indexer-2.2.0.dist-info}/licenses/LICENSE +0 -0
- {mcp_code_indexer-2.0.2.dist-info → mcp_code_indexer-2.2.0.dist-info}/top_level.txt +0 -0
mcp_code_indexer/database/retry_handler.py (deleted)
@@ -1,344 +0,0 @@
-"""
-Database retry handling for SQLite locking scenarios.
-
-This module provides specialized retry logic for database operations that may
-encounter locking issues in high-concurrency environments.
-"""
-
-import asyncio
-import logging
-import random
-import time
-from contextlib import asynccontextmanager
-from dataclasses import dataclass
-from typing import Any, AsyncIterator, Callable, Optional, Type, Union
-from datetime import datetime
-
-import aiosqlite
-
-logger = logging.getLogger(__name__)
-
-
-@dataclass
-class RetryConfig:
-    """Configuration for database retry logic."""
-    max_attempts: int = 5
-    initial_delay: float = 0.1  # seconds
-    max_delay: float = 2.0  # seconds
-    backoff_multiplier: float = 2.0
-    jitter: bool = True
-    retry_on_error_types: tuple = (aiosqlite.OperationalError,)
-
-
-class DatabaseLockError(Exception):
-    """Exception for database locking issues."""
-
-    def __init__(self, message: str, retry_count: int = 0, last_attempt: Optional[datetime] = None):
-        self.message = message
-        self.retry_count = retry_count
-        self.last_attempt = last_attempt or datetime.utcnow()
-        super().__init__(message)
-
-
-class RetryHandler:
-    """Handles database operation retries with exponential backoff."""
-
-    def __init__(self, config: Optional[RetryConfig] = None):
-        """
-        Initialize retry handler.
-
-        Args:
-            config: Retry configuration, uses defaults if None
-        """
-        self.config = config or RetryConfig()
-        self._retry_stats = {
-            "total_attempts": 0,
-            "successful_retries": 0,
-            "failed_operations": 0,
-            "avg_retry_delay": 0.0
-        }
-
-    @asynccontextmanager
-    async def with_retry(self, operation_name: str = "database_operation") -> AsyncIterator[None]:
-        """
-        Context manager that provides retry logic for database operations.
-
-        Args:
-            operation_name: Name of the operation for logging
-
-        Usage:
-            async with retry_handler.with_retry("create_project"):
-                # Your database operation here
-                await db.execute(...)
-        """
-        last_error = None
-        total_delay = 0.0
-
-        for attempt in range(1, self.config.max_attempts + 1):
-            self._retry_stats["total_attempts"] += 1
-
-            try:
-                yield
-
-                # Success - log if this was a retry
-                if attempt > 1:
-                    self._retry_stats["successful_retries"] += 1
-                    logger.info(
-                        f"Database operation '{operation_name}' succeeded on attempt {attempt}",
-                        extra={
-                            "structured_data": {
-                                "retry_success": {
-                                    "operation": operation_name,
-                                    "attempt": attempt,
-                                    "total_delay": total_delay
-                                }
-                            }
-                        }
-                    )
-                return
-
-            except Exception as e:
-                last_error = e
-
-                # Check if this is a retryable error
-                if not self._is_retryable_error(e):
-                    logger.error(
-                        f"Non-retryable error in '{operation_name}': {e}",
-                        extra={
-                            "structured_data": {
-                                "non_retryable_error": {
-                                    "operation": operation_name,
-                                    "error_type": type(e).__name__,
-                                    "error_message": str(e)
-                                }
-                            }
-                        }
-                    )
-                    raise
-
-                # If this is the last attempt, give up
-                if attempt >= self.config.max_attempts:
-                    self._retry_stats["failed_operations"] += 1
-                    logger.error(
-                        f"Database operation '{operation_name}' failed after {attempt} attempts",
-                        extra={
-                            "structured_data": {
-                                "retry_exhausted": {
-                                    "operation": operation_name,
-                                    "max_attempts": self.config.max_attempts,
-                                    "total_delay": total_delay,
-                                    "final_error": str(e)
-                                }
-                            }
-                        }
-                    )
-                    raise DatabaseLockError(
-                        f"Database operation failed after {attempt} attempts: {e}",
-                        retry_count=attempt,
-                        last_attempt=datetime.utcnow()
-                    )
-
-                # Calculate delay for next attempt
-                delay = self._calculate_delay(attempt)
-                total_delay += delay
-
-                logger.warning(
-                    f"Database operation '{operation_name}' failed on attempt {attempt}, retrying in {delay:.2f}s",
-                    extra={
-                        "structured_data": {
-                            "retry_attempt": {
-                                "operation": operation_name,
-                                "attempt": attempt,
-                                "delay_seconds": delay,
-                                "error_type": type(e).__name__,
-                                "error_message": str(e)
-                            }
-                        }
-                    }
-                )
-
-                # Wait before retry
-                await asyncio.sleep(delay)
-
-    def _is_retryable_error(self, error: Exception) -> bool:
-        """
-        Determine if an error is retryable.
-
-        Args:
-            error: Exception to check
-
-        Returns:
-            True if the error should trigger a retry
-        """
-        # Check error type
-        if not isinstance(error, self.config.retry_on_error_types):
-            return False
-
-        # Check specific SQLite error messages
-        error_message = str(error).lower()
-        retryable_messages = [
-            "database is locked",
-            "database is busy",
-            "cannot start a transaction within a transaction",
-            "sqlite_busy",
-            "sqlite_locked"
-        ]
-
-        return any(msg in error_message for msg in retryable_messages)
-
-    def _calculate_delay(self, attempt: int) -> float:
-        """
-        Calculate delay for retry attempt with exponential backoff and jitter.
-
-        Args:
-            attempt: Current attempt number (1-based)
-
-        Returns:
-            Delay in seconds
-        """
-        # Exponential backoff: initial_delay * (multiplier ^ (attempt - 1))
-        delay = self.config.initial_delay * (self.config.backoff_multiplier ** (attempt - 1))
-
-        # Cap at max delay
-        delay = min(delay, self.config.max_delay)
-
-        # Add jitter to prevent thundering herd
-        if self.config.jitter:
-            jitter_range = delay * 0.1  # 10% jitter
-            delay += random.uniform(-jitter_range, jitter_range)
-
-        # Ensure delay is positive
-        return max(0.0, delay)
-
-    def get_retry_stats(self) -> dict:
-        """
-        Get retry statistics.
-
-        Returns:
-            Dictionary with retry statistics
-        """
-        if self._retry_stats["successful_retries"] > 0:
-            self._retry_stats["avg_retry_delay"] = (
-                self._retry_stats["total_attempts"] / self._retry_stats["successful_retries"]
-            )
-
-        return self._retry_stats.copy()
-
-    def reset_stats(self) -> None:
-        """Reset retry statistics."""
-        self._retry_stats = {
-            "total_attempts": 0,
-            "successful_retries": 0,
-            "failed_operations": 0,
-            "avg_retry_delay": 0.0
-        }
-
-
-class ConnectionRecoveryManager:
-    """Manages database connection recovery for persistent failures."""
-
-    def __init__(self, database_manager):
-        """
-        Initialize connection recovery manager.
-
-        Args:
-            database_manager: DatabaseManager instance to manage
-        """
-        self.database_manager = database_manager
-        self._recovery_stats = {
-            "pool_refreshes": 0,
-            "last_refresh": None,
-            "consecutive_failures": 0
-        }
-        self._failure_threshold = 3  # Refresh pool after 3 consecutive failures
-
-    async def handle_persistent_failure(self, operation_name: str, error: Exception) -> bool:
-        """
-        Handle persistent database failures by attempting pool refresh.
-
-        Args:
-            operation_name: Name of the failing operation
-            error: The persistent error
-
-        Returns:
-            True if pool refresh was attempted, False otherwise
-        """
-        self._recovery_stats["consecutive_failures"] += 1
-
-        # Only refresh if we've hit the threshold
-        if self._recovery_stats["consecutive_failures"] >= self._failure_threshold:
-            logger.warning(
-                f"Attempting connection pool refresh after {self._recovery_stats['consecutive_failures']} failures",
-                extra={
-                    "structured_data": {
-                        "pool_recovery": {
-                            "operation": operation_name,
-                            "consecutive_failures": self._recovery_stats["consecutive_failures"],
-                            "trigger_error": str(error)
-                        }
-                    }
-                }
-            )
-
-            await self._refresh_connection_pool()
-            return True
-
-        return False
-
-    def reset_failure_count(self) -> None:
-        """Reset consecutive failure count after successful operation."""
-        self._recovery_stats["consecutive_failures"] = 0
-
-    async def _refresh_connection_pool(self) -> None:
-        """
-        Refresh the database connection pool by closing all connections.
-
-        This forces creation of new connections on next access.
-        """
-        try:
-            # Close existing pool
-            await self.database_manager.close_pool()
-
-            # Update stats
-            self._recovery_stats["pool_refreshes"] += 1
-            self._recovery_stats["last_refresh"] = datetime.utcnow()
-            self._recovery_stats["consecutive_failures"] = 0
-
-            logger.info("Database connection pool refreshed successfully")
-
-        except Exception as e:
-            logger.error(f"Failed to refresh connection pool: {e}")
-            raise
-
-    def get_recovery_stats(self) -> dict:
-        """
-        Get connection recovery statistics.
-
-        Returns:
-            Dictionary with recovery statistics
-        """
-        return self._recovery_stats.copy()
-
-
-def create_retry_handler(
-    max_attempts: int = 5,
-    initial_delay: float = 0.1,
-    max_delay: float = 2.0
-) -> RetryHandler:
-    """
-    Create a configured retry handler for database operations.
-
-    Args:
-        max_attempts: Maximum retry attempts
-        initial_delay: Initial delay in seconds
-        max_delay: Maximum delay in seconds
-
-    Returns:
-        Configured RetryHandler instance
-    """
-    config = RetryConfig(
-        max_attempts=max_attempts,
-        initial_delay=initial_delay,
-        max_delay=max_delay
-    )
-    return RetryHandler(config)
File without changes
|
File without changes
|
File without changes
|
File without changes
|