mcp-code-indexer 3.1.4__py3-none-any.whl → 3.1.6__py3-none-any.whl

This diff compares the contents of two publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in the public registry.
@@ -1,17 +1,16 @@
 """
 Tenacity-based retry executor for database operations with exponential backoff.
 
-This module provides a robust retry executor that replaces the broken async
+This module provides a robust retry executor that replaces the broken async
 context manager retry pattern with proper separation of concerns between
 retry logic and resource management.
 """
 
-import asyncio
 import logging
 from contextlib import asynccontextmanager
 from dataclasses import dataclass, field
-from datetime import datetime, timedelta, timezone
-from typing import Any, AsyncIterator, Callable, Dict, Optional, Type, TypeVar, Union
+from datetime import datetime, timezone
+from typing import Any, AsyncIterator, Callable, Dict, Optional, TypeVar
 
 import aiosqlite
 from tenacity import (
@@ -19,29 +18,32 @@ from tenacity import (
     RetryError,
     stop_after_attempt,
     wait_exponential_jitter,
-    retry_if_exception_type,
     before_sleep_log,
-    after_log
+    after_log,
 )
 
 logger = logging.getLogger(__name__)
 
-T = TypeVar('T')
+T = TypeVar("T")
 
 
 @dataclass
 class RetryConfig:
     """Configuration for database retry logic using tenacity."""
+
     max_attempts: int = 5
     min_wait_seconds: float = 0.1
     max_wait_seconds: float = 2.0
     jitter_max_seconds: float = 0.2  # Max jitter to add
-    retry_on_errors: tuple = field(default_factory=lambda: (aiosqlite.OperationalError,))
-
+    retry_on_errors: tuple = field(
+        default_factory=lambda: (aiosqlite.OperationalError,)
+    )
 
-@dataclass
+
+@dataclass
 class RetryStats:
     """Statistics for retry operations."""
+
     total_operations: int = 0
     successful_operations: int = 0
     retried_operations: int = 0
@@ -49,21 +51,21 @@ class RetryStats:
     total_attempts: int = 0
     total_retry_time: float = 0.0
     last_operation_time: Optional[datetime] = None
-
+
     @property
     def success_rate(self) -> float:
         """Calculate success rate as percentage."""
         if self.total_operations == 0:
             return 0.0
         return (self.successful_operations / self.total_operations) * 100.0
-
+
     @property
     def retry_rate(self) -> float:
         """Calculate retry rate as percentage."""
         if self.total_operations == 0:
             return 0.0
         return (self.retried_operations / self.total_operations) * 100.0
-
+
     @property
     def average_attempts_per_operation(self) -> float:
         """Calculate average retry attempts per operation."""
@@ -74,9 +76,14 @@ class RetryStats:
 
 class DatabaseLockError(Exception):
     """Exception for database locking issues with retry context."""
-
-    def __init__(self, message: str, retry_count: int = 0, operation_name: str = "",
-                 last_attempt: Optional[datetime] = None):
+
+    def __init__(
+        self,
+        message: str,
+        retry_count: int = 0,
+        operation_name: str = "",
+        last_attempt: Optional[datetime] = None,
+    ):
         self.message = message
         self.retry_count = retry_count
         self.operation_name = operation_name
@@ -87,223 +94,246 @@ class DatabaseLockError(Exception):
 class RetryExecutor:
     """
     Tenacity-based retry executor for database operations.
-
+
     This executor provides robust retry logic with exponential backoff,
     proper error classification, and comprehensive statistics tracking.
     It replaces the broken async context manager retry pattern.
     """
-
+
     def __init__(self, config: Optional[RetryConfig] = None):
         """
         Initialize retry executor.
-
+
         Args:
             config: Retry configuration, uses defaults if None
         """
         self.config = config or RetryConfig()
         self._stats = RetryStats()
         self._operation_start_times: Dict[str, datetime] = {}
-
+
         # Configure tenacity retrying with exponential backoff and jitter
         self._tenacity_retrying = AsyncRetrying(
             stop=stop_after_attempt(self.config.max_attempts),
             wait=wait_exponential_jitter(
                 initial=self.config.min_wait_seconds,
                 max=self.config.max_wait_seconds,
-                jitter=self.config.jitter_max_seconds
+                jitter=self.config.jitter_max_seconds,
             ),
             retry=self._should_retry_exception,
             before_sleep=before_sleep_log(logger, logging.WARNING),
             after=after_log(logger, logging.DEBUG),
-            reraise=False
+            reraise=False,
         )
-
-    async def execute_with_retry(self,
-                                  operation: Callable[[], T],
-                                  operation_name: str = "database_operation") -> T:
+
+    async def execute_with_retry(
+        self, operation: Callable[[], T], operation_name: str = "database_operation"
+    ) -> T:
         """
         Execute an operation with retry logic.
-
+
         Args:
             operation: Async callable to execute
             operation_name: Name for logging and statistics
-
+
         Returns:
             Result of the operation
-
+
         Raises:
             DatabaseLockError: If all retry attempts fail
             Exception: For non-retryable errors
         """
         self._stats.total_operations += 1
         self._operation_start_times[operation_name] = datetime.now(timezone.utc)
-
+
         attempt_count = 0
         operation_start = datetime.now(timezone.utc)
         operation_had_retries = False
-
+
         try:
             async for attempt in self._tenacity_retrying:
                 with attempt:
                     attempt_count += 1
                     self._stats.total_attempts += 1
-
+
                     # Execute the operation
                     result = await operation()
-
+
                     # Success - update statistics
-                    operation_time = (datetime.now(timezone.utc) - operation_start).total_seconds()
+                    operation_time = (
+                        datetime.now(timezone.utc) - operation_start
+                    ).total_seconds()
                     self._stats.successful_operations += 1
                     self._stats.last_operation_time = datetime.now(timezone.utc)
-
+
                     if attempt_count > 1:
                         if not operation_had_retries:
                             self._stats.retried_operations += 1
                             operation_had_retries = True
                         self._stats.total_retry_time += operation_time
                         logger.info(
-                            f"Operation '{operation_name}' succeeded after {attempt_count} attempts",
-                            extra={"structured_data": {
-                                "retry_success": {
-                                    "operation": operation_name,
-                                    "attempts": attempt_count,
-                                    "total_time_seconds": operation_time
+                            (
+                                f"Operation '{operation_name}' succeeded after "
+                                f"{attempt_count} attempts"
+                            ),
+                            extra={
+                                "structured_data": {
+                                    "retry_success": {
+                                        "operation": operation_name,
+                                        "attempts": attempt_count,
+                                        "total_time_seconds": operation_time,
+                                    }
                                 }
-                            }}
+                            },
                         )
-
+
                     return result
-
+
         except RetryError as e:
             # All retry attempts exhausted
-            operation_time = (datetime.now(timezone.utc) - operation_start).total_seconds()
+            operation_time = (
+                datetime.now(timezone.utc) - operation_start
+            ).total_seconds()
             self._stats.failed_operations += 1
             self._stats.total_retry_time += operation_time
-
+
             original_error = e.last_attempt.exception()
             logger.error(
-                f"Operation '{operation_name}' failed after {attempt_count} attempts",
-                extra={"structured_data": {
-                    "retry_exhausted": {
-                        "operation": operation_name,
-                        "max_attempts": self.config.max_attempts,
-                        "total_time_seconds": operation_time,
-                        "final_error": str(original_error)
+                (
+                    f"Operation '{operation_name}' failed after "
+                    f"{attempt_count} attempts"
+                ),
+                extra={
+                    "structured_data": {
+                        "retry_exhausted": {
+                            "operation": operation_name,
+                            "max_attempts": self.config.max_attempts,
+                            "total_time_seconds": operation_time,
+                            "final_error": str(original_error),
+                        }
                     }
-                }}
+                },
             )
-
+
             raise DatabaseLockError(
-                f"Database operation failed after {attempt_count} attempts: {original_error}",
+                (
+                    f"Database operation failed after {attempt_count} attempts: "
+                    f"{original_error}"
+                ),
                 retry_count=attempt_count,
                 operation_name=operation_name,
-                last_attempt=datetime.now(timezone.utc)
+                last_attempt=datetime.now(timezone.utc),
             )
-
+
         except Exception as e:
             # Non-retryable error on first attempt
             self._stats.failed_operations += 1
             logger.error(
                 f"Non-retryable error in '{operation_name}': {e}",
-                extra={"structured_data": {
-                    "immediate_failure": {
-                        "operation": operation_name,
-                        "error_type": type(e).__name__,
-                        "error_message": str(e)
+                extra={
+                    "structured_data": {
+                        "immediate_failure": {
+                            "operation": operation_name,
+                            "error_type": type(e).__name__,
+                            "error_message": str(e),
+                        }
                     }
-                }}
+                },
             )
             raise
-
+
         finally:
             # Clean up tracking
             self._operation_start_times.pop(operation_name, None)
-
+
     @asynccontextmanager
-    async def get_connection_with_retry(self,
-                                         connection_factory: Callable[[], AsyncIterator[aiosqlite.Connection]],
-                                         operation_name: str = "database_connection") -> AsyncIterator[aiosqlite.Connection]:
+    async def get_connection_with_retry(
+        self,
+        connection_factory: Callable[[], AsyncIterator[aiosqlite.Connection]],
+        operation_name: str = "database_connection",
+    ) -> AsyncIterator[aiosqlite.Connection]:
         """
         Get a database connection with retry logic wrapped around the context manager.
-
-        This method properly separates retry logic from resource management by
-        retrying the entire context manager operation, not yielding inside a retry loop.
-
+
+        This method properly separates retry logic from resource management
+        by retrying the entire context manager operation, not yielding inside
+        a retry loop.
+
         Args:
-            connection_factory: Function that returns an async context manager for connections
+            connection_factory: Function that returns an async context manager
+                for connections
             operation_name: Name for logging and statistics
-
+
         Yields:
             Database connection
         """
-
+
         async def get_connection():
             # This function will be retried by execute_with_retry
             async with connection_factory() as conn:
                 # Store connection for the outer context manager
                 return conn
-
+
         # Use execute_with_retry to handle the retry logic
         # We create a connection and store it for the context manager
         connection = await self.execute_with_retry(get_connection, operation_name)
-
+
         try:
             yield connection
         finally:
             # Connection cleanup is handled by the original context manager
             # in the connection_factory, so nothing to do here
             pass
-
+
     def _should_retry_exception(self, retry_state) -> bool:
         """
         Determine if an exception should trigger a retry.
-
+
         This is used by tenacity to decide whether to retry.
-
+
         Args:
             retry_state: Tenacity retry state
-
+
         Returns:
             True if the exception should trigger a retry
         """
         if retry_state.outcome is None:
             return False
-
+
         exception = retry_state.outcome.exception()
         if exception is None:
             return False
-
+
         return self._is_sqlite_retryable_error(exception)
-
+
     def _is_sqlite_retryable_error(self, error: Exception) -> bool:
         """
         Determine if a SQLite error is retryable.
-
+
         Args:
             error: Exception to check
-
+
         Returns:
             True if the error should trigger a retry
         """
         if not isinstance(error, self.config.retry_on_errors):
             return False
-
+
         # Check specific SQLite error messages that indicate transient issues
         error_message = str(error).lower()
         retryable_messages = [
             "database is locked",
-            "database is busy",
+            "database is busy",
             "cannot start a transaction within a transaction",
             "sqlite_busy",
-            "sqlite_locked"
+            "sqlite_locked",
         ]
-
+
         return any(msg in error_message for msg in retryable_messages)
-
+
     def get_retry_stats(self) -> Dict[str, Any]:
         """
         Get comprehensive retry statistics.
-
+
         Returns:
             Dictionary with retry statistics and performance metrics
         """
@@ -315,17 +345,23 @@ class RetryExecutor:
             "total_attempts": self._stats.total_attempts,
             "success_rate_percent": round(self._stats.success_rate, 2),
             "retry_rate_percent": round(self._stats.retry_rate, 2),
-            "average_attempts_per_operation": round(self._stats.average_attempts_per_operation, 2),
+            "average_attempts_per_operation": round(
+                self._stats.average_attempts_per_operation, 2
+            ),
             "total_retry_time_seconds": round(self._stats.total_retry_time, 3),
-            "last_operation_time": self._stats.last_operation_time.isoformat() if self._stats.last_operation_time else None,
+            "last_operation_time": (
+                self._stats.last_operation_time.isoformat()
+                if self._stats.last_operation_time
+                else None
+            ),
             "config": {
                 "max_attempts": self.config.max_attempts,
                 "min_wait_seconds": self.config.min_wait_seconds,
                 "max_wait_seconds": self.config.max_wait_seconds,
-                "jitter_max_seconds": self.config.jitter_max_seconds
-            }
+                "jitter_max_seconds": self.config.jitter_max_seconds,
+            },
         }
-
+
     def reset_stats(self) -> None:
         """Reset retry statistics."""
         self._stats = RetryStats()
@@ -336,17 +372,17 @@ def create_retry_executor(
     max_attempts: int = 5,
     min_wait_seconds: float = 0.1,
     max_wait_seconds: float = 2.0,
-    jitter_max_seconds: float = 0.2
+    jitter_max_seconds: float = 0.2,
 ) -> RetryExecutor:
     """
     Create a configured retry executor for database operations.
-
+
     Args:
         max_attempts: Maximum retry attempts
         min_wait_seconds: Initial delay in seconds
-        max_wait_seconds: Maximum delay in seconds
+        max_wait_seconds: Maximum delay in seconds
         jitter_max_seconds: Maximum jitter to add to delays
-
+
     Returns:
         Configured RetryExecutor instance
     """
@@ -354,6 +390,6 @@ def create_retry_executor(
         max_attempts=max_attempts,
         min_wait_seconds=min_wait_seconds,
         max_wait_seconds=max_wait_seconds,
-        jitter_max_seconds=jitter_max_seconds
+        jitter_max_seconds=jitter_max_seconds,
     )
     return RetryExecutor(config)
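
The module shown in this diff exposes a small public surface: create_retry_executor() builds a RetryExecutor from a RetryConfig, execute_with_retry() runs an async callable under tenacity-backed exponential backoff, and get_retry_stats() reports the counters kept in RetryStats. A minimal usage sketch follows; the import path is an assumption (the diff does not show where the module lives inside the package), and the database file name is illustrative:

import asyncio

import aiosqlite

# Assumed import path -- not shown in the diff above.
from mcp_code_indexer.database.retry_executor import create_retry_executor


async def main() -> None:
    # Defaults mirror the diff: 5 attempts, 0.1s-2.0s exponential backoff plus jitter.
    executor = create_retry_executor(max_attempts=5, min_wait_seconds=0.1)

    async def count_tables() -> int:
        # Any zero-argument async callable can be retried; aiosqlite.OperationalError
        # with messages such as "database is locked" or "database is busy"
        # is classified as retryable by _is_sqlite_retryable_error.
        async with aiosqlite.connect("example.db") as db:
            async with db.execute("SELECT COUNT(*) FROM sqlite_master") as cursor:
                row = await cursor.fetchone()
                return row[0]

    table_count = await executor.execute_with_retry(count_tables, "count_tables")
    stats = executor.get_retry_stats()
    print(table_count, stats["success_rate_percent"])


asyncio.run(main())

If every attempt fails, execute_with_retry raises DatabaseLockError carrying the retry count and operation name, as defined in the diff above.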