mcp-code-indexer 2.0.2__py3-none-any.whl → 2.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mcp_code_indexer/ask_handler.py +217 -0
- mcp_code_indexer/claude_api_handler.py +355 -0
- mcp_code_indexer/database/connection_health.py +187 -3
- mcp_code_indexer/database/database.py +94 -68
- mcp_code_indexer/database/exceptions.py +303 -0
- mcp_code_indexer/database/retry_executor.py +359 -0
- mcp_code_indexer/deepask_handler.py +465 -0
- mcp_code_indexer/server/mcp_server.py +79 -12
- {mcp_code_indexer-2.0.2.dist-info → mcp_code_indexer-2.2.0.dist-info}/METADATA +3 -3
- {mcp_code_indexer-2.0.2.dist-info → mcp_code_indexer-2.2.0.dist-info}/RECORD +14 -10
- mcp_code_indexer/database/retry_handler.py +0 -344
- {mcp_code_indexer-2.0.2.dist-info → mcp_code_indexer-2.2.0.dist-info}/WHEEL +0 -0
- {mcp_code_indexer-2.0.2.dist-info → mcp_code_indexer-2.2.0.dist-info}/entry_points.txt +0 -0
- {mcp_code_indexer-2.0.2.dist-info → mcp_code_indexer-2.2.0.dist-info}/licenses/LICENSE +0 -0
- {mcp_code_indexer-2.0.2.dist-info → mcp_code_indexer-2.2.0.dist-info}/top_level.txt +0 -0
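
The summary above shows the legacy `retry_handler.py` removed and a new `retry_executor.py` (plus `exceptions.py`) added; the `database.py` hunks below wire them together through `create_retry_executor()`, `execute_with_retry()`, and `get_retry_stats()`. As rough orientation, here is a minimal usage sketch assembled only from those call sites; the `flaky_operation` callable and the argument values are illustrative, and the real module may expose more.

```python
# Hypothetical usage sketch based only on the call sites visible in this diff;
# the actual retry_executor module in 2.2.0 may differ in detail.
import asyncio
from mcp_code_indexer.database.retry_executor import create_retry_executor

async def main() -> None:
    executor = create_retry_executor(
        max_attempts=5,          # mirrors DatabaseManager's retry_count default
        min_wait_seconds=0.1,    # mirrors retry_min_wait
        max_wait_seconds=2.0,    # mirrors retry_max_wait
        jitter_max_seconds=0.2,  # mirrors retry_jitter
    )

    async def flaky_operation() -> str:
        # Any awaitable that may raise a transient SQLite error.
        return "ok"

    result = await executor.execute_with_retry(flaky_operation, "demo_operation")
    print(result, executor.get_retry_stats())

asyncio.run(main())
```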
mcp_code_indexer/database/connection_health.py
@@ -262,12 +262,15 @@ class ConnectionHealthMonitor:
                 }
             )

-    def get_health_status(self) -> Dict:
+    def get_health_status(self, include_retry_stats: bool = True) -> Dict:
         """
         Get current health status and metrics.

+        Args:
+            include_retry_stats: Whether to include retry executor statistics
+
         Returns:
-            Dictionary with health status, metrics, and
+            Dictionary with health status, metrics, recent history, and retry stats
         """
         # Get recent health status (last 5 checks)
         recent_checks = self._health_history[-5:] if self._health_history else []
@@ -276,7 +279,7 @@ class ConnectionHealthMonitor:
             if recent_checks else 0
         )

-        return {
+        health_status = {
             "is_monitoring": self._is_monitoring,
             "current_status": {
                 "is_healthy": (
@@ -301,6 +304,22 @@ class ConnectionHealthMonitor:
                 "timeout_seconds": self.timeout_seconds
             }
         }
+
+        # Include retry executor statistics if available
+        if include_retry_stats and hasattr(self.database_manager, '_retry_executor'):
+            retry_executor = self.database_manager._retry_executor
+            if retry_executor:
+                health_status["retry_statistics"] = retry_executor.get_retry_stats()
+
+        # Include database-level statistics if available
+        if hasattr(self.database_manager, 'get_database_stats'):
+            try:
+                db_stats = self.database_manager.get_database_stats()
+                health_status["database_statistics"] = db_stats
+            except Exception as e:
+                logger.warning(f"Failed to get database statistics: {e}")
+
+        return health_status

     def get_recent_history(self, count: int = 10) -> List[Dict]:
         """
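
`get_health_status()` now optionally folds retry-executor and database-level statistics into the returned dictionary. A minimal consumer, assuming a running `ConnectionHealthMonitor` named `monitor`, might look like this (key names are taken from the hunk above; everything else is illustrative):

```python
# Illustrative consumer; `monitor` is assumed to be a ConnectionHealthMonitor
# attached to a DatabaseManager. Key names come from the hunks above.
def print_health(monitor) -> None:
    status = monitor.get_health_status(include_retry_stats=True)
    print(status["is_monitoring"])
    print(status["current_status"])
    print(status.get("retry_statistics", {}))     # present when the manager exposes _retry_executor
    print(status.get("database_statistics", {}))  # present when get_database_stats() is available
```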
@@ -322,6 +341,171 @@ class ConnectionHealthMonitor:
             }
             for check in recent_checks
         ]
+
+    def get_comprehensive_diagnostics(self) -> Dict:
+        """
+        Get comprehensive database health diagnostics for monitoring.
+
+        This method provides detailed diagnostics suitable for the
+        check_database_health MCP tool.
+
+        Returns:
+            Comprehensive health diagnostics including retry metrics,
+            performance data, and resilience statistics
+        """
+        # Get base health status with retry stats
+        base_status = self.get_health_status(include_retry_stats=True)
+
+        # Add detailed performance analysis
+        diagnostics = {
+            **base_status,
+            "performance_analysis": {
+                "health_check_performance": {
+                    "avg_response_time_ms": self.metrics.avg_response_time_ms,
+                    "response_time_threshold_exceeded": self.metrics.avg_response_time_ms > 100,
+                    "recent_performance_trend": self._get_performance_trend()
+                },
+                "failure_analysis": {
+                    "failure_rate_percent": (
+                        (self.metrics.failed_checks / self.metrics.total_checks * 100)
+                        if self.metrics.total_checks > 0 else 0
+                    ),
+                    "consecutive_failures": self.metrics.consecutive_failures,
+                    "approaching_failure_threshold": (
+                        self.metrics.consecutive_failures >= self.failure_threshold - 1
+                    ),
+                    "pool_refresh_frequency": self.metrics.pool_refreshes
+                }
+            },
+            "resilience_indicators": {
+                "overall_health_score": self._calculate_health_score(),
+                "retry_effectiveness": self._analyze_retry_effectiveness(),
+                "connection_stability": self._assess_connection_stability(),
+                "recommendations": self._generate_health_recommendations()
+            },
+            "recent_history": self.get_recent_history(count=5)
+        }
+
+        return diagnostics
+
+    def _get_performance_trend(self) -> str:
+        """Analyze recent performance trend."""
+        if len(self._health_history) < 5:
+            return "insufficient_data"
+
+        recent_times = [
+            check.response_time_ms for check in self._health_history[-5:]
+            if check.is_healthy
+        ]
+
+        if len(recent_times) < 2:
+            return "insufficient_healthy_checks"
+
+        # Simple trend analysis
+        if recent_times[-1] > recent_times[0] * 1.5:
+            return "degrading"
+        elif recent_times[-1] < recent_times[0] * 0.7:
+            return "improving"
+        else:
+            return "stable"
+
+    def _calculate_health_score(self) -> float:
+        """Calculate overall health score (0-100)."""
+        if self.metrics.total_checks == 0:
+            return 100.0
+
+        # Base score from success rate
+        success_rate = (self.metrics.successful_checks / self.metrics.total_checks) * 100
+
+        # Penalize consecutive failures
+        failure_penalty = min(self.metrics.consecutive_failures * 10, 50)
+
+        # Penalize high response times
+        response_penalty = min(max(0, self.metrics.avg_response_time_ms - 50) / 10, 20)
+
+        # Calculate final score
+        score = success_rate - failure_penalty - response_penalty
+        return max(0.0, min(100.0, score))
+
+    def _analyze_retry_effectiveness(self) -> Dict:
+        """Analyze retry mechanism effectiveness."""
+        if not hasattr(self.database_manager, '_retry_executor'):
+            return {"status": "no_retry_executor"}
+
+        retry_executor = self.database_manager._retry_executor
+        if not retry_executor:
+            return {"status": "retry_executor_not_initialized"}
+
+        retry_stats = retry_executor.get_retry_stats()
+
+        return {
+            "status": "active",
+            "effectiveness_score": retry_stats.get("success_rate_percent", 0),
+            "retry_frequency": retry_stats.get("retry_rate_percent", 0),
+            "avg_attempts_per_operation": retry_stats.get("average_attempts_per_operation", 0),
+            "is_effective": retry_stats.get("success_rate_percent", 0) > 85
+        }
+
+    def _assess_connection_stability(self) -> Dict:
+        """Assess connection stability."""
+        stability_score = 100.0
+
+        # Penalize pool refreshes
+        if self.metrics.pool_refreshes > 0:
+            stability_score -= min(self.metrics.pool_refreshes * 15, 60)
+
+        # Penalize consecutive failures
+        if self.metrics.consecutive_failures > 0:
+            stability_score -= min(self.metrics.consecutive_failures * 20, 80)
+
+        return {
+            "stability_score": max(0.0, stability_score),
+            "pool_refreshes": self.metrics.pool_refreshes,
+            "consecutive_failures": self.metrics.consecutive_failures,
+            "is_stable": stability_score > 70
+        }
+
+    def _generate_health_recommendations(self) -> List[str]:
+        """Generate health recommendations based on current metrics."""
+        recommendations = []
+
+        # High failure rate
+        if self.metrics.total_checks > 0:
+            failure_rate = (self.metrics.failed_checks / self.metrics.total_checks) * 100
+            if failure_rate > 20:
+                recommendations.append(
+                    f"High failure rate ({failure_rate:.1f}%) - check database configuration"
+                )
+
+        # High response times
+        if self.metrics.avg_response_time_ms > 100:
+            recommendations.append(
+                f"High response times ({self.metrics.avg_response_time_ms:.1f}ms) - consider optimizing queries"
+            )
+
+        # Approaching failure threshold
+        if self.metrics.consecutive_failures >= self.failure_threshold - 1:
+            recommendations.append(
+                "Approaching failure threshold - pool refresh imminent"
+            )
+
+        # Frequent pool refreshes
+        if self.metrics.pool_refreshes > 3:
+            recommendations.append(
+                "Frequent pool refreshes detected - investigate underlying connection issues"
+            )
+
+        # No recent successful checks
+        if (self.metrics.last_success_time and
+            datetime.utcnow() - self.metrics.last_success_time > timedelta(minutes=5)):
+            recommendations.append(
+                "No successful health checks in last 5 minutes - database may be unavailable"
+            )
+
+        if not recommendations:
+            recommendations.append("Database health is optimal")
+
+        return recommendations


 class DatabaseMetricsCollector:
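
The new `_calculate_health_score()` combines the check success rate with penalties for consecutive failures and slow responses, clamped to 0-100. A small worked example of that arithmetic:

```python
# Worked example of the _calculate_health_score() arithmetic shown above.
successful_checks, total_checks = 90, 100
consecutive_failures = 2
avg_response_time_ms = 120.0

success_rate = successful_checks / total_checks * 100                 # 90.0
failure_penalty = min(consecutive_failures * 10, 50)                  # 20
response_penalty = min(max(0, avg_response_time_ms - 50) / 10, 20)    # 7.0
score = max(0.0, min(100.0, success_rate - failure_penalty - response_penalty))
print(score)  # 63.0
```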

mcp_code_indexer/database/database.py
@@ -21,8 +21,11 @@ from mcp_code_indexer.database.models import (
     Project, FileDescription, MergeConflict, SearchResult,
     CodebaseSizeInfo, ProjectOverview, WordFrequencyResult, WordFrequencyTerm
 )
-from mcp_code_indexer.database.
-
+from mcp_code_indexer.database.retry_executor import (
+    RetryExecutor, create_retry_executor
+)
+from mcp_code_indexer.database.exceptions import (
+    DatabaseError, DatabaseLockError, classify_sqlite_error, is_retryable_error
 )
 from mcp_code_indexer.database.connection_health import (
     ConnectionHealthMonitor, DatabaseMetricsCollector
@@ -45,7 +48,10 @@ class DatabaseManager:
                  retry_count: int = 5,
                  timeout: float = 10.0,
                  enable_wal_mode: bool = True,
-                 health_check_interval: float = 30.0
+                 health_check_interval: float = 30.0,
+                 retry_min_wait: float = 0.1,
+                 retry_max_wait: float = 2.0,
+                 retry_jitter: float = 0.2):
         """Initialize database manager with path to SQLite database."""
         self.db_path = db_path
         self.pool_size = pool_size
@@ -53,13 +59,20 @@ class DatabaseManager:
         self.timeout = timeout
         self.enable_wal_mode = enable_wal_mode
         self.health_check_interval = health_check_interval
+        self.retry_min_wait = retry_min_wait
+        self.retry_max_wait = retry_max_wait
+        self.retry_jitter = retry_jitter
         self._connection_pool: List[aiosqlite.Connection] = []
         self._pool_lock = None  # Will be initialized in async context
         self._write_lock = None  # Write serialization lock, initialized in async context

         # Retry and recovery components - configure with provided settings
-        self.
-
+        self._retry_executor = create_retry_executor(
+            max_attempts=retry_count,
+            min_wait_seconds=retry_min_wait,
+            max_wait_seconds=retry_max_wait,
+            jitter_max_seconds=retry_jitter
+        )

         # Health monitoring and metrics
         self._health_monitor = None  # Initialized in async context
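
`DatabaseManager` now threads retry tuning straight into `create_retry_executor()`. A hedged construction sketch, using only the parameter names visible in these hunks (the database path and the remaining constructor defaults are assumptions):

```python
# Hedged sketch: parameter names are taken from the hunks above; the path and
# any arguments not shown here are illustrative, not package defaults.
from pathlib import Path
from mcp_code_indexer.database.database import DatabaseManager

db = DatabaseManager(
    db_path=Path("~/.mcp-code-index/tracker.db").expanduser(),  # example path
    retry_count=5,        # becomes max_attempts for the retry executor
    retry_min_wait=0.1,   # seconds, lower bound for backoff
    retry_max_wait=2.0,   # seconds, upper bound for backoff
    retry_jitter=0.2,     # seconds of random jitter added to each wait
)
# await db.initialize()  # required before use ("call initialize() first")
```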
@@ -73,8 +86,7 @@ class DatabaseManager:
         self._pool_lock = asyncio.Lock()
         self._write_lock = asyncio.Lock()

-        #
-        self._recovery_manager = ConnectionRecoveryManager(self)
+        # Connection recovery is now handled by the retry executor

         # Initialize health monitoring with configured interval
         self._health_monitor = ConnectionHealthMonitor(
@@ -87,6 +99,8 @@ class DatabaseManager:
         # Ensure database directory exists
         self.db_path.parent.mkdir(parents=True, exist_ok=True)

+        # Database initialization now uses the modern retry executor directly
+
         # Apply migrations in order
         migrations_dir = Path(__file__).parent.parent.parent.parent / "migrations"
         migration_files = sorted(migrations_dir.glob("*.sql"))
@@ -217,30 +231,48 @@ class DatabaseManager:
         """
         Get a database connection with write serialization and automatic retry logic.

-        This
-
+        This uses the new RetryExecutor to properly handle retry logic without
+        the broken yield-in-retry-loop pattern that caused generator errors.

         Args:
             operation_name: Name of the operation for logging and monitoring
         """
-        if self._write_lock is None
+        if self._write_lock is None:
             raise RuntimeError("DatabaseManager not initialized - call initialize() first")

-        async
+        async def get_write_connection():
+            """Inner function to get connection - will be retried by executor."""
+            async with self._write_lock:
+                async with self.get_connection() as conn:
+                    return conn
+
+        try:
+            # Use retry executor to handle connection acquisition with retries
+            connection = await self._retry_executor.execute_with_retry(
+                get_write_connection,
+                operation_name
+            )
+
             try:
-
-
-
-
-                # Reset failure count on success
-                if self._recovery_manager:
-                    self._recovery_manager.reset_failure_count()
+                yield connection
+
+                # Success - retry executor handles all failure tracking

             except Exception as e:
-                #
-                if self._recovery_manager:
-                    await self._recovery_manager.handle_persistent_failure(operation_name, e)
+                # Error handling is managed by the retry executor
                 raise
+
+        except DatabaseError:
+            # Re-raise our custom database errors as-is
+            raise
+        except Exception as e:
+            # Classify and wrap other exceptions
+            classified_error = classify_sqlite_error(e, operation_name)
+            logger.error(
+                f"Database operation '{operation_name}' failed: {classified_error.message}",
+                extra={"structured_data": classified_error.to_dict()}
+            )
+            raise classified_error

     def get_database_stats(self) -> Dict[str, Any]:
         """
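
The docstring above explains the motivation: the old context manager yielded from inside a retry loop, which breaks the async-generator protocol when a retry re-enters it, so the new code retries only the acquisition and then yields exactly once. A generic sketch of that pattern (not the package's actual code):

```python
import asyncio
from contextlib import asynccontextmanager

# Conceptual sketch of the fix described above: retry the acquisition *before*
# yielding, so the async generator backing the context manager yields once.
@asynccontextmanager
async def managed_resource(acquire_with_retry):
    resource = await acquire_with_retry()    # all retries happen here
    try:
        yield resource                       # single yield keeps the generator protocol intact
    finally:
        pass                                 # release/cleanup would go here

async def demo() -> None:
    async def acquire() -> str:
        return "connection"
    async with managed_resource(acquire) as conn:
        print(conn)

asyncio.run(demo())
```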
@@ -253,14 +285,11 @@ class DatabaseManager:
             "connection_pool": {
                 "configured_size": self.pool_size,
                 "current_size": len(self._connection_pool)
-            }
+            },
+            "retry_executor": self._retry_executor.get_retry_stats() if self._retry_executor else {},
         }

-
-        stats["retry_stats"] = self._retry_handler.get_retry_stats()
-
-        if self._recovery_manager:
-            stats["recovery_stats"] = self._recovery_manager.get_recovery_stats()
+        # Legacy retry handler removed - retry executor stats are included above

         if self._health_monitor:
             stats["health_status"] = self._health_monitor.get_health_status()
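
`get_database_stats()` now reports retry activity under a single `"retry_executor"` key instead of the removed `"retry_stats"`/`"recovery_stats"` entries. An illustrative consumer, assuming an initialized `DatabaseManager` named `db`:

```python
# Illustrative consumer of the reshaped stats dict; key names come from the hunk
# above, while nested values depend on RetryExecutor.get_retry_stats() and the
# health monitor, which this diff does not show in full.
def print_db_stats(db) -> None:
    stats = db.get_database_stats()
    print(stats["connection_pool"]["configured_size"])
    print(stats["connection_pool"]["current_size"])
    print(stats["retry_executor"])      # {} until the retry executor has been created
    print(stats.get("health_status"))   # added once the health monitor is running
```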
@@ -347,10 +376,13 @@ class DatabaseManager:
         """
         Execute a database operation within a transaction with automatic retry.

+        Uses the new RetryExecutor for robust retry handling with proper error
+        classification and exponential backoff.
+
         Args:
             operation_func: Async function that takes a connection and performs the operation
             operation_name: Name of the operation for logging
-            max_retries: Maximum retry attempts
+            max_retries: Maximum retry attempts (overrides default retry executor config)
             timeout_seconds: Transaction timeout in seconds

         Returns:
@@ -363,9 +395,9 @@ class DatabaseManager:

             result = await db.execute_transaction_with_retry(my_operation, "insert_data")
         """
-        last_error = None

-
+        async def execute_transaction():
+            """Inner function to execute transaction - will be retried by executor."""
             try:
                 async with self.get_immediate_transaction(operation_name, timeout_seconds) as conn:
                     result = await operation_func(conn)
@@ -382,34 +414,15 @@ class DatabaseManager:
                     return result

             except (aiosqlite.OperationalError, asyncio.TimeoutError) as e:
-                last_error = e
-
                 # Record locking event for metrics
                 if self._metrics_collector and "locked" in str(e).lower():
                     self._metrics_collector.record_locking_event(operation_name, str(e))

-
-
-
-
-
-
-                    logger.warning(
-                        f"Transaction attempt {attempt} failed for {operation_name}, retrying in {wait_time:.2f}s: {e}",
-                        extra={
-                            "structured_data": {
-                                "transaction_retry": {
-                                    "operation": operation_name,
-                                    "attempt": attempt,
-                                    "delay_seconds": wait_time,
-                                    "error": str(e)
-                                }
-                            }
-                        }
-                    )
-                    await asyncio.sleep(wait_time)
-                else:
-                    # Record failed operation metrics
+                # Classify the error for better handling
+                classified_error = classify_sqlite_error(e, operation_name)
+
+                # Record failed operation metrics for non-retryable errors
+                if not is_retryable_error(classified_error):
                     if self._metrics_collector:
                         self._metrics_collector.record_operation(
                             operation_name,
@@ -417,21 +430,34 @@ class DatabaseManager:
                             False,
                             len(self._connection_pool)
                         )
-
-
-                        f"Transaction failed after {max_retries} attempts for {operation_name}: {e}",
-                        extra={
-                            "structured_data": {
-                                "transaction_failure": {
-                                    "operation": operation_name,
-                                    "max_retries": max_retries,
-                                    "final_error": str(e)
-                                }
-                            }
-                        }
-                    )
+
+                raise classified_error

-
+        try:
+            # Create a temporary retry executor with custom max_retries if different from default
+            if max_retries != self._retry_executor.config.max_attempts:
+                from mcp_code_indexer.database.retry_executor import RetryConfig, RetryExecutor
+                temp_config = RetryConfig(
+                    max_attempts=max_retries,
+                    min_wait_seconds=self._retry_executor.config.min_wait_seconds,
+                    max_wait_seconds=self._retry_executor.config.max_wait_seconds,
+                    jitter_max_seconds=self._retry_executor.config.jitter_max_seconds
+                )
+                temp_executor = RetryExecutor(temp_config)
+                return await temp_executor.execute_with_retry(execute_transaction, operation_name)
+            else:
+                return await self._retry_executor.execute_with_retry(execute_transaction, operation_name)
+
+        except DatabaseError as e:
+            # Record failed operation metrics for final failure
+            if self._metrics_collector:
+                self._metrics_collector.record_operation(
+                    operation_name,
+                    timeout_seconds * 1000,
+                    False,
+                    len(self._connection_pool)
+                )
+            raise

     # Project operations

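
Finally, `execute_transaction_with_retry()` keeps the public shape shown in its docstring (the `insert_data` example) while routing retries through the executor; passing a `max_retries` that differs from the configured default builds a temporary `RetryExecutor`, as shown above. A hedged usage sketch, with an illustrative SQL statement:

```python
# Hedged usage sketch assembled from the docstring and parameter list shown above.
# The table and column names in the SQL are illustrative, not the package schema.
async def insert_data(conn):
    await conn.execute(
        "INSERT INTO example_table (path, description) VALUES (?, ?)",
        ("src/main.py", "Application entry point"),
    )
    return True

async def run(db):  # `db` is an initialized DatabaseManager
    return await db.execute_transaction_with_retry(
        insert_data,
        "insert_data",
        max_retries=3,        # a non-default value triggers the temporary RetryExecutor path
        timeout_seconds=10.0,
    )
```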