kailash 0.9.15__py3-none-any.whl → 0.9.16__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kailash/middleware/database/base_models.py +7 -1
- kailash/migration/__init__.py +30 -0
- kailash/migration/cli.py +340 -0
- kailash/migration/compatibility_checker.py +662 -0
- kailash/migration/configuration_validator.py +837 -0
- kailash/migration/documentation_generator.py +1828 -0
- kailash/migration/examples/__init__.py +5 -0
- kailash/migration/examples/complete_migration_example.py +692 -0
- kailash/migration/migration_assistant.py +715 -0
- kailash/migration/performance_comparator.py +760 -0
- kailash/migration/regression_detector.py +1141 -0
- kailash/migration/tests/__init__.py +6 -0
- kailash/migration/tests/test_compatibility_checker.py +403 -0
- kailash/migration/tests/test_integration.py +463 -0
- kailash/migration/tests/test_migration_assistant.py +397 -0
- kailash/migration/tests/test_performance_comparator.py +433 -0
- kailash/nodes/data/async_sql.py +1507 -6
- kailash/runtime/local.py +1255 -8
- kailash/runtime/monitoring/__init__.py +1 -0
- kailash/runtime/monitoring/runtime_monitor.py +780 -0
- kailash/runtime/resource_manager.py +3033 -0
- kailash/sdk_exceptions.py +21 -0
- kailash/workflow/cyclic_runner.py +18 -2
- {kailash-0.9.15.dist-info → kailash-0.9.16.dist-info}/METADATA +1 -1
- {kailash-0.9.15.dist-info → kailash-0.9.16.dist-info}/RECORD +30 -12
- {kailash-0.9.15.dist-info → kailash-0.9.16.dist-info}/WHEEL +0 -0
- {kailash-0.9.15.dist-info → kailash-0.9.16.dist-info}/entry_points.txt +0 -0
- {kailash-0.9.15.dist-info → kailash-0.9.16.dist-info}/licenses/LICENSE +0 -0
- {kailash-0.9.15.dist-info → kailash-0.9.16.dist-info}/licenses/NOTICE +0 -0
- {kailash-0.9.15.dist-info → kailash-0.9.16.dist-info}/top_level.txt +0 -0
kailash/nodes/data/async_sql.py
CHANGED
@@ -26,15 +26,19 @@ Key Features:
|
|
26
26
|
|
27
27
|
import asyncio
|
28
28
|
import json
|
29
|
+
import logging
|
29
30
|
import os
|
30
31
|
import random
|
31
32
|
import re
|
33
|
+
import threading
|
34
|
+
import time
|
32
35
|
from abc import ABC, abstractmethod
|
36
|
+
from collections import defaultdict, deque
|
33
37
|
from dataclasses import dataclass
|
34
38
|
from datetime import date, datetime
|
35
39
|
from decimal import Decimal
|
36
40
|
from enum import Enum
|
37
|
-
from typing import Any, AsyncIterator, Optional, Union
|
41
|
+
from typing import Any, AsyncIterator, Callable, Dict, List, Optional, Union
|
38
42
|
|
39
43
|
import yaml
|
40
44
|
|
@@ -42,6 +46,8 @@ from kailash.nodes.base import NodeParameter, register_node
|
|
42
46
|
from kailash.nodes.base_async import AsyncNode
|
43
47
|
from kailash.sdk_exceptions import NodeExecutionError, NodeValidationError
|
44
48
|
|
49
|
+
logger = logging.getLogger(__name__)
|
50
|
+
|
45
51
|
# Import optimistic locking for version control
|
46
52
|
try:
|
47
53
|
from kailash.nodes.data.optimistic_locking import (
|
@@ -298,6 +304,598 @@ class DatabaseConfig:
|
|
298
304
|
raise ValueError("SQLite requires database path")
|
299
305
|
|
300
306
|
|
307
|
+
# =============================================================================
|
308
|
+
# Enterprise Connection Pool Management
|
309
|
+
# =============================================================================
|
310
|
+
|
311
|
+
|
312
|
+
@dataclass
|
313
|
+
class PoolMetrics:
|
314
|
+
"""Connection pool metrics for monitoring and analytics."""
|
315
|
+
|
316
|
+
# Basic metrics
|
317
|
+
active_connections: int = 0
|
318
|
+
idle_connections: int = 0
|
319
|
+
total_connections: int = 0
|
320
|
+
max_connections: int = 0
|
321
|
+
|
322
|
+
# Usage metrics
|
323
|
+
connections_created: int = 0
|
324
|
+
connections_closed: int = 0
|
325
|
+
connections_failed: int = 0
|
326
|
+
|
327
|
+
# Performance metrics
|
328
|
+
avg_query_time: float = 0.0
|
329
|
+
total_queries: int = 0
|
330
|
+
queries_per_second: float = 0.0
|
331
|
+
|
332
|
+
# Health metrics
|
333
|
+
health_check_successes: int = 0
|
334
|
+
health_check_failures: int = 0
|
335
|
+
last_health_check: Optional[datetime] = None
|
336
|
+
|
337
|
+
# Pool lifecycle
|
338
|
+
pool_created_at: Optional[datetime] = None
|
339
|
+
pool_last_used: Optional[datetime] = None
|
340
|
+
|
341
|
+
def to_dict(self) -> Dict[str, Any]:
|
342
|
+
"""Convert metrics to dictionary for serialization."""
|
343
|
+
return {
|
344
|
+
"active_connections": self.active_connections,
|
345
|
+
"idle_connections": self.idle_connections,
|
346
|
+
"total_connections": self.total_connections,
|
347
|
+
"max_connections": self.max_connections,
|
348
|
+
"connections_created": self.connections_created,
|
349
|
+
"connections_closed": self.connections_closed,
|
350
|
+
"connections_failed": self.connections_failed,
|
351
|
+
"avg_query_time": self.avg_query_time,
|
352
|
+
"total_queries": self.total_queries,
|
353
|
+
"queries_per_second": self.queries_per_second,
|
354
|
+
"health_check_successes": self.health_check_successes,
|
355
|
+
"health_check_failures": self.health_check_failures,
|
356
|
+
"last_health_check": (
|
357
|
+
self.last_health_check.isoformat() if self.last_health_check else None
|
358
|
+
),
|
359
|
+
"pool_created_at": (
|
360
|
+
self.pool_created_at.isoformat() if self.pool_created_at else None
|
361
|
+
),
|
362
|
+
"pool_last_used": (
|
363
|
+
self.pool_last_used.isoformat() if self.pool_last_used else None
|
364
|
+
),
|
365
|
+
}
|
366
|
+
|
367
|
+
|
368
|
+
@dataclass
|
369
|
+
class HealthCheckResult:
|
370
|
+
"""Result of a connection pool health check."""
|
371
|
+
|
372
|
+
is_healthy: bool
|
373
|
+
latency_ms: float
|
374
|
+
error_message: Optional[str] = None
|
375
|
+
checked_at: Optional[datetime] = None
|
376
|
+
connection_count: int = 0
|
377
|
+
|
378
|
+
def __post_init__(self):
|
379
|
+
if self.checked_at is None:
|
380
|
+
self.checked_at = datetime.now()
|
381
|
+
|
382
|
+
|
383
|
+
class CircuitBreakerState(Enum):
|
384
|
+
"""Circuit breaker states for connection management."""
|
385
|
+
|
386
|
+
CLOSED = "closed" # Normal operation
|
387
|
+
OPEN = "open" # Circuit breaker is open - failing fast
|
388
|
+
HALF_OPEN = "half_open" # Testing if service is back
|
389
|
+
|
390
|
+
|
391
|
+
class ConnectionCircuitBreaker:
|
392
|
+
"""Circuit breaker for connection pool health management."""
|
393
|
+
|
394
|
+
def __init__(
|
395
|
+
self,
|
396
|
+
failure_threshold: int = 5,
|
397
|
+
recovery_timeout: int = 60,
|
398
|
+
success_threshold: int = 2,
|
399
|
+
):
|
400
|
+
"""Initialize circuit breaker.
|
401
|
+
|
402
|
+
Args:
|
403
|
+
failure_threshold: Number of failures before opening circuit
|
404
|
+
recovery_timeout: Seconds to wait before attempting recovery
|
405
|
+
success_threshold: Number of successes needed to close circuit
|
406
|
+
"""
|
407
|
+
self.failure_threshold = failure_threshold
|
408
|
+
self.recovery_timeout = recovery_timeout
|
409
|
+
self.success_threshold = success_threshold
|
410
|
+
|
411
|
+
self.state = CircuitBreakerState.CLOSED
|
412
|
+
self.failure_count = 0
|
413
|
+
self.success_count = 0
|
414
|
+
self.last_failure_time: Optional[datetime] = None
|
415
|
+
self._lock = threading.RLock()
|
416
|
+
|
417
|
+
def can_execute(self) -> bool:
|
418
|
+
"""Check if operation can be executed."""
|
419
|
+
with self._lock:
|
420
|
+
if self.state == CircuitBreakerState.CLOSED:
|
421
|
+
return True
|
422
|
+
elif self.state == CircuitBreakerState.OPEN:
|
423
|
+
if self._should_attempt_reset():
|
424
|
+
self.state = CircuitBreakerState.HALF_OPEN
|
425
|
+
self.success_count = 0
|
426
|
+
return True
|
427
|
+
return False
|
428
|
+
else: # HALF_OPEN
|
429
|
+
return True
|
430
|
+
|
431
|
+
def record_success(self) -> None:
|
432
|
+
"""Record a successful operation."""
|
433
|
+
with self._lock:
|
434
|
+
if self.state == CircuitBreakerState.HALF_OPEN:
|
435
|
+
self.success_count += 1
|
436
|
+
if self.success_count >= self.success_threshold:
|
437
|
+
self.state = CircuitBreakerState.CLOSED
|
438
|
+
self.failure_count = 0
|
439
|
+
elif self.state == CircuitBreakerState.CLOSED:
|
440
|
+
self.failure_count = 0
|
441
|
+
|
442
|
+
def record_failure(self) -> None:
|
443
|
+
"""Record a failed operation."""
|
444
|
+
with self._lock:
|
445
|
+
self.failure_count += 1
|
446
|
+
self.last_failure_time = datetime.now()
|
447
|
+
|
448
|
+
if self.state == CircuitBreakerState.HALF_OPEN:
|
449
|
+
self.state = CircuitBreakerState.OPEN
|
450
|
+
self.success_count = 0
|
451
|
+
elif (
|
452
|
+
self.state == CircuitBreakerState.CLOSED
|
453
|
+
and self.failure_count >= self.failure_threshold
|
454
|
+
):
|
455
|
+
self.state = CircuitBreakerState.OPEN
|
456
|
+
|
457
|
+
def _should_attempt_reset(self) -> bool:
|
458
|
+
"""Check if enough time has passed to attempt reset."""
|
459
|
+
if not self.last_failure_time:
|
460
|
+
return True
|
461
|
+
|
462
|
+
time_since_failure = (datetime.now() - self.last_failure_time).total_seconds()
|
463
|
+
return time_since_failure >= self.recovery_timeout
|
464
|
+
|
465
|
+
def get_state(self) -> Dict[str, Any]:
|
466
|
+
"""Get current circuit breaker state."""
|
467
|
+
with self._lock:
|
468
|
+
return {
|
469
|
+
"state": self.state.value,
|
470
|
+
"failure_count": self.failure_count,
|
471
|
+
"success_count": self.success_count,
|
472
|
+
"last_failure_time": (
|
473
|
+
self.last_failure_time.isoformat()
|
474
|
+
if self.last_failure_time
|
475
|
+
else None
|
476
|
+
),
|
477
|
+
}
|
478
|
+
|
479
|
+
|
480
|
+
class EnterpriseConnectionPool:
|
481
|
+
"""Enterprise-grade connection pool with monitoring, health checks, and adaptive sizing."""
|
482
|
+
|
483
|
+
def __init__(
|
484
|
+
self,
|
485
|
+
pool_id: str,
|
486
|
+
database_config: "DatabaseConfig",
|
487
|
+
adapter_class: type,
|
488
|
+
min_size: int = 5,
|
489
|
+
max_size: int = 20,
|
490
|
+
initial_size: int = 10,
|
491
|
+
health_check_interval: int = 30,
|
492
|
+
enable_analytics: bool = True,
|
493
|
+
enable_adaptive_sizing: bool = True,
|
494
|
+
):
|
495
|
+
"""Initialize enterprise connection pool.
|
496
|
+
|
497
|
+
Args:
|
498
|
+
pool_id: Unique identifier for this pool
|
499
|
+
database_config: Database configuration
|
500
|
+
adapter_class: Database adapter class to use
|
501
|
+
min_size: Minimum pool size
|
502
|
+
max_size: Maximum pool size
|
503
|
+
initial_size: Initial pool size
|
504
|
+
health_check_interval: Health check interval in seconds
|
505
|
+
enable_analytics: Enable performance analytics
|
506
|
+
enable_adaptive_sizing: Enable adaptive pool sizing
|
507
|
+
"""
|
508
|
+
self.pool_id = pool_id
|
509
|
+
self.database_config = database_config
|
510
|
+
self.adapter_class = adapter_class
|
511
|
+
self.min_size = min_size
|
512
|
+
self.max_size = max_size
|
513
|
+
self._shutdown = False # Shutdown flag for background tasks
|
514
|
+
self.initial_size = initial_size
|
515
|
+
self.health_check_interval = health_check_interval
|
516
|
+
# Disable analytics during tests to prevent background tasks
|
517
|
+
import os
|
518
|
+
|
519
|
+
in_test_mode = os.getenv(
|
520
|
+
"PYTEST_CURRENT_TEST"
|
521
|
+
) is not None or "pytest" in os.getenv("_", "")
|
522
|
+
self.enable_analytics = enable_analytics and not in_test_mode
|
523
|
+
if in_test_mode and enable_analytics:
|
524
|
+
logger.info(
|
525
|
+
f"Pool '{pool_id}': Disabled analytics in test mode to prevent background task cleanup issues"
|
526
|
+
)
|
527
|
+
self.enable_adaptive_sizing = enable_adaptive_sizing
|
528
|
+
|
529
|
+
# Pool state
|
530
|
+
self._pool = None
|
531
|
+
self._adapter = None
|
532
|
+
self._metrics = PoolMetrics(pool_created_at=datetime.now())
|
533
|
+
self._circuit_breaker = ConnectionCircuitBreaker()
|
534
|
+
|
535
|
+
# Analytics and monitoring
|
536
|
+
self._query_times = deque(maxlen=1000) # Last 1000 query times
|
537
|
+
self._connection_usage_history = deque(maxlen=100) # Last 100 usage snapshots
|
538
|
+
self._health_check_history = deque(maxlen=50) # Last 50 health checks
|
539
|
+
|
540
|
+
# Adaptive sizing
|
541
|
+
self._sizing_history = deque(maxlen=20) # Last 20 sizing decisions
|
542
|
+
self._last_resize_time: Optional[datetime] = None
|
543
|
+
|
544
|
+
# Thread safety
|
545
|
+
self._lock = asyncio.Lock()
|
546
|
+
self._metrics_lock = threading.RLock()
|
547
|
+
|
548
|
+
# Background tasks
|
549
|
+
self._health_check_task: Optional[asyncio.Task] = None
|
550
|
+
self._analytics_task: Optional[asyncio.Task] = None
|
551
|
+
|
552
|
+
logger.info(
|
553
|
+
f"EnterpriseConnectionPool '{pool_id}' initialized with {min_size}-{max_size} connections"
|
554
|
+
)
|
555
|
+
|
556
|
+
async def initialize(self) -> None:
|
557
|
+
"""Initialize the connection pool."""
|
558
|
+
async with self._lock:
|
559
|
+
if self._adapter is None:
|
560
|
+
self._adapter = self.adapter_class(self.database_config)
|
561
|
+
await self._adapter.connect()
|
562
|
+
self._pool = self._adapter._pool
|
563
|
+
|
564
|
+
# Update metrics
|
565
|
+
with self._metrics_lock:
|
566
|
+
self._metrics.pool_created_at = datetime.now()
|
567
|
+
self._metrics.max_connections = self.max_size
|
568
|
+
|
569
|
+
# Start background tasks
|
570
|
+
if self.enable_analytics:
|
571
|
+
self._health_check_task = asyncio.create_task(
|
572
|
+
self._health_check_loop()
|
573
|
+
)
|
574
|
+
self._analytics_task = asyncio.create_task(self._analytics_loop())
|
575
|
+
|
576
|
+
logger.info(f"Pool '{self.pool_id}' initialized successfully")
|
577
|
+
|
578
|
+
async def get_connection(self):
|
579
|
+
"""Get a connection from the pool with circuit breaker protection."""
|
580
|
+
if not self._circuit_breaker.can_execute():
|
581
|
+
raise ConnectionError(f"Circuit breaker is open for pool '{self.pool_id}'")
|
582
|
+
|
583
|
+
try:
|
584
|
+
if self._pool is None:
|
585
|
+
await self.initialize()
|
586
|
+
|
587
|
+
connection = await self._get_pool_connection()
|
588
|
+
self._circuit_breaker.record_success()
|
589
|
+
|
590
|
+
# Update metrics
|
591
|
+
with self._metrics_lock:
|
592
|
+
self._metrics.pool_last_used = datetime.now()
|
593
|
+
|
594
|
+
return connection
|
595
|
+
|
596
|
+
except Exception as e:
|
597
|
+
self._circuit_breaker.record_failure()
|
598
|
+
with self._metrics_lock:
|
599
|
+
self._metrics.connections_failed += 1
|
600
|
+
logger.error(f"Failed to get connection from pool '{self.pool_id}': {e}")
|
601
|
+
raise
|
602
|
+
|
603
|
+
async def _get_pool_connection(self):
|
604
|
+
"""Get connection from the underlying pool (adapter-specific)."""
|
605
|
+
if hasattr(self._pool, "acquire"):
|
606
|
+
# asyncpg style pool
|
607
|
+
return self._pool.acquire()
|
608
|
+
elif hasattr(self._pool, "get_connection"):
|
609
|
+
# aiomysql style pool
|
610
|
+
return self._pool.get_connection()
|
611
|
+
else:
|
612
|
+
# Direct adapter access for SQLite
|
613
|
+
return self._adapter._get_connection()
|
614
|
+
|
615
|
+
async def execute_query(
|
616
|
+
self, query: str, params: Optional[Union[tuple, dict]] = None, **kwargs
|
617
|
+
) -> Any:
|
618
|
+
"""Execute query with performance tracking."""
|
619
|
+
start_time = time.time()
|
620
|
+
|
621
|
+
try:
|
622
|
+
result = await self._adapter.execute(query, params, **kwargs)
|
623
|
+
|
624
|
+
# Record performance metrics
|
625
|
+
execution_time = time.time() - start_time
|
626
|
+
self._record_query_metrics(execution_time, success=True)
|
627
|
+
|
628
|
+
return result
|
629
|
+
|
630
|
+
except Exception as e:
|
631
|
+
execution_time = time.time() - start_time
|
632
|
+
self._record_query_metrics(execution_time, success=False)
|
633
|
+
raise
|
634
|
+
|
635
|
+
def _record_query_metrics(self, execution_time: float, success: bool) -> None:
|
636
|
+
"""Record query performance metrics."""
|
637
|
+
if not self.enable_analytics:
|
638
|
+
return
|
639
|
+
|
640
|
+
with self._metrics_lock:
|
641
|
+
self._metrics.total_queries += 1
|
642
|
+
self._query_times.append(execution_time)
|
643
|
+
|
644
|
+
# Calculate rolling average
|
645
|
+
if self._query_times:
|
646
|
+
self._metrics.avg_query_time = sum(self._query_times) / len(
|
647
|
+
self._query_times
|
648
|
+
)
|
649
|
+
|
650
|
+
# Update QPS (simple approximation)
|
651
|
+
now = datetime.now()
|
652
|
+
recent_queries = [t for t in self._query_times if t is not None]
|
653
|
+
if len(recent_queries) > 1:
|
654
|
+
time_span = 60 # 1 minute window
|
655
|
+
self._metrics.queries_per_second = min(
|
656
|
+
len(recent_queries) / time_span, len(recent_queries)
|
657
|
+
)
|
658
|
+
|
659
|
+
async def health_check(self) -> HealthCheckResult:
|
660
|
+
"""Perform comprehensive health check."""
|
661
|
+
start_time = time.time()
|
662
|
+
|
663
|
+
try:
|
664
|
+
if self._adapter is None:
|
665
|
+
return HealthCheckResult(
|
666
|
+
is_healthy=False, latency_ms=0, error_message="Pool not initialized"
|
667
|
+
)
|
668
|
+
|
669
|
+
# Perform simple query
|
670
|
+
await self.execute_query("SELECT 1", timeout=5)
|
671
|
+
|
672
|
+
latency = (time.time() - start_time) * 1000
|
673
|
+
|
674
|
+
result = HealthCheckResult(
|
675
|
+
is_healthy=True,
|
676
|
+
latency_ms=latency,
|
677
|
+
connection_count=self._get_active_connection_count(),
|
678
|
+
)
|
679
|
+
|
680
|
+
with self._metrics_lock:
|
681
|
+
self._metrics.health_check_successes += 1
|
682
|
+
self._metrics.last_health_check = datetime.now()
|
683
|
+
|
684
|
+
return result
|
685
|
+
|
686
|
+
except Exception as e:
|
687
|
+
latency = (time.time() - start_time) * 1000
|
688
|
+
|
689
|
+
result = HealthCheckResult(
|
690
|
+
is_healthy=False, latency_ms=latency, error_message=str(e)
|
691
|
+
)
|
692
|
+
|
693
|
+
with self._metrics_lock:
|
694
|
+
self._metrics.health_check_failures += 1
|
695
|
+
self._metrics.last_health_check = datetime.now()
|
696
|
+
|
697
|
+
return result
|
698
|
+
|
699
|
+
def _get_active_connection_count(self) -> int:
|
700
|
+
"""Get current active connection count."""
|
701
|
+
try:
|
702
|
+
if hasattr(self._pool, "__len__"):
|
703
|
+
return len(self._pool)
|
704
|
+
elif hasattr(self._pool, "size"):
|
705
|
+
return self._pool.size
|
706
|
+
elif hasattr(self._pool, "_size"):
|
707
|
+
return self._pool._size
|
708
|
+
else:
|
709
|
+
return 0
|
710
|
+
except:
|
711
|
+
return 0
|
712
|
+
|
713
|
+
async def _health_check_loop(self) -> None:
|
714
|
+
"""Background health check loop."""
|
715
|
+
while not getattr(self, "_shutdown", False):
|
716
|
+
try:
|
717
|
+
await asyncio.sleep(self.health_check_interval)
|
718
|
+
if getattr(self, "_shutdown", False):
|
719
|
+
break
|
720
|
+
result = await self.health_check()
|
721
|
+
self._health_check_history.append(result)
|
722
|
+
|
723
|
+
if not result.is_healthy:
|
724
|
+
logger.warning(
|
725
|
+
f"Health check failed for pool '{self.pool_id}': {result.error_message}"
|
726
|
+
)
|
727
|
+
|
728
|
+
except asyncio.CancelledError:
|
729
|
+
break
|
730
|
+
except Exception as e:
|
731
|
+
logger.error(f"Health check loop error for pool '{self.pool_id}': {e}")
|
732
|
+
await asyncio.sleep(5) # Brief pause before retry
|
733
|
+
|
734
|
+
async def _analytics_loop(self) -> None:
|
735
|
+
"""Background analytics and adaptive sizing loop."""
|
736
|
+
while not getattr(self, "_shutdown", False):
|
737
|
+
try:
|
738
|
+
await asyncio.sleep(60) # Run every minute
|
739
|
+
if getattr(self, "_shutdown", False):
|
740
|
+
break
|
741
|
+
|
742
|
+
# Update connection usage history
|
743
|
+
current_usage = {
|
744
|
+
"timestamp": datetime.now(),
|
745
|
+
"active_connections": self._get_active_connection_count(),
|
746
|
+
"avg_query_time": self._metrics.avg_query_time,
|
747
|
+
"queries_per_second": self._metrics.queries_per_second,
|
748
|
+
}
|
749
|
+
self._connection_usage_history.append(current_usage)
|
750
|
+
|
751
|
+
# Perform adaptive sizing if enabled
|
752
|
+
if self.enable_adaptive_sizing:
|
753
|
+
await self._consider_adaptive_resize()
|
754
|
+
|
755
|
+
except asyncio.CancelledError:
|
756
|
+
break
|
757
|
+
except Exception as e:
|
758
|
+
logger.error(f"Analytics loop error for pool '{self.pool_id}': {e}")
|
759
|
+
|
760
|
+
async def _consider_adaptive_resize(self) -> None:
|
761
|
+
"""Consider resizing the pool based on usage patterns."""
|
762
|
+
if len(self._connection_usage_history) < 5:
|
763
|
+
return # Not enough data
|
764
|
+
|
765
|
+
# Prevent frequent resizing
|
766
|
+
if (
|
767
|
+
self._last_resize_time
|
768
|
+
and (datetime.now() - self._last_resize_time).total_seconds() < 300
|
769
|
+
): # 5 minutes
|
770
|
+
return
|
771
|
+
|
772
|
+
recent_usage = list(self._connection_usage_history)[-5:] # Last 5 minutes
|
773
|
+
avg_connections = sum(u["active_connections"] for u in recent_usage) / len(
|
774
|
+
recent_usage
|
775
|
+
)
|
776
|
+
avg_qps = sum(u["queries_per_second"] for u in recent_usage) / len(recent_usage)
|
777
|
+
|
778
|
+
current_size = self._get_active_connection_count()
|
779
|
+
new_size = current_size
|
780
|
+
|
781
|
+
# Scale up conditions
|
782
|
+
if (
|
783
|
+
avg_connections > current_size * 0.8 # High utilization
|
784
|
+
and avg_qps > 10 # High query rate
|
785
|
+
and current_size < self.max_size
|
786
|
+
):
|
787
|
+
new_size = min(current_size + 2, self.max_size)
|
788
|
+
|
789
|
+
# Scale down conditions
|
790
|
+
elif (
|
791
|
+
avg_connections < current_size * 0.3 # Low utilization
|
792
|
+
and avg_qps < 2 # Low query rate
|
793
|
+
and current_size > self.min_size
|
794
|
+
):
|
795
|
+
new_size = max(current_size - 1, self.min_size)
|
796
|
+
|
797
|
+
if new_size != current_size:
|
798
|
+
logger.info(
|
799
|
+
f"Adaptive sizing: Pool '{self.pool_id}' {current_size} -> {new_size} connections"
|
800
|
+
)
|
801
|
+
# Note: Actual resizing implementation depends on the underlying pool type
|
802
|
+
# This would need to be implemented per adapter
|
803
|
+
self._last_resize_time = datetime.now()
|
804
|
+
|
805
|
+
self._sizing_history.append(
|
806
|
+
{
|
807
|
+
"timestamp": datetime.now(),
|
808
|
+
"old_size": current_size,
|
809
|
+
"new_size": new_size,
|
810
|
+
"trigger_avg_connections": avg_connections,
|
811
|
+
"trigger_avg_qps": avg_qps,
|
812
|
+
}
|
813
|
+
)
|
814
|
+
|
815
|
+
def get_metrics(self) -> PoolMetrics:
|
816
|
+
"""Get current pool metrics."""
|
817
|
+
with self._metrics_lock:
|
818
|
+
# Update real-time metrics
|
819
|
+
self._metrics.active_connections = self._get_active_connection_count()
|
820
|
+
self._metrics.total_connections = self._metrics.active_connections
|
821
|
+
return self._metrics
|
822
|
+
|
823
|
+
def get_analytics_summary(self) -> Dict[str, Any]:
|
824
|
+
"""Get comprehensive analytics summary."""
|
825
|
+
metrics = self.get_metrics()
|
826
|
+
|
827
|
+
return {
|
828
|
+
"pool_id": self.pool_id,
|
829
|
+
"pool_config": {
|
830
|
+
"min_size": self.min_size,
|
831
|
+
"max_size": self.max_size,
|
832
|
+
"current_size": self._get_active_connection_count(),
|
833
|
+
},
|
834
|
+
"metrics": metrics.to_dict(),
|
835
|
+
"circuit_breaker": self._circuit_breaker.get_state(),
|
836
|
+
"recent_health_checks": [
|
837
|
+
{
|
838
|
+
"is_healthy": hc.is_healthy,
|
839
|
+
"latency_ms": hc.latency_ms,
|
840
|
+
"checked_at": hc.checked_at.isoformat() if hc.checked_at else None,
|
841
|
+
"error": hc.error_message,
|
842
|
+
}
|
843
|
+
for hc in list(self._health_check_history)[-5:] # Last 5 checks
|
844
|
+
],
|
845
|
+
"usage_history": [
|
846
|
+
{
|
847
|
+
"timestamp": usage["timestamp"].isoformat(),
|
848
|
+
"active_connections": usage["active_connections"],
|
849
|
+
"avg_query_time": usage["avg_query_time"],
|
850
|
+
"queries_per_second": usage["queries_per_second"],
|
851
|
+
}
|
852
|
+
for usage in list(self._connection_usage_history)[
|
853
|
+
-10:
|
854
|
+
] # Last 10 snapshots
|
855
|
+
],
|
856
|
+
"sizing_history": [
|
857
|
+
{
|
858
|
+
"timestamp": sizing["timestamp"].isoformat(),
|
859
|
+
"old_size": sizing["old_size"],
|
860
|
+
"new_size": sizing["new_size"],
|
861
|
+
"trigger_avg_connections": sizing["trigger_avg_connections"],
|
862
|
+
"trigger_avg_qps": sizing["trigger_avg_qps"],
|
863
|
+
}
|
864
|
+
for sizing in list(self._sizing_history)[
|
865
|
+
-5:
|
866
|
+
] # Last 5 resize operations
|
867
|
+
],
|
868
|
+
}
|
869
|
+
|
870
|
+
async def close(self) -> None:
|
871
|
+
"""Close the connection pool and cleanup resources."""
|
872
|
+
# Set shutdown flag
|
873
|
+
self._shutdown = True
|
874
|
+
|
875
|
+
# Cancel background tasks
|
876
|
+
if self._health_check_task:
|
877
|
+
self._health_check_task.cancel()
|
878
|
+
try:
|
879
|
+
await self._health_check_task
|
880
|
+
except asyncio.CancelledError:
|
881
|
+
pass
|
882
|
+
|
883
|
+
if self._analytics_task:
|
884
|
+
self._analytics_task.cancel()
|
885
|
+
try:
|
886
|
+
await self._analytics_task
|
887
|
+
except asyncio.CancelledError:
|
888
|
+
pass
|
889
|
+
|
890
|
+
# Close adapter and pool
|
891
|
+
if self._adapter:
|
892
|
+
await self._adapter.disconnect()
|
893
|
+
self._adapter = None
|
894
|
+
|
895
|
+
self._pool = None
|
896
|
+
logger.info(f"Pool '{self.pool_id}' closed successfully")
|
897
|
+
|
898
|
+
|
301
899
|
class DatabaseAdapter(ABC):
|
302
900
|
"""Abstract base class for database adapters."""
|
303
901
|
|
@@ -823,6 +1421,21 @@ class SQLiteAdapter(DatabaseAdapter):
|
|
823
1421
|
_shared_memory_connections = {}
|
824
1422
|
_connection_locks = {}
|
825
1423
|
|
1424
|
+
def __init__(self, config: DatabaseConfig):
|
1425
|
+
"""Initialize SQLite adapter."""
|
1426
|
+
super().__init__(config)
|
1427
|
+
# Initialize SQLite-specific attributes
|
1428
|
+
self._db_path = config.connection_string or config.database or ":memory:"
|
1429
|
+
self._is_memory_db = self._db_path == ":memory:"
|
1430
|
+
self._connection = None
|
1431
|
+
# Import aiosqlite on init
|
1432
|
+
try:
|
1433
|
+
import aiosqlite
|
1434
|
+
|
1435
|
+
self._aiosqlite = aiosqlite
|
1436
|
+
except ImportError:
|
1437
|
+
self._aiosqlite = None
|
1438
|
+
|
826
1439
|
async def connect(self) -> None:
|
827
1440
|
"""Establish connection pool."""
|
828
1441
|
try:
|
@@ -1165,6 +1778,475 @@ class DatabaseConfigManager:
|
|
1165
1778
|
)
|
1166
1779
|
|
1167
1780
|
|
1781
|
+
# =============================================================================
|
1782
|
+
# Production Database Adapters
|
1783
|
+
# =============================================================================
|
1784
|
+
|
1785
|
+
|
1786
|
+
class ProductionPostgreSQLAdapter(PostgreSQLAdapter):
|
1787
|
+
"""Production-ready PostgreSQL adapter with enterprise features."""
|
1788
|
+
|
1789
|
+
def __init__(self, config: DatabaseConfig):
|
1790
|
+
super().__init__(config)
|
1791
|
+
self._enterprise_pool: Optional[EnterpriseConnectionPool] = None
|
1792
|
+
self._pool_config = {
|
1793
|
+
"min_size": getattr(config, "min_pool_size", 5),
|
1794
|
+
"max_size": getattr(config, "max_pool_size", 20),
|
1795
|
+
"health_check_interval": getattr(config, "health_check_interval", 30),
|
1796
|
+
"enable_analytics": getattr(config, "enable_analytics", True),
|
1797
|
+
"enable_adaptive_sizing": getattr(config, "enable_adaptive_sizing", True),
|
1798
|
+
}
|
1799
|
+
|
1800
|
+
async def connect(self) -> None:
|
1801
|
+
"""Connect using enterprise pool."""
|
1802
|
+
if self._enterprise_pool is None:
|
1803
|
+
pool_id = f"postgresql_{hash(str(self.config.__dict__))}"
|
1804
|
+
self._enterprise_pool = EnterpriseConnectionPool(
|
1805
|
+
pool_id=pool_id,
|
1806
|
+
database_config=self.config,
|
1807
|
+
adapter_class=PostgreSQLAdapter,
|
1808
|
+
**self._pool_config,
|
1809
|
+
)
|
1810
|
+
await self._enterprise_pool.initialize()
|
1811
|
+
self._pool = self._enterprise_pool._pool
|
1812
|
+
|
1813
|
+
async def execute(
|
1814
|
+
self, query: str, params: Optional[Union[tuple, dict]] = None, **kwargs
|
1815
|
+
) -> Any:
|
1816
|
+
"""Execute with enterprise monitoring."""
|
1817
|
+
if self._enterprise_pool:
|
1818
|
+
return await self._enterprise_pool.execute_query(query, params, **kwargs)
|
1819
|
+
else:
|
1820
|
+
return await super().execute(query, params, **kwargs)
|
1821
|
+
|
1822
|
+
async def health_check(self) -> HealthCheckResult:
|
1823
|
+
"""Perform health check."""
|
1824
|
+
if self._enterprise_pool:
|
1825
|
+
return await self._enterprise_pool.health_check()
|
1826
|
+
else:
|
1827
|
+
# Fallback basic health check
|
1828
|
+
try:
|
1829
|
+
await self.execute("SELECT 1")
|
1830
|
+
return HealthCheckResult(is_healthy=True, latency_ms=0)
|
1831
|
+
except Exception as e:
|
1832
|
+
return HealthCheckResult(
|
1833
|
+
is_healthy=False, latency_ms=0, error_message=str(e)
|
1834
|
+
)
|
1835
|
+
|
1836
|
+
def get_pool_metrics(self) -> Optional[PoolMetrics]:
|
1837
|
+
"""Get pool metrics."""
|
1838
|
+
return self._enterprise_pool.get_metrics() if self._enterprise_pool else None
|
1839
|
+
|
1840
|
+
def get_analytics_summary(self) -> Optional[Dict[str, Any]]:
|
1841
|
+
"""Get analytics summary."""
|
1842
|
+
return (
|
1843
|
+
self._enterprise_pool.get_analytics_summary()
|
1844
|
+
if self._enterprise_pool
|
1845
|
+
else None
|
1846
|
+
)
|
1847
|
+
|
1848
|
+
async def disconnect(self) -> None:
|
1849
|
+
"""Disconnect enterprise pool."""
|
1850
|
+
if self._enterprise_pool:
|
1851
|
+
await self._enterprise_pool.close()
|
1852
|
+
self._enterprise_pool = None
|
1853
|
+
else:
|
1854
|
+
await super().disconnect()
|
1855
|
+
|
1856
|
+
|
1857
|
+
class ProductionMySQLAdapter(MySQLAdapter):
|
1858
|
+
"""Production-ready MySQL adapter with enterprise features."""
|
1859
|
+
|
1860
|
+
def __init__(self, config: DatabaseConfig):
|
1861
|
+
super().__init__(config)
|
1862
|
+
self._enterprise_pool: Optional[EnterpriseConnectionPool] = None
|
1863
|
+
self._pool_config = {
|
1864
|
+
"min_size": getattr(config, "min_pool_size", 5),
|
1865
|
+
"max_size": getattr(config, "max_pool_size", 20),
|
1866
|
+
"health_check_interval": getattr(config, "health_check_interval", 30),
|
1867
|
+
"enable_analytics": getattr(config, "enable_analytics", True),
|
1868
|
+
"enable_adaptive_sizing": getattr(config, "enable_adaptive_sizing", True),
|
1869
|
+
}
|
1870
|
+
|
1871
|
+
async def connect(self) -> None:
|
1872
|
+
"""Connect using enterprise pool."""
|
1873
|
+
if self._enterprise_pool is None:
|
1874
|
+
pool_id = f"mysql_{hash(str(self.config.__dict__))}"
|
1875
|
+
self._enterprise_pool = EnterpriseConnectionPool(
|
1876
|
+
pool_id=pool_id,
|
1877
|
+
database_config=self.config,
|
1878
|
+
adapter_class=MySQLAdapter,
|
1879
|
+
**self._pool_config,
|
1880
|
+
)
|
1881
|
+
await self._enterprise_pool.initialize()
|
1882
|
+
self._pool = self._enterprise_pool._pool
|
1883
|
+
|
1884
|
+
async def execute(
|
1885
|
+
self, query: str, params: Optional[Union[tuple, dict]] = None, **kwargs
|
1886
|
+
) -> Any:
|
1887
|
+
"""Execute with enterprise monitoring."""
|
1888
|
+
if self._enterprise_pool:
|
1889
|
+
return await self._enterprise_pool.execute_query(query, params, **kwargs)
|
1890
|
+
else:
|
1891
|
+
return await super().execute(query, params, **kwargs)
|
1892
|
+
|
1893
|
+
async def health_check(self) -> HealthCheckResult:
|
1894
|
+
"""Perform health check."""
|
1895
|
+
if self._enterprise_pool:
|
1896
|
+
return await self._enterprise_pool.health_check()
|
1897
|
+
else:
|
1898
|
+
# Fallback basic health check
|
1899
|
+
try:
|
1900
|
+
await self.execute("SELECT 1")
|
1901
|
+
return HealthCheckResult(is_healthy=True, latency_ms=0)
|
1902
|
+
except Exception as e:
|
1903
|
+
return HealthCheckResult(
|
1904
|
+
is_healthy=False, latency_ms=0, error_message=str(e)
|
1905
|
+
)
|
1906
|
+
|
1907
|
+
def get_pool_metrics(self) -> Optional[PoolMetrics]:
|
1908
|
+
"""Get pool metrics."""
|
1909
|
+
return self._enterprise_pool.get_metrics() if self._enterprise_pool else None
|
1910
|
+
|
1911
|
+
def get_analytics_summary(self) -> Optional[Dict[str, Any]]:
|
1912
|
+
"""Get analytics summary."""
|
1913
|
+
return (
|
1914
|
+
self._enterprise_pool.get_analytics_summary()
|
1915
|
+
if self._enterprise_pool
|
1916
|
+
else None
|
1917
|
+
)
|
1918
|
+
|
1919
|
+
async def disconnect(self) -> None:
|
1920
|
+
"""Disconnect enterprise pool."""
|
1921
|
+
if self._enterprise_pool:
|
1922
|
+
await self._enterprise_pool.close()
|
1923
|
+
self._enterprise_pool = None
|
1924
|
+
else:
|
1925
|
+
await super().disconnect()
|
1926
|
+
|
1927
|
+
|
1928
|
+
class ProductionSQLiteAdapter(SQLiteAdapter):
|
1929
|
+
"""Production-ready SQLite adapter with enterprise features."""
|
1930
|
+
|
1931
|
+
def __init__(self, config: DatabaseConfig):
|
1932
|
+
super().__init__(config)
|
1933
|
+
# Initialize SQLite-specific attributes
|
1934
|
+
self._db_path = config.connection_string or config.database or ":memory:"
|
1935
|
+
self._is_memory_db = self._db_path == ":memory:"
|
1936
|
+
self._connection = None
|
1937
|
+
self._aiosqlite = None
|
1938
|
+
|
1939
|
+
self._enterprise_pool: Optional[EnterpriseConnectionPool] = None
|
1940
|
+
self._pool_config = {
|
1941
|
+
"min_size": 1, # SQLite is typically single-connection
|
1942
|
+
"max_size": getattr(config, "max_pool_size", 5),
|
1943
|
+
"health_check_interval": getattr(config, "health_check_interval", 60),
|
1944
|
+
"enable_analytics": getattr(config, "enable_analytics", True),
|
1945
|
+
"enable_adaptive_sizing": False, # SQLite doesn't benefit from adaptive sizing
|
1946
|
+
}
|
1947
|
+
|
1948
|
+
async def connect(self) -> None:
|
1949
|
+
"""Connect using enterprise pool."""
|
1950
|
+
# Import aiosqlite module reference
|
1951
|
+
import aiosqlite as _aiosqlite
|
1952
|
+
|
1953
|
+
self._aiosqlite = _aiosqlite
|
1954
|
+
|
1955
|
+
# Initialize enterprise pool if not already done
|
1956
|
+
if self._enterprise_pool is None:
|
1957
|
+
pool_id = f"sqlite_{hash(str(self.config.__dict__))}"
|
1958
|
+
self._enterprise_pool = EnterpriseConnectionPool(
|
1959
|
+
pool_id=pool_id,
|
1960
|
+
database_config=self.config,
|
1961
|
+
adapter_class=SQLiteAdapter,
|
1962
|
+
**self._pool_config,
|
1963
|
+
)
|
1964
|
+
await self._enterprise_pool.initialize()
|
1965
|
+
|
1966
|
+
# Also initialize base connection for compatibility
|
1967
|
+
await super().connect()
|
1968
|
+
|
1969
|
+
async def execute(
|
1970
|
+
self, query: str, params: Optional[Union[tuple, dict]] = None, **kwargs
|
1971
|
+
) -> Any:
|
1972
|
+
"""Execute with enterprise monitoring."""
|
1973
|
+
if self._enterprise_pool:
|
1974
|
+
return await self._enterprise_pool.execute_query(query, params, **kwargs)
|
1975
|
+
else:
|
1976
|
+
return await super().execute(query, params, **kwargs)
|
1977
|
+
|
1978
|
+
async def health_check(self) -> HealthCheckResult:
|
1979
|
+
"""Perform health check."""
|
1980
|
+
if self._enterprise_pool:
|
1981
|
+
return await self._enterprise_pool.health_check()
|
1982
|
+
else:
|
1983
|
+
# Fallback basic health check
|
1984
|
+
try:
|
1985
|
+
await self.execute("SELECT 1")
|
1986
|
+
return HealthCheckResult(is_healthy=True, latency_ms=0)
|
1987
|
+
except Exception as e:
|
1988
|
+
return HealthCheckResult(
|
1989
|
+
is_healthy=False, latency_ms=0, error_message=str(e)
|
1990
|
+
)
|
1991
|
+
|
1992
|
+
def get_pool_metrics(self) -> Optional[PoolMetrics]:
|
1993
|
+
"""Get pool metrics."""
|
1994
|
+
return self._enterprise_pool.get_metrics() if self._enterprise_pool else None
|
1995
|
+
|
1996
|
+
def get_analytics_summary(self) -> Optional[Dict[str, Any]]:
|
1997
|
+
"""Get analytics summary."""
|
1998
|
+
return (
|
1999
|
+
self._enterprise_pool.get_analytics_summary()
|
2000
|
+
if self._enterprise_pool
|
2001
|
+
else None
|
2002
|
+
)
|
2003
|
+
|
2004
|
+
async def disconnect(self) -> None:
|
2005
|
+
"""Disconnect enterprise pool."""
|
2006
|
+
if self._enterprise_pool:
|
2007
|
+
await self._enterprise_pool.close()
|
2008
|
+
self._enterprise_pool = None
|
2009
|
+
else:
|
2010
|
+
await super().disconnect()
|
2011
|
+
|
2012
|
+
|
2013
|
+
# =============================================================================
|
2014
|
+
# Runtime Integration Components
|
2015
|
+
# =============================================================================
|
2016
|
+
|
2017
|
+
|
2018
|
+
class DatabasePoolCoordinator:
|
2019
|
+
"""Coordinates database pools with the LocalRuntime ConnectionPoolManager."""
|
2020
|
+
|
2021
|
+
def __init__(self, runtime_pool_manager=None):
|
2022
|
+
"""Initialize with reference to runtime pool manager.
|
2023
|
+
|
2024
|
+
Args:
|
2025
|
+
runtime_pool_manager: Reference to LocalRuntime's ConnectionPoolManager
|
2026
|
+
"""
|
2027
|
+
self.runtime_pool_manager = runtime_pool_manager
|
2028
|
+
self._active_pools: Dict[str, EnterpriseConnectionPool] = {}
|
2029
|
+
self._pool_metrics_cache: Dict[str, Dict[str, Any]] = {}
|
2030
|
+
self._coordination_lock = asyncio.Lock()
|
2031
|
+
|
2032
|
+
logger.info("DatabasePoolCoordinator initialized")
|
2033
|
+
|
2034
|
+
async def get_or_create_pool(
|
2035
|
+
self,
|
2036
|
+
pool_id: str,
|
2037
|
+
database_config: DatabaseConfig,
|
2038
|
+
adapter_type: str = "auto",
|
2039
|
+
pool_config: Optional[Dict[str, Any]] = None,
|
2040
|
+
) -> EnterpriseConnectionPool:
|
2041
|
+
"""Get existing pool or create new one with runtime coordination.
|
2042
|
+
|
2043
|
+
Args:
|
2044
|
+
pool_id: Unique pool identifier
|
2045
|
+
database_config: Database configuration
|
2046
|
+
adapter_type: Type of adapter (postgresql, mysql, sqlite, auto)
|
2047
|
+
pool_config: Pool configuration override
|
2048
|
+
|
2049
|
+
Returns:
|
2050
|
+
Enterprise connection pool instance
|
2051
|
+
"""
|
2052
|
+
async with self._coordination_lock:
|
2053
|
+
if pool_id in self._active_pools:
|
2054
|
+
return self._active_pools[pool_id]
|
2055
|
+
|
2056
|
+
# Determine adapter class
|
2057
|
+
if adapter_type == "auto":
|
2058
|
+
adapter_type = database_config.type.value
|
2059
|
+
|
2060
|
+
adapter_classes = {
|
2061
|
+
"postgresql": ProductionPostgreSQLAdapter,
|
2062
|
+
"mysql": ProductionMySQLAdapter,
|
2063
|
+
"sqlite": ProductionSQLiteAdapter,
|
2064
|
+
}
|
2065
|
+
|
2066
|
+
adapter_class = adapter_classes.get(adapter_type)
|
2067
|
+
if not adapter_class:
|
2068
|
+
raise ValueError(f"Unsupported adapter type: {adapter_type}")
|
2069
|
+
|
2070
|
+
# Create enterprise pool
|
2071
|
+
enterprise_pool = EnterpriseConnectionPool(
|
2072
|
+
pool_id=pool_id,
|
2073
|
+
database_config=database_config,
|
2074
|
+
adapter_class=adapter_class,
|
2075
|
+
**(pool_config or {}),
|
2076
|
+
)
|
2077
|
+
|
2078
|
+
# Initialize and register
|
2079
|
+
await enterprise_pool.initialize()
|
2080
|
+
self._active_pools[pool_id] = enterprise_pool
|
2081
|
+
|
2082
|
+
# Register with runtime pool manager if available
|
2083
|
+
if self.runtime_pool_manager:
|
2084
|
+
await self._register_with_runtime(pool_id, enterprise_pool)
|
2085
|
+
|
2086
|
+
logger.info(f"Created and registered enterprise pool '{pool_id}'")
|
2087
|
+
return enterprise_pool
|
2088
|
+
|
2089
|
+
async def _register_with_runtime(
|
2090
|
+
self, pool_id: str, enterprise_pool: EnterpriseConnectionPool
|
2091
|
+
):
|
2092
|
+
"""Register pool with runtime pool manager."""
|
2093
|
+
try:
|
2094
|
+
if hasattr(self.runtime_pool_manager, "register_pool"):
|
2095
|
+
await self.runtime_pool_manager.register_pool(
|
2096
|
+
pool_id,
|
2097
|
+
{
|
2098
|
+
"type": "enterprise_database_pool",
|
2099
|
+
"adapter_type": enterprise_pool.database_config.type.value,
|
2100
|
+
"pool_instance": enterprise_pool,
|
2101
|
+
"metrics_callback": enterprise_pool.get_metrics,
|
2102
|
+
"analytics_callback": enterprise_pool.get_analytics_summary,
|
2103
|
+
},
|
2104
|
+
)
|
2105
|
+
except Exception as e:
|
2106
|
+
logger.warning(f"Failed to register pool with runtime: {e}")
|
2107
|
+
|
2108
|
+
async def get_pool_metrics(self, pool_id: Optional[str] = None) -> Dict[str, Any]:
|
2109
|
+
"""Get metrics for specific pool or all pools.
|
2110
|
+
|
2111
|
+
Args:
|
2112
|
+
pool_id: Pool ID to get metrics for, or None for all pools
|
2113
|
+
|
2114
|
+
Returns:
|
2115
|
+
Pool metrics dictionary
|
2116
|
+
"""
|
2117
|
+
if pool_id:
|
2118
|
+
pool = self._active_pools.get(pool_id)
|
2119
|
+
if pool:
|
2120
|
+
return {pool_id: pool.get_analytics_summary()}
|
2121
|
+
return {}
|
2122
|
+
|
2123
|
+
# Return metrics for all pools
|
2124
|
+
all_metrics = {}
|
2125
|
+
for pid, pool in self._active_pools.items():
|
2126
|
+
all_metrics[pid] = pool.get_analytics_summary()
|
2127
|
+
|
2128
|
+
return all_metrics
|
2129
|
+
|
2130
|
+
async def health_check_all(self) -> Dict[str, HealthCheckResult]:
|
2131
|
+
"""Perform health check on all active pools.
|
2132
|
+
|
2133
|
+
Returns:
|
2134
|
+
Dictionary mapping pool IDs to health check results
|
2135
|
+
"""
|
2136
|
+
results = {}
|
2137
|
+
|
2138
|
+
for pool_id, pool in self._active_pools.items():
|
2139
|
+
try:
|
2140
|
+
result = await pool.health_check()
|
2141
|
+
results[pool_id] = result
|
2142
|
+
except Exception as e:
|
2143
|
+
results[pool_id] = HealthCheckResult(
|
2144
|
+
is_healthy=False,
|
2145
|
+
latency_ms=0,
|
2146
|
+
error_message=f"Health check failed: {str(e)}",
|
2147
|
+
)
|
2148
|
+
|
2149
|
+
return results
|
2150
|
+
|
2151
|
+
async def cleanup_idle_pools(self, idle_timeout: int = 3600) -> int:
|
2152
|
+
"""Clean up pools that have been idle for too long.
|
2153
|
+
|
2154
|
+
Args:
|
2155
|
+
idle_timeout: Idle timeout in seconds
|
2156
|
+
|
2157
|
+
Returns:
|
2158
|
+
Number of pools cleaned up
|
2159
|
+
"""
|
2160
|
+
cleaned_up = 0
|
2161
|
+
pools_to_remove = []
|
2162
|
+
|
2163
|
+
current_time = datetime.now()
|
2164
|
+
|
2165
|
+
for pool_id, pool in self._active_pools.items():
|
2166
|
+
metrics = pool.get_metrics()
|
2167
|
+
|
2168
|
+
if (
|
2169
|
+
metrics.pool_last_used
|
2170
|
+
and (current_time - metrics.pool_last_used).total_seconds()
|
2171
|
+
> idle_timeout
|
2172
|
+
):
|
2173
|
+
pools_to_remove.append(pool_id)
|
2174
|
+
|
2175
|
+
# Clean up identified pools
|
2176
|
+
for pool_id in pools_to_remove:
|
2177
|
+
await self.close_pool(pool_id)
|
2178
|
+
cleaned_up += 1
|
2179
|
+
|
2180
|
+
if cleaned_up > 0:
|
2181
|
+
logger.info(f"Cleaned up {cleaned_up} idle database pools")
|
2182
|
+
|
2183
|
+
return cleaned_up
|
2184
|
+
|
2185
|
+
async def close_pool(self, pool_id: str) -> bool:
|
2186
|
+
"""Close and remove a specific pool.
|
2187
|
+
|
2188
|
+
Args:
|
2189
|
+
pool_id: Pool ID to close
|
2190
|
+
|
2191
|
+
Returns:
|
2192
|
+
True if pool was found and closed, False otherwise
|
2193
|
+
"""
|
2194
|
+
async with self._coordination_lock:
|
2195
|
+
pool = self._active_pools.get(pool_id)
|
2196
|
+
if pool:
|
2197
|
+
await pool.close()
|
2198
|
+
del self._active_pools[pool_id]
|
2199
|
+
|
2200
|
+
# Unregister from runtime if needed
|
2201
|
+
if self.runtime_pool_manager and hasattr(
|
2202
|
+
self.runtime_pool_manager, "unregister_pool"
|
2203
|
+
):
|
2204
|
+
try:
|
2205
|
+
await self.runtime_pool_manager.unregister_pool(pool_id)
|
2206
|
+
except Exception as e:
|
2207
|
+
logger.warning(f"Failed to unregister pool from runtime: {e}")
|
2208
|
+
|
2209
|
+
logger.info(f"Closed database pool '{pool_id}'")
|
2210
|
+
return True
|
2211
|
+
|
2212
|
+
return False
|
2213
|
+
|
2214
|
+
async def close_all_pools(self) -> int:
|
2215
|
+
"""Close all active pools.
|
2216
|
+
|
2217
|
+
Returns:
|
2218
|
+
Number of pools closed
|
2219
|
+
"""
|
2220
|
+
pool_ids = list(self._active_pools.keys())
|
2221
|
+
closed = 0
|
2222
|
+
|
2223
|
+
for pool_id in pool_ids:
|
2224
|
+
if await self.close_pool(pool_id):
|
2225
|
+
closed += 1
|
2226
|
+
|
2227
|
+
return closed
|
2228
|
+
|
2229
|
+
def get_active_pool_count(self) -> int:
|
2230
|
+
"""Get count of active pools."""
|
2231
|
+
return len(self._active_pools)
|
2232
|
+
|
2233
|
+
def get_pool_summary(self) -> Dict[str, Any]:
|
2234
|
+
"""Get summary of all active pools."""
|
2235
|
+
return {
|
2236
|
+
"active_pools": self.get_active_pool_count(),
|
2237
|
+
"pool_ids": list(self._active_pools.keys()),
|
2238
|
+
"total_connections": sum(
|
2239
|
+
pool._get_active_connection_count()
|
2240
|
+
for pool in self._active_pools.values()
|
2241
|
+
),
|
2242
|
+
"healthy_pools": sum(
|
2243
|
+
1
|
2244
|
+
for pool in self._active_pools.values()
|
2245
|
+
if pool._circuit_breaker.state == CircuitBreakerState.CLOSED
|
2246
|
+
),
|
2247
|
+
}
|
2248
|
+
|
2249
|
+
|
1168
2250
|
@register_node()
|
1169
2251
|
class AsyncSQLDatabaseNode(AsyncNode):
|
1170
2252
|
"""Asynchronous SQL database node for high-concurrency database operations.
|
@@ -1264,6 +2346,178 @@ class AsyncSQLDatabaseNode(AsyncNode):
|
|
1264
2346
|
|
1265
2347
|
return cls._pool_lock
|
1266
2348
|
|
2349
|
+
async def _create_adapter_with_runtime_pool(self, shared_pool) -> DatabaseAdapter:
|
2350
|
+
"""Create an adapter that uses a runtime-managed connection pool."""
|
2351
|
+
# Create a simple wrapper adapter that uses the shared pool
|
2352
|
+
db_type = DatabaseType(self.config["database_type"].lower())
|
2353
|
+
db_config = DatabaseConfig(
|
2354
|
+
type=db_type,
|
2355
|
+
host=self.config.get("host"),
|
2356
|
+
port=self.config.get("port"),
|
2357
|
+
database=self.config.get("database"),
|
2358
|
+
user=self.config.get("user"),
|
2359
|
+
password=self.config.get("password"),
|
2360
|
+
connection_string=self.config.get("connection_string"),
|
2361
|
+
pool_size=self.config.get("pool_size", 10),
|
2362
|
+
max_pool_size=self.config.get("max_pool_size", 20),
|
2363
|
+
)
|
2364
|
+
|
2365
|
+
# Create appropriate adapter with the shared pool
|
2366
|
+
if db_type == DatabaseType.POSTGRESQL:
|
2367
|
+
adapter = PostgreSQLAdapter(db_config)
|
2368
|
+
elif db_type == DatabaseType.MYSQL:
|
2369
|
+
adapter = MySQLAdapter(db_config)
|
2370
|
+
elif db_type == DatabaseType.SQLITE:
|
2371
|
+
adapter = SQLiteAdapter(db_config)
|
2372
|
+
else:
|
2373
|
+
raise NodeExecutionError(f"Unsupported database type: {db_type}")
|
2374
|
+
|
2375
|
+
# Inject the shared pool
|
2376
|
+
adapter._pool = shared_pool
|
2377
|
+
adapter._connected = True
|
2378
|
+
return adapter
|
2379
|
+
|
2380
|
+
async def _get_runtime_pool_adapter(self) -> Optional[DatabaseAdapter]:
|
2381
|
+
"""Try to get adapter from runtime connection pool manager with DatabasePoolCoordinator."""
|
2382
|
+
try:
|
2383
|
+
# Check if we have access to a runtime with connection pool manager
|
2384
|
+
import inspect
|
2385
|
+
|
2386
|
+
frame = inspect.currentframe()
|
2387
|
+
|
2388
|
+
# Look for runtime context in the call stack
|
2389
|
+
while frame:
|
2390
|
+
frame_locals = frame.f_locals
|
2391
|
+
if "self" in frame_locals:
|
2392
|
+
obj = frame_locals["self"]
|
2393
|
+
logger.debug(f"Checking call stack object: {type(obj).__name__}")
|
2394
|
+
|
2395
|
+
# Check if this is a LocalRuntime with connection pool manager
|
2396
|
+
if hasattr(obj, "_pool_coordinator") and hasattr(
|
2397
|
+
obj, "_persistent_mode"
|
2398
|
+
):
|
2399
|
+
logger.debug(
|
2400
|
+
f"Found potential runtime: persistent_mode={getattr(obj, '_persistent_mode', False)}, pool_coordinator={getattr(obj, '_pool_coordinator', None) is not None}"
|
2401
|
+
)
|
2402
|
+
|
2403
|
+
if obj._persistent_mode and obj._pool_coordinator:
|
2404
|
+
# Generate pool configuration
|
2405
|
+
pool_config = {
|
2406
|
+
"database_url": self.config.get("connection_string")
|
2407
|
+
or self._build_connection_string(),
|
2408
|
+
"pool_size": self.config.get("pool_size", 10),
|
2409
|
+
"max_pool_size": self.config.get("max_pool_size", 20),
|
2410
|
+
"database_type": self.config.get("database_type"),
|
2411
|
+
}
|
2412
|
+
|
2413
|
+
# Try to get shared pool from runtime
|
2414
|
+
pool_name = self._generate_pool_key()
|
2415
|
+
|
2416
|
+
# Register the pool with runtime's ConnectionPoolManager
|
2417
|
+
if hasattr(obj._pool_coordinator, "get_or_create_pool"):
|
2418
|
+
shared_pool = (
|
2419
|
+
await obj._pool_coordinator.get_or_create_pool(
|
2420
|
+
pool_name, pool_config
|
2421
|
+
)
|
2422
|
+
)
|
2423
|
+
if shared_pool:
|
2424
|
+
# Create adapter that uses the runtime-managed pool
|
2425
|
+
return await self._create_adapter_with_runtime_pool(
|
2426
|
+
shared_pool
|
2427
|
+
)
|
2428
|
+
|
2429
|
+
# Fallback: Create DatabasePoolCoordinator if needed
|
2430
|
+
if not hasattr(obj, "_database_pool_coordinator"):
|
2431
|
+
obj._database_pool_coordinator = (
|
2432
|
+
DatabasePoolCoordinator(obj._pool_coordinator)
|
2433
|
+
)
|
2434
|
+
|
2435
|
+
# Generate pool configuration for enterprise pool
|
2436
|
+
db_config = DatabaseConfig(
|
2437
|
+
type=DatabaseType(self.config["database_type"].lower()),
|
2438
|
+
host=self.config.get("host"),
|
2439
|
+
port=self.config.get("port"),
|
2440
|
+
database=self.config.get("database"),
|
2441
|
+
user=self.config.get("user"),
|
2442
|
+
password=self.config.get("password"),
|
2443
|
+
connection_string=self.config.get("connection_string"),
|
2444
|
+
pool_size=self.config.get("pool_size", 10),
|
2445
|
+
max_pool_size=self.config.get("max_pool_size", 20),
|
2446
|
+
command_timeout=self.config.get("timeout", 60.0),
|
2447
|
+
enable_analytics=self.config.get(
|
2448
|
+
"enable_analytics", True
|
2449
|
+
),
|
2450
|
+
enable_adaptive_sizing=self.config.get(
|
2451
|
+
"enable_adaptive_sizing", True
|
2452
|
+
),
|
2453
|
+
health_check_interval=self.config.get(
|
2454
|
+
"health_check_interval", 30
|
2455
|
+
),
|
2456
|
+
min_pool_size=self.config.get("min_pool_size", 5),
|
2457
|
+
)
|
2458
|
+
|
2459
|
+
# Generate unique pool ID
|
2460
|
+
pool_id = f"{self.config['database_type']}_{hash(str(self.config))}"
|
2461
|
+
|
2462
|
+
# Get or create enterprise pool through coordinator
|
2463
|
+
enterprise_pool = (
|
2464
|
+
await obj._database_pool_coordinator.get_or_create_pool(
|
2465
|
+
pool_id=pool_id,
|
2466
|
+
database_config=db_config,
|
2467
|
+
adapter_type=self.config["database_type"],
|
2468
|
+
pool_config={
|
2469
|
+
"min_size": self.config.get("min_pool_size", 5),
|
2470
|
+
"max_size": self.config.get(
|
2471
|
+
"max_pool_size", 20
|
2472
|
+
),
|
2473
|
+
"enable_analytics": self.config.get(
|
2474
|
+
"enable_analytics", True
|
2475
|
+
),
|
2476
|
+
"enable_adaptive_sizing": self.config.get(
|
2477
|
+
"enable_adaptive_sizing", True
|
2478
|
+
),
|
2479
|
+
"health_check_interval": self.config.get(
|
2480
|
+
"health_check_interval", 30
|
2481
|
+
),
|
2482
|
+
},
|
2483
|
+
)
|
2484
|
+
)
|
2485
|
+
|
2486
|
+
if enterprise_pool:
|
2487
|
+
logger.info(
|
2488
|
+
f"Using runtime-coordinated enterprise pool: {pool_id}"
|
2489
|
+
)
|
2490
|
+
# Return the adapter from the enterprise pool
|
2491
|
+
return enterprise_pool._adapter
|
2492
|
+
|
2493
|
+
frame = frame.f_back
|
2494
|
+
|
2495
|
+
except Exception as e:
|
2496
|
+
# Silently fall back to class-level pools if runtime integration fails
|
2497
|
+
logger.debug(
|
2498
|
+
f"Runtime pool integration failed, falling back to class pools: {e}"
|
2499
|
+
)
|
2500
|
+
pass
|
2501
|
+
|
2502
|
+
return None
|
2503
|
+
|
2504
|
+
async def _create_adapter_with_runtime_coordination(
|
2505
|
+
self, runtime_pool
|
2506
|
+
) -> DatabaseAdapter:
|
2507
|
+
"""Create adapter that coordinates with runtime connection pool."""
|
2508
|
+
# Create standard adapter but mark it as runtime-coordinated
|
2509
|
+
adapter = await self._create_adapter()
|
2510
|
+
|
2511
|
+
# Mark adapter as runtime-coordinated for proper cleanup
|
2512
|
+
if hasattr(adapter, "_set_runtime_coordinated"):
|
2513
|
+
adapter._set_runtime_coordinated(True)
|
2514
|
+
else:
|
2515
|
+
# Add runtime coordination flag
|
2516
|
+
adapter._runtime_coordinated = True
|
2517
|
+
adapter._runtime_pool = runtime_pool
|
2518
|
+
|
2519
|
+
return adapter
|
2520
|
+
|
1267
2521
|
def __init__(self, **config):
|
1268
2522
|
self._adapter: Optional[DatabaseAdapter] = None
|
1269
2523
|
self._connected = False
|
@@ -1463,7 +2717,43 @@ class AsyncSQLDatabaseNode(AsyncNode):
|
|
1463
2717
|
type=bool,
|
1464
2718
|
required=False,
|
1465
2719
|
default=False,
|
1466
|
-
description="
|
2720
|
+
description="Allow administrative operations (USE WITH CAUTION)",
|
2721
|
+
),
|
2722
|
+
# Enterprise features parameters
|
2723
|
+
NodeParameter(
|
2724
|
+
name="enable_analytics",
|
2725
|
+
type=bool,
|
2726
|
+
required=False,
|
2727
|
+
default=True,
|
2728
|
+
description="Enable connection pool analytics and monitoring",
|
2729
|
+
),
|
2730
|
+
NodeParameter(
|
2731
|
+
name="enable_adaptive_sizing",
|
2732
|
+
type=bool,
|
2733
|
+
required=False,
|
2734
|
+
default=True,
|
2735
|
+
description="Enable adaptive connection pool sizing",
|
2736
|
+
),
|
2737
|
+
NodeParameter(
|
2738
|
+
name="health_check_interval",
|
2739
|
+
type=int,
|
2740
|
+
required=False,
|
2741
|
+
default=30,
|
2742
|
+
description="Health check interval in seconds",
|
2743
|
+
),
|
2744
|
+
NodeParameter(
|
2745
|
+
name="min_pool_size",
|
2746
|
+
type=int,
|
2747
|
+
required=False,
|
2748
|
+
default=5,
|
2749
|
+
description="Minimum connection pool size",
|
2750
|
+
),
|
2751
|
+
NodeParameter(
|
2752
|
+
name="circuit_breaker_enabled",
|
2753
|
+
type=bool,
|
2754
|
+
required=False,
|
2755
|
+
default=True,
|
2756
|
+
description="Enable circuit breaker for connection failure protection",
|
1467
2757
|
),
|
1468
2758
|
NodeParameter(
|
1469
2759
|
name="parameter_types",
|
@@ -1679,7 +2969,17 @@ class AsyncSQLDatabaseNode(AsyncNode):
|
|
1679
2969
|
"""Get or create database adapter with optional pool sharing."""
|
1680
2970
|
if not self._adapter:
|
1681
2971
|
if self._share_pool:
|
1682
|
-
#
|
2972
|
+
# PRIORITY 1: Try to get adapter from runtime connection pool manager
|
2973
|
+
runtime_adapter = await self._get_runtime_pool_adapter()
|
2974
|
+
if runtime_adapter:
|
2975
|
+
self._adapter = runtime_adapter
|
2976
|
+
self._connected = True
|
2977
|
+
logger.debug(
|
2978
|
+
f"Using runtime-coordinated connection pool for {self.id}"
|
2979
|
+
)
|
2980
|
+
return self._adapter
|
2981
|
+
|
2982
|
+
# FALLBACK: Use class-level shared pool for backward compatibility
|
1683
2983
|
async with self._get_pool_lock():
|
1684
2984
|
self._pool_key = self._generate_pool_key()
|
1685
2985
|
|
@@ -1689,14 +2989,17 @@ class AsyncSQLDatabaseNode(AsyncNode):
|
|
1689
2989
|
self._shared_pools[self._pool_key] = (adapter, ref_count + 1)
|
1690
2990
|
self._adapter = adapter
|
1691
2991
|
self._connected = True
|
2992
|
+
logger.debug(f"Using class-level shared pool for {self.id}")
|
1692
2993
|
return self._adapter
|
1693
2994
|
|
1694
2995
|
# Create new shared pool
|
1695
2996
|
self._adapter = await self._create_adapter()
|
1696
2997
|
self._shared_pools[self._pool_key] = (self._adapter, 1)
|
2998
|
+
logger.debug(f"Created new class-level shared pool for {self.id}")
|
1697
2999
|
else:
|
1698
3000
|
# Create dedicated pool
|
1699
3001
|
self._adapter = await self._create_adapter()
|
3002
|
+
logger.debug(f"Created dedicated connection pool for {self.id}")
|
1700
3003
|
|
1701
3004
|
return self._adapter
|
1702
3005
|
|
@@ -1716,12 +3019,21 @@ class AsyncSQLDatabaseNode(AsyncNode):
|
|
1716
3019
|
command_timeout=self.config.get("timeout", 60.0),
|
1717
3020
|
)
|
1718
3021
|
|
3022
|
+
# Add enterprise features configuration to database config
|
3023
|
+
db_config.enable_analytics = self.config.get("enable_analytics", True)
|
3024
|
+
db_config.enable_adaptive_sizing = self.config.get(
|
3025
|
+
"enable_adaptive_sizing", True
|
3026
|
+
)
|
3027
|
+
db_config.health_check_interval = self.config.get("health_check_interval", 30)
|
3028
|
+
db_config.min_pool_size = self.config.get("min_pool_size", 5)
|
3029
|
+
|
3030
|
+
# Use production adapters with enterprise features
|
1719
3031
|
if db_type == DatabaseType.POSTGRESQL:
|
1720
|
-
adapter =
|
3032
|
+
adapter = ProductionPostgreSQLAdapter(db_config)
|
1721
3033
|
elif db_type == DatabaseType.MYSQL:
|
1722
|
-
adapter =
|
3034
|
+
adapter = ProductionMySQLAdapter(db_config)
|
1723
3035
|
elif db_type == DatabaseType.SQLITE:
|
1724
|
-
adapter =
|
3036
|
+
adapter = ProductionSQLiteAdapter(db_config)
|
1725
3037
|
else:
|
1726
3038
|
raise NodeExecutionError(f"Unsupported database type: {db_type}")
|
1727
3039
|
|
@@ -2828,6 +4140,195 @@ class AsyncSQLDatabaseNode(AsyncNode):
|
|
2828
4140
|
)
|
2829
4141
|
return data
|
2830
4142
|
|
4143
|
+
# =============================================================================
|
4144
|
+
# Enterprise Features and Monitoring Methods
|
4145
|
+
# =============================================================================
|
4146
|
+
# Note: get_pool_metrics() is already defined above at line 3630
|
4147
|
+
|
4148
|
+
async def get_pool_analytics(self) -> Optional[Dict[str, Any]]:
|
4149
|
+
"""Get comprehensive pool analytics summary.
|
4150
|
+
|
4151
|
+
Returns:
|
4152
|
+
Dictionary with detailed analytics, or None if not available
|
4153
|
+
"""
|
4154
|
+
try:
|
4155
|
+
adapter = await self._get_or_create_adapter()
|
4156
|
+
if hasattr(adapter, "get_analytics_summary"):
|
4157
|
+
return adapter.get_analytics_summary()
|
4158
|
+
except Exception as e:
|
4159
|
+
logger.warning(f"Failed to get pool analytics: {e}")
|
4160
|
+
|
4161
|
+
return None
|
4162
|
+
|
4163
|
+
async def health_check(self) -> Optional[HealthCheckResult]:
|
4164
|
+
"""Perform connection pool health check.
|
4165
|
+
|
4166
|
+
Returns:
|
4167
|
+
HealthCheckResult with health status, or None if not available
|
4168
|
+
"""
|
4169
|
+
try:
|
4170
|
+
adapter = await self._get_or_create_adapter()
|
4171
|
+
if hasattr(adapter, "health_check"):
|
4172
|
+
return await adapter.health_check()
|
4173
|
+
else:
|
4174
|
+
# Fallback basic health check
|
4175
|
+
await self._execute_query_with_retry(adapter, "SELECT 1")
|
4176
|
+
return HealthCheckResult(is_healthy=True, latency_ms=0)
|
4177
|
+
except Exception as e:
|
4178
|
+
logger.warning(f"Health check failed: {e}")
|
4179
|
+
return HealthCheckResult(
|
4180
|
+
is_healthy=False, latency_ms=0, error_message=str(e)
|
4181
|
+
)
|
4182
|
+
|
4183
|
+
def get_circuit_breaker_state(self) -> Optional[Dict[str, Any]]:
|
4184
|
+
"""Get circuit breaker state if available.
|
4185
|
+
|
4186
|
+
Returns:
|
4187
|
+
Dictionary with circuit breaker state, or None if not available
|
4188
|
+
"""
|
4189
|
+
try:
|
4190
|
+
if self._adapter and hasattr(self._adapter, "_enterprise_pool"):
|
4191
|
+
enterprise_pool = self._adapter._enterprise_pool
|
4192
|
+
if enterprise_pool and hasattr(enterprise_pool, "_circuit_breaker"):
|
4193
|
+
return enterprise_pool._circuit_breaker.get_state()
|
4194
|
+
except Exception as e:
|
4195
|
+
logger.warning(f"Failed to get circuit breaker state: {e}")
|
4196
|
+
|
4197
|
+
return None
|
4198
|
+
|
4199
|
+
async def get_connection_usage_history(self) -> List[Dict[str, Any]]:
|
4200
|
+
"""Get connection usage history for analysis.
|
4201
|
+
|
4202
|
+
Returns:
|
4203
|
+
List of usage snapshots with timestamps and metrics
|
4204
|
+
"""
|
4205
|
+
try:
|
4206
|
+
analytics = await self.get_pool_analytics()
|
4207
|
+
if analytics and "usage_history" in analytics:
|
4208
|
+
return analytics["usage_history"]
|
4209
|
+
except Exception as e:
|
4210
|
+
logger.warning(f"Failed to get usage history: {e}")
|
4211
|
+
|
4212
|
+
return []
|
4213
|
+
|
4214
|
+
async def force_pool_health_check(self) -> Dict[str, Any]:
|
4215
|
+
"""Force immediate health check and return comprehensive status.
|
4216
|
+
|
4217
|
+
Returns:
|
4218
|
+
Dictionary with health status, metrics, and diagnostic information
|
4219
|
+
"""
|
4220
|
+
result = {
|
4221
|
+
"timestamp": datetime.now().isoformat(),
|
4222
|
+
"node_id": getattr(self, "id", "unknown"),
|
4223
|
+
"database_type": self.config.get("database_type", "unknown"),
|
4224
|
+
"health": None,
|
4225
|
+
"metrics": None,
|
4226
|
+
"circuit_breaker": None,
|
4227
|
+
"adapter_type": None,
|
4228
|
+
"error": None,
|
4229
|
+
}
|
4230
|
+
|
4231
|
+
try:
|
4232
|
+
# Get health check result
|
4233
|
+
health = await self.health_check()
|
4234
|
+
result["health"] = (
|
4235
|
+
{
|
4236
|
+
"is_healthy": health.is_healthy,
|
4237
|
+
"latency_ms": health.latency_ms,
|
4238
|
+
"error_message": health.error_message,
|
4239
|
+
"checked_at": (
|
4240
|
+
health.checked_at.isoformat() if health.checked_at else None
|
4241
|
+
),
|
4242
|
+
"connection_count": health.connection_count,
|
4243
|
+
}
|
4244
|
+
if health
|
4245
|
+
else None
|
4246
|
+
)
|
4247
|
+
|
4248
|
+
# Get metrics
|
4249
|
+
metrics = await self.get_pool_metrics()
|
4250
|
+
result["metrics"] = metrics.to_dict() if metrics else None
|
4251
|
+
|
4252
|
+
# Get circuit breaker state
|
4253
|
+
result["circuit_breaker"] = self.get_circuit_breaker_state()
|
4254
|
+
|
4255
|
+
# Get adapter type
|
4256
|
+
if self._adapter:
|
4257
|
+
result["adapter_type"] = type(self._adapter).__name__
|
4258
|
+
|
4259
|
+
except Exception as e:
|
4260
|
+
result["error"] = str(e)
|
4261
|
+
logger.error(f"Force health check failed: {e}")
|
4262
|
+
|
4263
|
+
return result
|
4264
|
+
|
4265
|
+
async def get_enterprise_status_summary(self) -> Dict[str, Any]:
|
4266
|
+
"""Get comprehensive enterprise features status summary.
|
4267
|
+
|
4268
|
+
Returns:
|
4269
|
+
Dictionary with complete enterprise features status
|
4270
|
+
"""
|
4271
|
+
try:
|
4272
|
+
analytics = await self.get_pool_analytics()
|
4273
|
+
health = await self.health_check()
|
4274
|
+
circuit_breaker = self.get_circuit_breaker_state()
|
4275
|
+
|
4276
|
+
return {
|
4277
|
+
"timestamp": datetime.now().isoformat(),
|
4278
|
+
"node_id": getattr(self, "id", "unknown"),
|
4279
|
+
"database_type": self.config.get("database_type", "unknown"),
|
4280
|
+
"enterprise_features": {
|
4281
|
+
"analytics_enabled": self.config.get("enable_analytics", True),
|
4282
|
+
"adaptive_sizing_enabled": self.config.get(
|
4283
|
+
"enable_adaptive_sizing", True
|
4284
|
+
),
|
4285
|
+
"circuit_breaker_enabled": self.config.get(
|
4286
|
+
"circuit_breaker_enabled", True
|
4287
|
+
),
|
4288
|
+
"health_check_interval": self.config.get(
|
4289
|
+
"health_check_interval", 30
|
4290
|
+
),
|
4291
|
+
},
|
4292
|
+
"pool_configuration": {
|
4293
|
+
"min_size": self.config.get("min_pool_size", 5),
|
4294
|
+
"max_size": self.config.get("max_pool_size", 20),
|
4295
|
+
"current_size": (
|
4296
|
+
analytics["pool_config"]["current_size"] if analytics else 0
|
4297
|
+
),
|
4298
|
+
"share_pool": self.config.get("share_pool", True),
|
4299
|
+
},
|
4300
|
+
"health_status": {
|
4301
|
+
"is_healthy": health.is_healthy if health else False,
|
4302
|
+
"latency_ms": health.latency_ms if health else 0,
|
4303
|
+
"last_check": (
|
4304
|
+
health.checked_at.isoformat()
|
4305
|
+
if health and health.checked_at
|
4306
|
+
else None
|
4307
|
+
),
|
4308
|
+
"error": health.error_message if health else None,
|
4309
|
+
},
|
4310
|
+
"circuit_breaker": circuit_breaker,
|
4311
|
+
"performance_metrics": analytics["metrics"] if analytics else None,
|
4312
|
+
"recent_usage": (
|
4313
|
+
analytics.get("usage_history", [])[-5:] if analytics else []
|
4314
|
+
),
|
4315
|
+
"adapter_type": type(self._adapter).__name__ if self._adapter else None,
|
4316
|
+
"runtime_coordinated": (
|
4317
|
+
getattr(self._adapter, "_runtime_coordinated", False)
|
4318
|
+
if self._adapter
|
4319
|
+
else False
|
4320
|
+
),
|
4321
|
+
}
|
4322
|
+
|
4323
|
+
except Exception as e:
|
4324
|
+
logger.error(f"Failed to get enterprise status summary: {e}")
|
4325
|
+
return {
|
4326
|
+
"timestamp": datetime.now().isoformat(),
|
4327
|
+
"node_id": getattr(self, "id", "unknown"),
|
4328
|
+
"error": str(e),
|
4329
|
+
"enterprise_features_available": False,
|
4330
|
+
}
|
4331
|
+
|
2831
4332
|
async def cleanup(self):
|
2832
4333
|
"""Clean up database connections."""
|
2833
4334
|
try:
|