kailash 0.6.3__py3-none-any.whl → 0.6.5__py3-none-any.whl

This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (122)
  1. kailash/__init__.py +3 -3
  2. kailash/api/custom_nodes_secure.py +3 -3
  3. kailash/api/gateway.py +1 -1
  4. kailash/api/studio.py +1 -1
  5. kailash/api/workflow_api.py +2 -2
  6. kailash/core/resilience/bulkhead.py +475 -0
  7. kailash/core/resilience/circuit_breaker.py +92 -10
  8. kailash/core/resilience/health_monitor.py +578 -0
  9. kailash/edge/discovery.py +86 -0
  10. kailash/mcp_server/__init__.py +309 -33
  11. kailash/mcp_server/advanced_features.py +1022 -0
  12. kailash/mcp_server/ai_registry_server.py +27 -2
  13. kailash/mcp_server/auth.py +789 -0
  14. kailash/mcp_server/client.py +645 -378
  15. kailash/mcp_server/discovery.py +1593 -0
  16. kailash/mcp_server/errors.py +673 -0
  17. kailash/mcp_server/oauth.py +1727 -0
  18. kailash/mcp_server/protocol.py +1126 -0
  19. kailash/mcp_server/registry_integration.py +587 -0
  20. kailash/mcp_server/server.py +1228 -96
  21. kailash/mcp_server/transports.py +1169 -0
  22. kailash/mcp_server/utils/__init__.py +6 -1
  23. kailash/mcp_server/utils/cache.py +250 -7
  24. kailash/middleware/auth/auth_manager.py +3 -3
  25. kailash/middleware/communication/api_gateway.py +1 -1
  26. kailash/middleware/communication/realtime.py +1 -1
  27. kailash/middleware/mcp/enhanced_server.py +1 -1
  28. kailash/nodes/__init__.py +2 -0
  29. kailash/nodes/admin/audit_log.py +6 -6
  30. kailash/nodes/admin/permission_check.py +8 -8
  31. kailash/nodes/admin/role_management.py +32 -28
  32. kailash/nodes/admin/schema.sql +6 -1
  33. kailash/nodes/admin/schema_manager.py +13 -13
  34. kailash/nodes/admin/security_event.py +15 -15
  35. kailash/nodes/admin/tenant_isolation.py +3 -3
  36. kailash/nodes/admin/transaction_utils.py +3 -3
  37. kailash/nodes/admin/user_management.py +21 -21
  38. kailash/nodes/ai/a2a.py +11 -11
  39. kailash/nodes/ai/ai_providers.py +9 -12
  40. kailash/nodes/ai/embedding_generator.py +13 -14
  41. kailash/nodes/ai/intelligent_agent_orchestrator.py +19 -19
  42. kailash/nodes/ai/iterative_llm_agent.py +2 -2
  43. kailash/nodes/ai/llm_agent.py +210 -33
  44. kailash/nodes/ai/self_organizing.py +2 -2
  45. kailash/nodes/alerts/discord.py +4 -4
  46. kailash/nodes/api/graphql.py +6 -6
  47. kailash/nodes/api/http.py +10 -10
  48. kailash/nodes/api/rate_limiting.py +4 -4
  49. kailash/nodes/api/rest.py +15 -15
  50. kailash/nodes/auth/mfa.py +3 -3
  51. kailash/nodes/auth/risk_assessment.py +2 -2
  52. kailash/nodes/auth/session_management.py +5 -5
  53. kailash/nodes/auth/sso.py +143 -0
  54. kailash/nodes/base.py +8 -2
  55. kailash/nodes/base_async.py +16 -2
  56. kailash/nodes/base_with_acl.py +2 -2
  57. kailash/nodes/cache/__init__.py +9 -0
  58. kailash/nodes/cache/cache.py +1172 -0
  59. kailash/nodes/cache/cache_invalidation.py +874 -0
  60. kailash/nodes/cache/redis_pool_manager.py +595 -0
  61. kailash/nodes/code/async_python.py +2 -1
  62. kailash/nodes/code/python.py +194 -30
  63. kailash/nodes/compliance/data_retention.py +6 -6
  64. kailash/nodes/compliance/gdpr.py +5 -5
  65. kailash/nodes/data/__init__.py +10 -0
  66. kailash/nodes/data/async_sql.py +1956 -129
  67. kailash/nodes/data/optimistic_locking.py +906 -0
  68. kailash/nodes/data/readers.py +8 -8
  69. kailash/nodes/data/redis.py +378 -0
  70. kailash/nodes/data/sql.py +314 -3
  71. kailash/nodes/data/streaming.py +21 -0
  72. kailash/nodes/enterprise/__init__.py +8 -0
  73. kailash/nodes/enterprise/audit_logger.py +285 -0
  74. kailash/nodes/enterprise/batch_processor.py +22 -3
  75. kailash/nodes/enterprise/data_lineage.py +1 -1
  76. kailash/nodes/enterprise/mcp_executor.py +205 -0
  77. kailash/nodes/enterprise/service_discovery.py +150 -0
  78. kailash/nodes/enterprise/tenant_assignment.py +108 -0
  79. kailash/nodes/logic/async_operations.py +2 -2
  80. kailash/nodes/logic/convergence.py +1 -1
  81. kailash/nodes/logic/operations.py +1 -1
  82. kailash/nodes/monitoring/__init__.py +11 -1
  83. kailash/nodes/monitoring/health_check.py +456 -0
  84. kailash/nodes/monitoring/log_processor.py +817 -0
  85. kailash/nodes/monitoring/metrics_collector.py +627 -0
  86. kailash/nodes/monitoring/performance_benchmark.py +137 -11
  87. kailash/nodes/rag/advanced.py +7 -7
  88. kailash/nodes/rag/agentic.py +49 -2
  89. kailash/nodes/rag/conversational.py +3 -3
  90. kailash/nodes/rag/evaluation.py +3 -3
  91. kailash/nodes/rag/federated.py +3 -3
  92. kailash/nodes/rag/graph.py +3 -3
  93. kailash/nodes/rag/multimodal.py +3 -3
  94. kailash/nodes/rag/optimized.py +5 -5
  95. kailash/nodes/rag/privacy.py +3 -3
  96. kailash/nodes/rag/query_processing.py +6 -6
  97. kailash/nodes/rag/realtime.py +1 -1
  98. kailash/nodes/rag/registry.py +1 -1
  99. kailash/nodes/rag/router.py +1 -1
  100. kailash/nodes/rag/similarity.py +7 -7
  101. kailash/nodes/rag/strategies.py +4 -4
  102. kailash/nodes/security/abac_evaluator.py +6 -6
  103. kailash/nodes/security/behavior_analysis.py +5 -5
  104. kailash/nodes/security/credential_manager.py +1 -1
  105. kailash/nodes/security/rotating_credentials.py +11 -11
  106. kailash/nodes/security/threat_detection.py +8 -8
  107. kailash/nodes/testing/credential_testing.py +2 -2
  108. kailash/nodes/transform/processors.py +5 -5
  109. kailash/runtime/local.py +163 -9
  110. kailash/runtime/parameter_injection.py +425 -0
  111. kailash/runtime/parameter_injector.py +657 -0
  112. kailash/runtime/testing.py +2 -2
  113. kailash/testing/fixtures.py +2 -2
  114. kailash/workflow/builder.py +99 -14
  115. kailash/workflow/builder_improvements.py +207 -0
  116. kailash/workflow/input_handling.py +170 -0
  117. {kailash-0.6.3.dist-info → kailash-0.6.5.dist-info}/METADATA +22 -9
  118. {kailash-0.6.3.dist-info → kailash-0.6.5.dist-info}/RECORD +122 -95
  119. {kailash-0.6.3.dist-info → kailash-0.6.5.dist-info}/WHEEL +0 -0
  120. {kailash-0.6.3.dist-info → kailash-0.6.5.dist-info}/entry_points.txt +0 -0
  121. {kailash-0.6.3.dist-info → kailash-0.6.5.dist-info}/licenses/LICENSE +0 -0
  122. {kailash-0.6.3.dist-info → kailash-0.6.5.dist-info}/top_level.txt +0 -0
kailash/nodes/cache/redis_pool_manager.py (new file)
@@ -0,0 +1,595 @@
+"""Redis connection pool manager for enterprise-grade connection handling.
+
+Provides connection pooling, health monitoring, and automatic failover
+for Redis operations with comprehensive metrics and circuit breaker integration.
+"""
+
+import asyncio
+import logging
+import threading
+import time
+from datetime import UTC, datetime, timedelta
+from enum import Enum
+from typing import Any, Dict, List, Optional, Union
+
+from kailash.nodes.base import NodeParameter, register_node
+from kailash.nodes.base_async import AsyncNode
+from kailash.sdk_exceptions import NodeExecutionError
+
+try:
+    import redis.asyncio as redis
+    from redis.asyncio.connection import ConnectionPool
+
+    REDIS_AVAILABLE = True
+except ImportError:
+    REDIS_AVAILABLE = False
+
+logger = logging.getLogger(__name__)
+
+
+class PoolHealth(Enum):
+    """Pool health status."""
+
+    HEALTHY = "healthy"
+    DEGRADED = "degraded"
+    CRITICAL = "critical"
+    FAILED = "failed"
+
+
+class ConnectionStatus(Enum):
+    """Connection status tracking."""
+
+    ACTIVE = "active"
+    IDLE = "idle"
+    FAILED = "failed"
+    RECOVERING = "recovering"
+
+
+@register_node()
+class RedisPoolManagerNode(AsyncNode):
+    """Enterprise Redis connection pool manager.
+
+    Provides:
+    - Connection pooling with health monitoring
+    - Automatic failover and recovery
+    - Real-time metrics and alerting
+    - Circuit breaker integration
+    - Connection lifecycle optimization
+
+    Design Purpose:
+    - Prevent connection leaks and resource exhaustion
+    - Ensure high availability for Redis operations
+    - Provide enterprise-grade monitoring and alerting
+    - Support multiple Redis instances and databases
+
+    Examples:
+        >>> # Create pool manager
+        >>> pool_manager = RedisPoolManagerNode(
+        ...     pool_size=10,
+        ...     max_overflow=20,
+        ...     health_check_interval=30
+        ... )
+
+        >>> # Execute Redis operation with pooling
+        >>> result = await pool_manager.execute(
+        ...     action="execute_command",
+        ...     command="SET",
+        ...     args=["key", "value"],
+        ...     redis_url="redis://localhost:6380"
+        ... )
+
+        >>> # Monitor pool health
+        >>> health = await pool_manager.execute(
+        ...     action="get_pool_status"
+        ... )
+    """
+
+    def __init__(
+        self,
+        pool_size: int = 10,
+        max_overflow: int = 20,
+        pool_timeout: int = 30,
+        health_check_interval: int = 60,
+        max_retries: int = 3,
+        retry_delay: float = 0.5,
+        **kwargs,
+    ):
+        """Initialize Redis pool manager."""
+        super().__init__(**kwargs)
+
+        self.pool_size = pool_size
+        self.max_overflow = max_overflow
+        self.pool_timeout = pool_timeout
+        self.health_check_interval = health_check_interval
+        self.max_retries = max_retries
+        self.retry_delay = retry_delay
+
+        # Shared pools keyed by (redis_url, database)
+        self._pools: Dict[str, ConnectionPool] = {}
+        self._pool_metrics: Dict[str, Dict[str, Any]] = {}
+        self._pool_health: Dict[str, PoolHealth] = {}
+        self._pool_lock = threading.Lock()
+
+        # Connection tracking
+        self._active_connections: Dict[str, List[Dict[str, Any]]] = {}
+        self._failed_connections: Dict[str, List[Dict[str, Any]]] = {}
+
+        # Health monitoring
+        self._last_health_check: Dict[str, datetime] = {}
+        self._health_history: Dict[str, List[Dict[str, Any]]] = {}
+
+        self.logger.info(f"Initialized RedisPoolManagerNode: {self.id}")
+
+    def get_parameters(self) -> Dict[str, NodeParameter]:
+        """Define the parameters this node accepts."""
+        return {
+            "action": NodeParameter(
+                name="action",
+                type=str,
+                required=True,
+                description="Action to perform (execute_command, get_pool_status, health_check)",
+            ),
+            "redis_url": NodeParameter(
+                name="redis_url",
+                type=str,
+                required=False,
+                default="redis://localhost:6379",
+                description="Redis connection URL",
+            ),
+            "database": NodeParameter(
+                name="database",
+                type=int,
+                required=False,
+                default=0,
+                description="Redis database number",
+            ),
+            "command": NodeParameter(
+                name="command",
+                type=str,
+                required=False,
+                description="Redis command to execute",
+            ),
+            "args": NodeParameter(
+                name="args",
+                type=list,
+                required=False,
+                default=[],
+                description="Redis command arguments",
+            ),
+            "timeout": NodeParameter(
+                name="timeout",
+                type=int,
+                required=False,
+                default=30,
+                description="Operation timeout in seconds",
+            ),
+            "pool_name": NodeParameter(
+                name="pool_name",
+                type=str,
+                required=False,
+                description="Specific pool name for operations",
+            ),
+        }
+
+    def get_output_schema(self) -> Dict[str, NodeParameter]:
+        """Define the output schema for this node."""
+        return {
+            "success": NodeParameter(
+                name="success",
+                type=bool,
+                description="Whether the operation succeeded",
+            ),
+            "result": NodeParameter(
+                name="result",
+                type=Any,
+                required=False,
+                description="Command result or operation output",
+            ),
+            "pool_status": NodeParameter(
+                name="pool_status",
+                type=dict,
+                required=False,
+                description="Pool status information",
+            ),
+            "health_report": NodeParameter(
+                name="health_report",
+                type=dict,
+                required=False,
+                description="Health check results",
+            ),
+            "execution_time": NodeParameter(
+                name="execution_time",
+                type=float,
+                description="Operation execution time",
+            ),
+            "pool_used": NodeParameter(
+                name="pool_used",
+                type=str,
+                required=False,
+                description="Pool identifier used for operation",
+            ),
+            "metrics": NodeParameter(
+                name="metrics",
+                type=dict,
+                required=False,
+                description="Pool metrics and statistics",
+            ),
+        }
+
+    async def async_run(self, **kwargs) -> Dict[str, Any]:
+        """Execute Redis pool management operations."""
+        action = kwargs["action"]
+        start_time = time.time()
+
+        try:
+            if action == "execute_command":
+                result = await self._execute_redis_command(
+                    kwargs.get("command"),
+                    kwargs.get("args", []),
+                    kwargs.get("redis_url", "redis://localhost:6379"),
+                    kwargs.get("database", 0),
+                    kwargs.get("timeout", 30),
+                )
+            elif action == "get_pool_status":
+                result = await self._get_pool_status(kwargs.get("pool_name"))
+            elif action == "health_check":
+                result = await self._perform_health_check(kwargs.get("pool_name"))
+            elif action == "cleanup_pools":
+                result = await self._cleanup_inactive_pools()
+            else:
+                raise ValueError(f"Unknown action: {action}")
+
+            execution_time = time.time() - start_time
+
+            return {"success": True, "execution_time": execution_time, **result}
+
+        except Exception as e:
+            execution_time = time.time() - start_time
+            self.logger.error(f"Redis pool operation failed: {str(e)}")
+            return {
+                "success": False,
+                "error": str(e),
+                "execution_time": execution_time,
+            }
+
+    async def _get_connection_pool(
+        self, redis_url: str, database: int = 0
+    ) -> ConnectionPool:
+        """Get or create Redis connection pool."""
+        pool_key = f"{redis_url}/db{database}"
+
+        with self._pool_lock:
+            if pool_key not in self._pools:
+                if not REDIS_AVAILABLE:
+                    raise NodeExecutionError(
+                        "Redis is not available. Install with: pip install redis"
+                    )
+
+                try:
+                    pool = ConnectionPool.from_url(
+                        redis_url,
+                        db=database,
+                        max_connections=self.pool_size + self.max_overflow,
+                        socket_timeout=self.pool_timeout,
+                        socket_connect_timeout=10,
+                        health_check_interval=self.health_check_interval,
+                        retry_on_timeout=True,
+                        retry_on_error=[redis.ConnectionError, redis.TimeoutError],
+                    )
+
+                    self._pools[pool_key] = pool
+                    self._pool_health[pool_key] = PoolHealth.HEALTHY
+                    self._pool_metrics[pool_key] = {
+                        "created_at": datetime.now(UTC),
+                        "total_connections": 0,
+                        "active_connections": 0,
+                        "failed_connections": 0,
+                        "total_commands": 0,
+                        "successful_commands": 0,
+                        "failed_commands": 0,
+                        "avg_response_time": 0.0,
+                        "last_activity": datetime.now(UTC),
+                    }
+                    self._active_connections[pool_key] = []
+                    self._failed_connections[pool_key] = []
+                    self._health_history[pool_key] = []
+
+                    self.logger.info(f"Created Redis pool: {pool_key}")
+
+                except Exception as e:
+                    self.logger.error(f"Failed to create Redis pool {pool_key}: {e}")
+                    raise NodeExecutionError(f"Failed to create Redis pool: {e}")
+
+        return self._pools[pool_key]
+
+    async def _execute_redis_command(
+        self, command: str, args: List[Any], redis_url: str, database: int, timeout: int
+    ) -> Dict[str, Any]:
+        """Execute Redis command using connection pool."""
+        pool_key = f"{redis_url}/db{database}"
+        pool = await self._get_connection_pool(redis_url, database)
+
+        connection = None
+        start_time = time.time()
+
+        try:
+            # Get connection from pool
+            connection = redis.Redis(connection_pool=pool)
+
+            # Track active connection
+            conn_info = {
+                "connection_id": id(connection),
+                "started_at": datetime.now(UTC),
+                "command": command,
+                "status": ConnectionStatus.ACTIVE,
+            }
+            self._active_connections[pool_key].append(conn_info)
+
+            # Execute command with timeout
+            result = await asyncio.wait_for(
+                connection.execute_command(command, *args), timeout=timeout
+            )
+
+            # Update metrics
+            execution_time = time.time() - start_time
+            self._update_pool_metrics(pool_key, True, execution_time)
+
+            return {
+                "result": result,
+                "pool_used": pool_key,
+                "execution_time": execution_time,
+                "connection_id": id(connection),
+            }
+
+        except asyncio.TimeoutError:
+            execution_time = time.time() - start_time
+            self._update_pool_metrics(pool_key, False, execution_time)
+            self._record_connection_failure(pool_key, "timeout", execution_time)
+            raise NodeExecutionError(f"Redis command timeout after {timeout}s")
+
+        except Exception as e:
+            execution_time = time.time() - start_time
+            self._update_pool_metrics(pool_key, False, execution_time)
+            self._record_connection_failure(pool_key, str(e), execution_time)
+            raise NodeExecutionError(f"Redis command failed: {e}")
+
+        finally:
+            # Clean up connection tracking
+            if connection:
+                self._remove_active_connection(pool_key, id(connection))
+
+            # Close connection properly
+            if connection:
+                try:
+                    await connection.aclose()
+                except Exception:
+                    pass  # Ignore cleanup errors
+
+    def _update_pool_metrics(self, pool_key: str, success: bool, execution_time: float):
+        """Update pool metrics."""
+        if pool_key not in self._pool_metrics:
+            return
+
+        metrics = self._pool_metrics[pool_key]
+
+        metrics["total_commands"] += 1
+        metrics["last_activity"] = datetime.now(UTC)
+
+        if success:
+            metrics["successful_commands"] += 1
+        else:
+            metrics["failed_commands"] += 1
+
+        # Update average response time
+        total_successful = metrics["successful_commands"]
+        if total_successful > 0:
+            current_avg = metrics["avg_response_time"]
+            metrics["avg_response_time"] = (
+                current_avg * (total_successful - 1) + execution_time
+            ) / total_successful
+
+    def _record_connection_failure(
+        self, pool_key: str, error: str, execution_time: float
+    ):
+        """Record connection failure for analysis."""
+        failure_info = {
+            "timestamp": datetime.now(UTC),
+            "error": error,
+            "execution_time": execution_time,
+            "pool_key": pool_key,
+        }
+
+        if pool_key not in self._failed_connections:
+            self._failed_connections[pool_key] = []
+
+        self._failed_connections[pool_key].append(failure_info)
+
+        # Keep only recent failures (last 100)
+        if len(self._failed_connections[pool_key]) > 100:
+            self._failed_connections[pool_key] = self._failed_connections[pool_key][
+                -100:
+            ]
+
+        # Update pool health based on failure rate
+        self._assess_pool_health(pool_key)
+
+    def _remove_active_connection(self, pool_key: str, connection_id: int):
+        """Remove connection from active tracking."""
+        if pool_key in self._active_connections:
+            self._active_connections[pool_key] = [
+                conn
+                for conn in self._active_connections[pool_key]
+                if conn["connection_id"] != connection_id
+            ]
+
+    def _assess_pool_health(self, pool_key: str):
+        """Assess pool health based on recent metrics."""
+        if pool_key not in self._pool_metrics:
+            return
+
+        metrics = self._pool_metrics[pool_key]
+        total_commands = metrics["total_commands"]
+        failed_commands = metrics["failed_commands"]
+
+        if total_commands == 0:
+            health = PoolHealth.HEALTHY
+        else:
+            failure_rate = failed_commands / total_commands
+            avg_response_time = metrics["avg_response_time"]
+
+            if failure_rate > 0.5 or avg_response_time > 10.0:
+                health = PoolHealth.FAILED
+            elif failure_rate > 0.2 or avg_response_time > 5.0:
+                health = PoolHealth.CRITICAL
+            elif failure_rate > 0.1 or avg_response_time > 2.0:
+                health = PoolHealth.DEGRADED
+            else:
+                health = PoolHealth.HEALTHY
+
+        self._pool_health[pool_key] = health
+
+        # Record health history
+        health_record = {
+            "timestamp": datetime.now(UTC),
+            "health": health.value,
+            "failure_rate": failed_commands / max(total_commands, 1),
+            "avg_response_time": metrics["avg_response_time"],
+            "active_connections": len(self._active_connections.get(pool_key, [])),
+        }
+
+        if pool_key not in self._health_history:
+            self._health_history[pool_key] = []
+
+        self._health_history[pool_key].append(health_record)
+
+        # Keep only recent history (last 100 records)
+        if len(self._health_history[pool_key]) > 100:
+            self._health_history[pool_key] = self._health_history[pool_key][-100:]
+
+    async def _get_pool_status(self, pool_name: Optional[str] = None) -> Dict[str, Any]:
+        """Get status of all pools or specific pool."""
+        if pool_name:
+            if pool_name not in self._pools:
+                return {"error": f"Pool {pool_name} not found"}
+
+            return {"pool_status": {pool_name: self._get_single_pool_status(pool_name)}}
+        else:
+            return {
+                "pool_status": {
+                    pool_key: self._get_single_pool_status(pool_key)
+                    for pool_key in self._pools.keys()
+                }
+            }
+
+    def _get_single_pool_status(self, pool_key: str) -> Dict[str, Any]:
+        """Get status of a single pool."""
+        pool = self._pools.get(pool_key)
+        metrics = self._pool_metrics.get(pool_key, {})
+        health = self._pool_health.get(pool_key, PoolHealth.HEALTHY)
+
+        if not pool:
+            return {"status": "not_found"}
+
+        # Get pool connection info
+        try:
+            created_connections = pool.created_connections
+            available_connections = pool.available_connections
+            in_use_connections = created_connections - available_connections
+        except AttributeError:
+            # Fallback for different Redis versions
+            created_connections = 0
+            available_connections = 0
+            in_use_connections = len(self._active_connections.get(pool_key, []))
+
+        return {
+            "health": health.value,
+            "created_connections": created_connections,
+            "available_connections": available_connections,
+            "in_use_connections": in_use_connections,
+            "max_connections": self.pool_size + self.max_overflow,
+            "metrics": metrics,
+            "active_connections_count": len(self._active_connections.get(pool_key, [])),
+            "recent_failures": len(self._failed_connections.get(pool_key, [])),
+        }
+
+    async def _perform_health_check(
+        self, pool_name: Optional[str] = None
+    ) -> Dict[str, Any]:
+        """Perform comprehensive health check."""
+        pools_to_check = [pool_name] if pool_name else list(self._pools.keys())
+        health_results = {}
+
+        for pool_key in pools_to_check:
+            if pool_key not in self._pools:
+                continue
+
+            pool = self._pools[pool_key]
+            start_time = time.time()
+
+            try:
+                # Test connection with ping
+                test_connection = redis.Redis(connection_pool=pool)
+                await test_connection.ping()
+                await test_connection.aclose()
+
+                response_time = time.time() - start_time
+
+                health_results[pool_key] = {
+                    "healthy": True,
+                    "response_time": response_time,
+                    "last_check": datetime.now(UTC).isoformat(),
+                    "pool_status": self._get_single_pool_status(pool_key),
+                }
+
+            except Exception as e:
+                response_time = time.time() - start_time
+
+                health_results[pool_key] = {
+                    "healthy": False,
+                    "error": str(e),
+                    "response_time": response_time,
+                    "last_check": datetime.now(UTC).isoformat(),
+                    "pool_status": self._get_single_pool_status(pool_key),
+                }
+
+                # Mark pool as failed
+                self._pool_health[pool_key] = PoolHealth.FAILED
+
+        self._last_health_check[pool_name or "all"] = datetime.now(UTC)
+
+        return {"health_report": health_results}
+
+    async def _cleanup_inactive_pools(self) -> Dict[str, Any]:
+        """Clean up inactive pools to free resources."""
+        cleanup_threshold = datetime.now(UTC) - timedelta(hours=1)
+        cleaned_pools = []
+
+        with self._pool_lock:
+            pools_to_remove = []
+
+            for pool_key, metrics in self._pool_metrics.items():
+                last_activity = metrics.get("last_activity")
+                if last_activity and last_activity < cleanup_threshold:
+                    pools_to_remove.append(pool_key)
+
+            for pool_key in pools_to_remove:
+                try:
+                    pool = self._pools.get(pool_key)
+                    if pool:
+                        await pool.aclose()
+
+                    # Clean up tracking data
+                    del self._pools[pool_key]
+                    del self._pool_metrics[pool_key]
+                    del self._pool_health[pool_key]
+                    self._active_connections.pop(pool_key, None)
+                    self._failed_connections.pop(pool_key, None)
+                    self._health_history.pop(pool_key, None)
+
+                    cleaned_pools.append(pool_key)
+                    self.logger.info(f"Cleaned up inactive pool: {pool_key}")
+
+                except Exception as e:
+                    self.logger.error(f"Error cleaning up pool {pool_key}: {e}")
+
+        return {"cleaned_pools": cleaned_pools, "cleanup_count": len(cleaned_pools)}
kailash/nodes/code/async_python.py
@@ -144,7 +144,8 @@ ALLOWED_ASYNC_MODULES = {
     "asyncpg",  # PostgreSQL
     "aiomysql",  # MySQL
     "motor",  # MongoDB
-    "aioredis",  # Redis
+    "redis",  # Redis with asyncio support
+    "redis.asyncio",  # Redis async module
     "aiosqlite",  # SQLite
     # Message queues and streaming
     "aiokafka",  # Kafka