kailash 0.6.2__py3-none-any.whl → 0.6.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (131)
  1. kailash/__init__.py +3 -3
  2. kailash/api/custom_nodes_secure.py +3 -3
  3. kailash/api/gateway.py +1 -1
  4. kailash/api/studio.py +2 -3
  5. kailash/api/workflow_api.py +3 -4
  6. kailash/core/resilience/bulkhead.py +460 -0
  7. kailash/core/resilience/circuit_breaker.py +92 -10
  8. kailash/edge/discovery.py +86 -0
  9. kailash/mcp_server/__init__.py +334 -0
  10. kailash/mcp_server/advanced_features.py +1022 -0
  11. kailash/{mcp → mcp_server}/ai_registry_server.py +29 -4
  12. kailash/mcp_server/auth.py +789 -0
  13. kailash/mcp_server/client.py +712 -0
  14. kailash/mcp_server/discovery.py +1593 -0
  15. kailash/mcp_server/errors.py +673 -0
  16. kailash/mcp_server/oauth.py +1727 -0
  17. kailash/mcp_server/protocol.py +1126 -0
  18. kailash/mcp_server/registry_integration.py +587 -0
  19. kailash/mcp_server/server.py +1747 -0
  20. kailash/{mcp → mcp_server}/servers/ai_registry.py +2 -2
  21. kailash/mcp_server/transports.py +1169 -0
  22. kailash/mcp_server/utils/cache.py +510 -0
  23. kailash/middleware/auth/auth_manager.py +3 -3
  24. kailash/middleware/communication/api_gateway.py +2 -9
  25. kailash/middleware/communication/realtime.py +1 -1
  26. kailash/middleware/mcp/client_integration.py +1 -1
  27. kailash/middleware/mcp/enhanced_server.py +2 -2
  28. kailash/nodes/__init__.py +2 -0
  29. kailash/nodes/admin/audit_log.py +6 -6
  30. kailash/nodes/admin/permission_check.py +8 -8
  31. kailash/nodes/admin/role_management.py +32 -28
  32. kailash/nodes/admin/schema.sql +6 -1
  33. kailash/nodes/admin/schema_manager.py +13 -13
  34. kailash/nodes/admin/security_event.py +16 -20
  35. kailash/nodes/admin/tenant_isolation.py +3 -3
  36. kailash/nodes/admin/transaction_utils.py +3 -3
  37. kailash/nodes/admin/user_management.py +21 -22
  38. kailash/nodes/ai/a2a.py +11 -11
  39. kailash/nodes/ai/ai_providers.py +9 -12
  40. kailash/nodes/ai/embedding_generator.py +13 -14
  41. kailash/nodes/ai/intelligent_agent_orchestrator.py +19 -19
  42. kailash/nodes/ai/iterative_llm_agent.py +3 -3
  43. kailash/nodes/ai/llm_agent.py +213 -36
  44. kailash/nodes/ai/self_organizing.py +2 -2
  45. kailash/nodes/alerts/discord.py +4 -4
  46. kailash/nodes/api/graphql.py +6 -6
  47. kailash/nodes/api/http.py +12 -17
  48. kailash/nodes/api/rate_limiting.py +4 -4
  49. kailash/nodes/api/rest.py +15 -15
  50. kailash/nodes/auth/mfa.py +3 -4
  51. kailash/nodes/auth/risk_assessment.py +2 -2
  52. kailash/nodes/auth/session_management.py +5 -5
  53. kailash/nodes/auth/sso.py +143 -0
  54. kailash/nodes/base.py +6 -2
  55. kailash/nodes/base_async.py +16 -2
  56. kailash/nodes/base_with_acl.py +2 -2
  57. kailash/nodes/cache/__init__.py +9 -0
  58. kailash/nodes/cache/cache.py +1172 -0
  59. kailash/nodes/cache/cache_invalidation.py +870 -0
  60. kailash/nodes/cache/redis_pool_manager.py +595 -0
  61. kailash/nodes/code/async_python.py +2 -1
  62. kailash/nodes/code/python.py +196 -35
  63. kailash/nodes/compliance/data_retention.py +6 -6
  64. kailash/nodes/compliance/gdpr.py +5 -5
  65. kailash/nodes/data/__init__.py +10 -0
  66. kailash/nodes/data/optimistic_locking.py +906 -0
  67. kailash/nodes/data/readers.py +8 -8
  68. kailash/nodes/data/redis.py +349 -0
  69. kailash/nodes/data/sql.py +314 -3
  70. kailash/nodes/data/streaming.py +21 -0
  71. kailash/nodes/enterprise/__init__.py +8 -0
  72. kailash/nodes/enterprise/audit_logger.py +285 -0
  73. kailash/nodes/enterprise/batch_processor.py +22 -3
  74. kailash/nodes/enterprise/data_lineage.py +1 -1
  75. kailash/nodes/enterprise/mcp_executor.py +205 -0
  76. kailash/nodes/enterprise/service_discovery.py +150 -0
  77. kailash/nodes/enterprise/tenant_assignment.py +108 -0
  78. kailash/nodes/logic/async_operations.py +2 -2
  79. kailash/nodes/logic/convergence.py +1 -1
  80. kailash/nodes/logic/operations.py +1 -1
  81. kailash/nodes/monitoring/__init__.py +11 -1
  82. kailash/nodes/monitoring/health_check.py +456 -0
  83. kailash/nodes/monitoring/log_processor.py +817 -0
  84. kailash/nodes/monitoring/metrics_collector.py +627 -0
  85. kailash/nodes/monitoring/performance_benchmark.py +137 -11
  86. kailash/nodes/rag/advanced.py +7 -7
  87. kailash/nodes/rag/agentic.py +49 -2
  88. kailash/nodes/rag/conversational.py +3 -3
  89. kailash/nodes/rag/evaluation.py +3 -3
  90. kailash/nodes/rag/federated.py +3 -3
  91. kailash/nodes/rag/graph.py +3 -3
  92. kailash/nodes/rag/multimodal.py +3 -3
  93. kailash/nodes/rag/optimized.py +5 -5
  94. kailash/nodes/rag/privacy.py +3 -3
  95. kailash/nodes/rag/query_processing.py +6 -6
  96. kailash/nodes/rag/realtime.py +1 -1
  97. kailash/nodes/rag/registry.py +2 -6
  98. kailash/nodes/rag/router.py +1 -1
  99. kailash/nodes/rag/similarity.py +7 -7
  100. kailash/nodes/rag/strategies.py +4 -4
  101. kailash/nodes/security/abac_evaluator.py +6 -6
  102. kailash/nodes/security/behavior_analysis.py +5 -6
  103. kailash/nodes/security/credential_manager.py +1 -1
  104. kailash/nodes/security/rotating_credentials.py +11 -11
  105. kailash/nodes/security/threat_detection.py +8 -8
  106. kailash/nodes/testing/credential_testing.py +2 -2
  107. kailash/nodes/transform/processors.py +5 -5
  108. kailash/runtime/local.py +162 -14
  109. kailash/runtime/parameter_injection.py +425 -0
  110. kailash/runtime/parameter_injector.py +657 -0
  111. kailash/runtime/testing.py +2 -2
  112. kailash/testing/fixtures.py +2 -2
  113. kailash/workflow/builder.py +99 -18
  114. kailash/workflow/builder_improvements.py +207 -0
  115. kailash/workflow/input_handling.py +170 -0
  116. {kailash-0.6.2.dist-info → kailash-0.6.4.dist-info}/METADATA +21 -8
  117. {kailash-0.6.2.dist-info → kailash-0.6.4.dist-info}/RECORD +126 -101
  118. kailash/mcp/__init__.py +0 -53
  119. kailash/mcp/client.py +0 -445
  120. kailash/mcp/server.py +0 -292
  121. kailash/mcp/server_enhanced.py +0 -449
  122. kailash/mcp/utils/cache.py +0 -267
  123. /kailash/{mcp → mcp_server}/client_new.py +0 -0
  124. /kailash/{mcp → mcp_server}/utils/__init__.py +0 -0
  125. /kailash/{mcp → mcp_server}/utils/config.py +0 -0
  126. /kailash/{mcp → mcp_server}/utils/formatters.py +0 -0
  127. /kailash/{mcp → mcp_server}/utils/metrics.py +0 -0
  128. {kailash-0.6.2.dist-info → kailash-0.6.4.dist-info}/WHEEL +0 -0
  129. {kailash-0.6.2.dist-info → kailash-0.6.4.dist-info}/entry_points.txt +0 -0
  130. {kailash-0.6.2.dist-info → kailash-0.6.4.dist-info}/licenses/LICENSE +0 -0
  131. {kailash-0.6.2.dist-info → kailash-0.6.4.dist-info}/top_level.txt +0 -0
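Note the package rename visible in the listing: the old kailash/mcp modules are removed and replaced (partly moved, partly rewritten) by kailash/mcp_server. A minimal sketch of what that implies for downstream imports is shown below; it assumes the moved modules keep their module-level APIs and only the package path changes, which this listing does not confirm.

# Hypothetical import migration for the kailash.mcp -> kailash.mcp_server rename.
# 0.6.2 (old layout, removed in this diff):
# import kailash.mcp.client as mcp_client
# 0.6.4 (new layout introduced in this diff):
import kailash.mcp_server.client as mcp_client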
kailash/nodes/cache/cache.py (new file)
@@ -0,0 +1,1172 @@
+"""Cache node for high-performance data caching and retrieval.
+
+This module provides comprehensive caching capabilities supporting multiple
+backends including Redis, in-memory LRU, and file-based caching with
+advanced features like TTL, compression, and serialization.
+"""
+
+import asyncio
+import gzip
+import hashlib
+import json
+import pickle
+import time
+from datetime import UTC, datetime, timedelta
+from enum import Enum
+from typing import Any, Dict, List, Optional, Union
+
+from kailash.nodes.base import NodeParameter, register_node
+from kailash.nodes.base_async import AsyncNode
+from kailash.sdk_exceptions import NodeExecutionError
+
+try:
+    import redis.asyncio as redis
+
+    REDIS_AVAILABLE = True
+except ImportError:
+    REDIS_AVAILABLE = False
+
+
+class CacheBackend(Enum):
+    """Supported cache backend types."""
+
+    MEMORY = "memory"
+    REDIS = "redis"
+    FILE = "file"
+    HYBRID = "hybrid"  # Memory + Redis fallback
+
+
+class SerializationFormat(Enum):
+    """Data serialization formats."""
+
+    JSON = "json"
+    PICKLE = "pickle"
+    STRING = "string"
+    BYTES = "bytes"
+
+
+class EvictionPolicy(Enum):
+    """Cache eviction policies."""
+
+    LRU = "lru"  # Least Recently Used
+    LFU = "lfu"  # Least Frequently Used
+    TTL = "ttl"  # Time To Live only
+    FIFO = "fifo"  # First In, First Out
+
+
+@register_node()
+class CacheNode(AsyncNode):
+    """Node for high-performance data caching and retrieval.
+
+    This node provides comprehensive caching capabilities including:
+    - Multiple backend support (Redis, in-memory, file-based, hybrid)
+    - Configurable TTL (Time To Live) with automatic expiration
+    - Multiple serialization formats (JSON, Pickle, String, Bytes)
+    - Data compression for large values
+    - Eviction policies (LRU, LFU, TTL, FIFO)
+    - Atomic operations and transactions
+    - Cache statistics and monitoring
+    - Distributed caching with Redis
+    - Fallback strategies for high availability
+
+    Design Purpose:
+    - Improve application performance through intelligent caching
+    - Reduce database and API load
+    - Provide configurable caching strategies
+    - Support both simple and complex caching scenarios
+
+    Examples:
+        >>> # Simple key-value caching
+        >>> cache = CacheNode()
+        >>> result = await cache.execute(
+        ...     operation="set",
+        ...     key="user:123",
+        ...     value={"name": "John", "email": "john@example.com"},
+        ...     ttl=3600  # 1 hour
+        ... )
+
+        >>> # Batch operations with pattern matching
+        >>> result = await cache.execute(
+        ...     operation="get_pattern",
+        ...     pattern="user:*",
+        ...     limit=100
+        ... )
+
+        >>> # Cache with compression and custom serialization
+        >>> result = await cache.execute(
+        ...     operation="set",
+        ...     key="large_data",
+        ...     value=large_dataset,
+        ...     compression=True,
+        ...     serialization="pickle",
+        ...     ttl=86400  # 24 hours
+        ... )
+    """
+
+    def __init__(self, **kwargs):
+        """Initialize the cache node."""
+        super().__init__(**kwargs)
+        self._memory_cache = {}
+        self._access_times = {}
+        self._access_counts = {}
+        self._redis_client = None
+        self._cache_stats = {
+            "hits": 0,
+            "misses": 0,
+            "sets": 0,
+            "deletes": 0,
+            "evictions": 0,
+        }
+        self.logger.info(f"Initialized CacheNode: {self.id}")
+
+    def get_parameters(self) -> Dict[str, NodeParameter]:
+        """Define the parameters this node accepts."""
+        return {
+            "operation": NodeParameter(
+                name="operation",
+                type=str,
+                required=True,
+                description="Cache operation (get, set, delete, exists, clear, stats, get_pattern)",
+            ),
+            "key": NodeParameter(
+                name="key",
+                type=str,
+                required=False,
+                description="Cache key for single operations",
+            ),
+            "value": NodeParameter(
+                name="value",
+                type=Any,
+                required=False,
+                description="Value to cache (for set operations)",
+            ),
+            "keys": NodeParameter(
+                name="keys",
+                type=list,
+                required=False,
+                description="Multiple keys for batch operations",
+            ),
+            "values": NodeParameter(
+                name="values",
+                type=dict,
+                required=False,
+                description="Key-value pairs for batch set operations",
+            ),
+            "pattern": NodeParameter(
+                name="pattern",
+                type=str,
+                required=False,
+                description="Pattern for pattern-based operations (supports wildcards)",
+            ),
+            "ttl": NodeParameter(
+                name="ttl",
+                type=int,
+                required=False,
+                default=3600,
+                description="Time to live in seconds (0 = no expiration)",
+            ),
+            "backend": NodeParameter(
+                name="backend",
+                type=str,
+                required=False,
+                default="memory",
+                description="Cache backend (memory, redis, file, hybrid)",
+            ),
+            "redis_url": NodeParameter(
+                name="redis_url",
+                type=str,
+                required=False,
+                default="redis://localhost:6379",
+                description="Redis connection URL",
+            ),
+            "serialization": NodeParameter(
+                name="serialization",
+                type=str,
+                required=False,
+                default="json",
+                description="Serialization format (json, pickle, string, bytes)",
+            ),
+            "compression": NodeParameter(
+                name="compression",
+                type=bool,
+                required=False,
+                default=False,
+                description="Enable gzip compression for large values",
+            ),
+            "compression_threshold": NodeParameter(
+                name="compression_threshold",
+                type=int,
+                required=False,
+                default=1024,
+                description="Minimum size in bytes to trigger compression",
+            ),
+            "eviction_policy": NodeParameter(
+                name="eviction_policy",
+                type=str,
+                required=False,
+                default="lru",
+                description="Eviction policy (lru, lfu, ttl, fifo)",
+            ),
+            "max_memory_items": NodeParameter(
+                name="max_memory_items",
+                type=int,
+                required=False,
+                default=10000,
+                description="Maximum items in memory cache",
+            ),
+            "namespace": NodeParameter(
+                name="namespace",
+                type=str,
+                required=False,
+                default="",
+                description="Key namespace prefix",
+            ),
+        }
+
+    def get_output_schema(self) -> Dict[str, NodeParameter]:
+        """Define the output schema for this node."""
+        return {
+            "success": NodeParameter(
+                name="success",
+                type=bool,
+                description="Whether the operation succeeded",
+            ),
+            "value": NodeParameter(
+                name="value",
+                type=Any,
+                required=False,
+                description="Retrieved value (for get operations)",
+            ),
+            "values": NodeParameter(
+                name="values",
+                type=dict,
+                required=False,
+                description="Multiple values (for batch operations)",
+            ),
+            "hit": NodeParameter(
+                name="hit",
+                type=bool,
+                required=False,
+                description="Cache hit status (for get operations)",
+            ),
+            "key": NodeParameter(
+                name="key",
+                type=str,
+                required=False,
+                description="The cache key used",
+            ),
+            "ttl_remaining": NodeParameter(
+                name="ttl_remaining",
+                type=int,
+                required=False,
+                description="Remaining TTL in seconds",
+            ),
+            "backend_used": NodeParameter(
+                name="backend_used",
+                type=str,
+                description="Backend that handled the operation",
+            ),
+            "operation_time": NodeParameter(
+                name="operation_time",
+                type=float,
+                description="Time taken for the operation",
+            ),
+            "stats": NodeParameter(
+                name="stats",
+                type=dict,
+                required=False,
+                description="Cache statistics (for stats operation)",
+            ),
+            "compressed": NodeParameter(
+                name="compressed",
+                type=bool,
+                required=False,
+                description="Whether the value was compressed",
+            ),
+        }
+
+    async def async_run(self, **kwargs) -> Dict[str, Any]:
+        """Execute cache operations."""
+        operation = kwargs["operation"].lower()
+        backend = CacheBackend(kwargs.get("backend", "memory"))
+
+        start_time = time.time()
+
+        try:
+            # Initialize backend if needed
+            await self._ensure_backend(backend, kwargs)
+
+            # Execute operation
+            if operation == "get":
+                result = await self._get(kwargs)
+            elif operation == "set":
+                result = await self._set(kwargs)
+            elif operation == "delete":
+                result = await self._delete(kwargs)
+            elif operation == "exists":
+                result = await self._exists(kwargs)
+            elif operation == "clear":
+                result = await self._clear(kwargs)
+            elif operation == "stats":
+                result = await self._get_stats(kwargs)
+            elif operation == "get_pattern":
+                result = await self._get_pattern(kwargs)
+            elif operation == "mget":
+                result = await self._mget(kwargs)
+            elif operation == "mset":
+                result = await self._mset(kwargs)
+            else:
+                raise ValueError(f"Unsupported operation: {operation}")
+
+            operation_time = time.time() - start_time
+            result["operation_time"] = operation_time
+            result["backend_used"] = backend.value
+
+            return result
+
+        except Exception as e:
+            self.logger.error(f"Cache operation failed: {str(e)}")
+            raise NodeExecutionError(f"Cache operation '{operation}' failed: {str(e)}")
+
+        finally:
+            # Clean up Redis connection after operation
+            if self._redis_client:
+                try:
+                    await self._redis_client.aclose()
+                except Exception:
+                    pass  # Ignore cleanup errors
+                self._redis_client = None
+
+    async def _ensure_backend(self, backend: CacheBackend, kwargs: Dict[str, Any]):
+        """Ensure the cache backend is initialized with proper connection management."""
+        if backend in [CacheBackend.REDIS, CacheBackend.HYBRID]:
+            if not REDIS_AVAILABLE:
+                if backend == CacheBackend.REDIS:
+                    raise NodeExecutionError(
+                        "Redis is not available. Install with: pip install redis"
+                    )
+                else:
+                    # Fall back to memory for hybrid mode
+                    self.logger.warning("Redis not available, using memory cache only")
+                    return
+
+            # Create fresh Redis client for each operation to avoid event loop issues
+            redis_url = kwargs.get("redis_url", "redis://localhost:6379")
+            try:
+                # Create a new client for this operation
+                redis_client = redis.from_url(redis_url, decode_responses=False)
+                # Test connection
+                await redis_client.ping()
+
+                # Store the client for this operation
+                self._redis_client = redis_client
+
+            except Exception as e:
+                if backend == CacheBackend.REDIS:
+                    raise NodeExecutionError(f"Failed to connect to Redis: {str(e)}")
+                else:
+                    # Fall back to memory for hybrid mode
+                    self.logger.warning(
+                        f"Redis connection failed, using memory cache: {str(e)}"
+                    )
+                    self._redis_client = None
+
+    async def _close_redis_connection(self):
+        """Close Redis connection if it exists."""
+        if self._redis_client:
+            try:
+                await self._redis_client.aclose()
+            except Exception:
+                pass  # Ignore errors during cleanup
+            self._redis_client = None
+
+    def _build_key(self, key: str, namespace: str = "") -> str:
+        """Build a namespaced cache key."""
+        if namespace:
+            return f"{namespace}:{key}"
+        return key
+
+    async def _get(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+        """Get a value from cache."""
+        key = kwargs["key"]
+        namespace = kwargs.get("namespace", "")
+        backend = CacheBackend(kwargs.get("backend", "memory"))
+        serialization = SerializationFormat(kwargs.get("serialization", "json"))
+
+        full_key = self._build_key(key, namespace)
+
+        try:
+            if backend == CacheBackend.REDIS and self._redis_client:
+                value, hit = await self._redis_get(full_key, serialization, kwargs)
+            elif backend == CacheBackend.HYBRID:
+                # Try memory first, then Redis
+                value, hit = await self._memory_get(full_key, serialization, kwargs)
+                if hit and self._redis_client:
+                    # Check if key still exists in Redis (might have been invalidated)
+                    redis_exists = await self._redis_client.exists(full_key)
+                    if not redis_exists:
+                        # Key was invalidated in Redis, remove from memory cache
+                        self._memory_cache.pop(full_key, None)
+                        self._access_times.pop(full_key, None)
+                        self._access_counts.pop(full_key, None)
+                        value, hit = None, False
+                elif not hit and self._redis_client:
+                    value, hit = await self._redis_get(full_key, serialization, kwargs)
+                    # Cache in memory for next access
+                    if hit:
+                        await self._memory_set(full_key, value, kwargs)
+            else:
+                value, hit = await self._memory_get(full_key, serialization, kwargs)
+
+            if hit:
+                self._cache_stats["hits"] += 1
+            else:
+                self._cache_stats["misses"] += 1
+
+            return {
+                "success": True,
+                "value": value,
+                "hit": hit,
+                "key": full_key,
+            }
+
+        except Exception as e:
+            self.logger.error(f"Cache get failed for key '{full_key}': {str(e)}")
+            return {
+                "success": False,
+                "value": None,
+                "hit": False,
+                "key": full_key,
+                "error": str(e),
+            }
+
+    async def _set(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+        """Set a value in cache."""
+        key = kwargs["key"]
+        value = kwargs["value"]
+        namespace = kwargs.get("namespace", "")
+        backend = CacheBackend(kwargs.get("backend", "memory"))
+        ttl = kwargs.get("ttl", 3600)
+
+        full_key = self._build_key(key, namespace)
+
+        try:
+            if backend == CacheBackend.REDIS and self._redis_client:
+                success = await self._redis_set(full_key, value, ttl, kwargs)
+            elif backend == CacheBackend.HYBRID:
+                # Set in both memory and Redis
+                success1 = await self._memory_set(full_key, value, kwargs)
+                success2 = True
+                if self._redis_client:
+                    success2 = await self._redis_set(full_key, value, ttl, kwargs)
+                success = success1 and success2
+            else:
+                success = await self._memory_set(full_key, value, kwargs)
+
+            if success:
+                self._cache_stats["sets"] += 1
+
+            return {
+                "success": success,
+                "key": full_key,
+                "ttl_remaining": ttl if ttl > 0 else -1,
+            }
+
+        except Exception as e:
+            self.logger.error(f"Cache set failed for key '{full_key}': {str(e)}")
+            return {
+                "success": False,
+                "key": full_key,
+                "error": str(e),
+            }
+
+    async def _delete(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+        """Delete a value from cache."""
+        key = kwargs["key"]
+        namespace = kwargs.get("namespace", "")
+        backend = CacheBackend(kwargs.get("backend", "memory"))
+
+        full_key = self._build_key(key, namespace)
+
+        try:
+            deleted = False
+
+            if backend == CacheBackend.REDIS and self._redis_client:
+                deleted = bool(await self._redis_client.delete(full_key))
+            elif backend == CacheBackend.HYBRID:
+                # Delete from both
+                mem_deleted = full_key in self._memory_cache
+                if mem_deleted:
+                    del self._memory_cache[full_key]
+                    del self._access_times[full_key]
+                    self._access_counts.pop(full_key, None)
+
+                redis_deleted = False
+                if self._redis_client:
+                    redis_deleted = bool(await self._redis_client.delete(full_key))
+
+                deleted = mem_deleted or redis_deleted
+            else:
+                if full_key in self._memory_cache:
+                    del self._memory_cache[full_key]
+                    del self._access_times[full_key]
+                    self._access_counts.pop(full_key, None)
+                    deleted = True
+
+            if deleted:
+                self._cache_stats["deletes"] += 1
+
+            return {
+                "success": True,
+                "deleted": deleted,
+                "key": full_key,
+            }
+
+        except Exception as e:
+            self.logger.error(f"Cache delete failed for key '{full_key}': {str(e)}")
+            return {
+                "success": False,
+                "deleted": False,
+                "key": full_key,
+                "error": str(e),
+            }
+
+    async def _exists(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+        """Check if a key exists in cache."""
+        key = kwargs["key"]
+        namespace = kwargs.get("namespace", "")
+        backend = CacheBackend(kwargs.get("backend", "memory"))
+
+        full_key = self._build_key(key, namespace)
+
+        try:
+            exists = False
+
+            if backend == CacheBackend.REDIS and self._redis_client:
+                exists = bool(await self._redis_client.exists(full_key))
+            elif backend == CacheBackend.HYBRID:
+                exists = full_key in self._memory_cache or (
+                    self._redis_client
+                    and bool(await self._redis_client.exists(full_key))
+                )
+            else:
+                exists = full_key in self._memory_cache
+
+            return {
+                "success": True,
+                "exists": exists,
+                "key": full_key,
+            }
+
+        except Exception as e:
+            self.logger.error(
+                f"Cache exists check failed for key '{full_key}': {str(e)}"
+            )
+            return {
+                "success": False,
+                "exists": False,
+                "key": full_key,
+                "error": str(e),
+            }
+
+    async def _clear(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+        """Clear all cache entries."""
+        backend = CacheBackend(kwargs.get("backend", "memory"))
+        namespace = kwargs.get("namespace", "")
+
+        try:
+            cleared_count = 0
+
+            if backend == CacheBackend.REDIS and self._redis_client:
+                if namespace:
+                    # Clear only namespaced keys
+                    pattern = f"{namespace}:*"
+                    keys = await self._redis_client.keys(pattern)
+                    if keys:
+                        cleared_count = await self._redis_client.delete(*keys)
+                else:
+                    # Clear all
+                    await self._redis_client.flushdb()
+                    cleared_count = -1  # Unknown count
+
+            elif backend == CacheBackend.HYBRID:
+                # Clear memory
+                if namespace:
+                    mem_keys = [
+                        k
+                        for k in self._memory_cache.keys()
+                        if k.startswith(f"{namespace}:")
+                    ]
+                    for k in mem_keys:
+                        del self._memory_cache[k]
+                        del self._access_times[k]
+                        self._access_counts.pop(k, None)
+                    cleared_count += len(mem_keys)
+                else:
+                    cleared_count += len(self._memory_cache)
+                    self._memory_cache.clear()
+                    self._access_times.clear()
+                    self._access_counts.clear()
+
+                # Clear Redis
+                if self._redis_client:
+                    if namespace:
+                        pattern = f"{namespace}:*"
+                        keys = await self._redis_client.keys(pattern)
+                        if keys:
+                            cleared_count += await self._redis_client.delete(*keys)
+                    else:
+                        await self._redis_client.flushdb()
+
+            else:
+                # Memory cache
+                if namespace:
+                    mem_keys = [
+                        k
+                        for k in self._memory_cache.keys()
+                        if k.startswith(f"{namespace}:")
+                    ]
+                    for k in mem_keys:
+                        del self._memory_cache[k]
+                        del self._access_times[k]
+                        self._access_counts.pop(k, None)
+                    cleared_count = len(mem_keys)
+                else:
+                    cleared_count = len(self._memory_cache)
+                    self._memory_cache.clear()
+                    self._access_times.clear()
+                    self._access_counts.clear()
+
+            return {
+                "success": True,
+                "cleared_count": cleared_count,
+            }
+
+        except Exception as e:
+            self.logger.error(f"Cache clear failed: {str(e)}")
+            return {
+                "success": False,
+                "cleared_count": 0,
+                "error": str(e),
+            }
+
+    async def _get_stats(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+        """Get cache statistics."""
+        backend = CacheBackend(kwargs.get("backend", "memory"))
+
+        try:
+            stats = dict(self._cache_stats)
+
+            if backend == CacheBackend.MEMORY or backend == CacheBackend.HYBRID:
+                stats["memory_items"] = len(self._memory_cache)
+                stats["memory_size_bytes"] = sum(
+                    len(str(v).encode()) for v in self._memory_cache.values()
+                )
+
+            if (
+                backend == CacheBackend.REDIS or backend == CacheBackend.HYBRID
+            ) and self._redis_client:
+                redis_info = await self._redis_client.info()
+                stats["redis_connected"] = True
+                stats["redis_memory"] = redis_info.get("used_memory", 0)
+                stats["redis_keys"] = redis_info.get("db0", {}).get("keys", 0)
+            else:
+                stats["redis_connected"] = False
+
+            # Calculate hit rate
+            total_reads = stats["hits"] + stats["misses"]
+            stats["hit_rate"] = stats["hits"] / total_reads if total_reads > 0 else 0
+
+            return {
+                "success": True,
+                "stats": stats,
+            }
+
+        except Exception as e:
+            self.logger.error(f"Failed to get cache stats: {str(e)}")
+            return {
+                "success": False,
+                "stats": {},
+                "error": str(e),
+            }
+
+    async def _get_pattern(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+        """Get values matching a pattern."""
+        pattern = kwargs["pattern"]
+        namespace = kwargs.get("namespace", "")
+        backend = CacheBackend(kwargs.get("backend", "memory"))
+        limit = kwargs.get("limit", 1000)
+
+        if namespace:
+            pattern = f"{namespace}:{pattern}"
+
+        try:
+            values = {}
+
+            if backend == CacheBackend.REDIS and self._redis_client:
+                keys = await self._redis_client.keys(pattern)
+                if keys:
+                    keys = keys[:limit]  # Limit results
+                    raw_values = await self._redis_client.mget(keys)
+                    for key, raw_value in zip(keys, raw_values):
+                        if raw_value:
+                            try:
+                                values[
+                                    key.decode() if isinstance(key, bytes) else key
+                                ] = json.loads(raw_value)
+                            except json.JSONDecodeError:
+                                values[
+                                    key.decode() if isinstance(key, bytes) else key
+                                ] = (
+                                    raw_value.decode()
+                                    if isinstance(raw_value, bytes)
+                                    else raw_value
+                                )
+
+            elif backend == CacheBackend.HYBRID:
+                # Get from memory
+                import fnmatch
+
+                mem_keys = [
+                    k for k in self._memory_cache.keys() if fnmatch.fnmatch(k, pattern)
+                ]
+                for key in mem_keys[:limit]:
+                    values[key] = self._memory_cache[key]
+
+                # Get from Redis if not enough results
+                if len(values) < limit and self._redis_client:
+                    remaining_limit = limit - len(values)
+                    redis_keys = await self._redis_client.keys(pattern)
+                    redis_keys = [k for k in redis_keys if k not in values][
+                        :remaining_limit
+                    ]
+                    if redis_keys:
+                        raw_values = await self._redis_client.mget(redis_keys)
+                        for key, raw_value in zip(redis_keys, raw_values):
+                            if raw_value:
+                                try:
+                                    values[
+                                        key.decode() if isinstance(key, bytes) else key
+                                    ] = json.loads(raw_value)
+                                except json.JSONDecodeError:
+                                    values[
+                                        key.decode() if isinstance(key, bytes) else key
+                                    ] = (
+                                        raw_value.decode()
+                                        if isinstance(raw_value, bytes)
+                                        else raw_value
+                                    )
+
+            else:
+                # Memory cache with fnmatch
+                import fnmatch
+
+                mem_keys = [
+                    k for k in self._memory_cache.keys() if fnmatch.fnmatch(k, pattern)
+                ]
+                for key in mem_keys[:limit]:
+                    values[key] = self._memory_cache[key]
+
+            return {
+                "success": True,
+                "values": values,
+                "count": len(values),
+                "pattern": pattern,
+            }
+
+        except Exception as e:
+            self.logger.error(f"Pattern get failed for pattern '{pattern}': {str(e)}")
+            return {
+                "success": False,
+                "values": {},
+                "count": 0,
+                "pattern": pattern,
+                "error": str(e),
+            }
+
+    async def _mget(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+        """Get multiple values by keys."""
+        keys = kwargs["keys"]
+        namespace = kwargs.get("namespace", "")
+        backend = CacheBackend(kwargs.get("backend", "memory"))
+
+        full_keys = [self._build_key(key, namespace) for key in keys]
+
+        try:
+            values = {}
+            hits = 0
+
+            if backend == CacheBackend.REDIS and self._redis_client:
+                raw_values = await self._redis_client.mget(full_keys)
+                for key, raw_value in zip(full_keys, raw_values):
+                    if raw_value:
+                        try:
+                            values[key] = json.loads(raw_value)
+                            hits += 1
+                        except json.JSONDecodeError:
+                            values[key] = (
+                                raw_value.decode()
+                                if isinstance(raw_value, bytes)
+                                else raw_value
+                            )
+                            hits += 1
+
+            elif backend == CacheBackend.HYBRID:
+                # Try memory first
+                for key in full_keys:
+                    if key in self._memory_cache:
+                        values[key] = self._memory_cache[key]
+                        hits += 1
+
+                # Get missing keys from Redis
+                missing_keys = [k for k in full_keys if k not in values]
+                if missing_keys and self._redis_client:
+                    raw_values = await self._redis_client.mget(missing_keys)
+                    for key, raw_value in zip(missing_keys, raw_values):
+                        if raw_value:
+                            try:
+                                value = json.loads(raw_value)
+                                values[key] = value
+                                # Cache in memory
+                                self._memory_cache[key] = value
+                                self._access_times[key] = time.time()
+                                hits += 1
+                            except json.JSONDecodeError:
+                                value = (
+                                    raw_value.decode()
+                                    if isinstance(raw_value, bytes)
+                                    else raw_value
+                                )
+                                values[key] = value
+                                self._memory_cache[key] = value
+                                self._access_times[key] = time.time()
+                                hits += 1
+
+            else:
+                # Memory cache
+                for key in full_keys:
+                    if key in self._memory_cache:
+                        values[key] = self._memory_cache[key]
+                        hits += 1
+
+            self._cache_stats["hits"] += hits
+            self._cache_stats["misses"] += len(full_keys) - hits
+
+            return {
+                "success": True,
+                "values": values,
+                "hits": hits,
+                "total_keys": len(full_keys),
+            }
+
+        except Exception as e:
+            self.logger.error(f"Batch get failed: {str(e)}")
+            return {
+                "success": False,
+                "values": {},
+                "hits": 0,
+                "total_keys": len(full_keys),
+                "error": str(e),
+            }
+
+    async def _mset(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
+        """Set multiple key-value pairs."""
+        values_dict = kwargs.get("values", {})
+        namespace = kwargs.get("namespace", "")
+        backend = CacheBackend(kwargs.get("backend", "memory"))
+        ttl = kwargs.get("ttl", 3600)
+
+        if not values_dict:
+            return {"success": False, "error": "No values provided"}
+
+        full_values = {
+            self._build_key(key, namespace): value for key, value in values_dict.items()
+        }
+
+        try:
+            success_count = 0
+
+            if backend == CacheBackend.REDIS and self._redis_client:
+                # Use Redis pipeline for efficiency
+                pipe = self._redis_client.pipeline()
+                for key, value in full_values.items():
+                    serialized = (
+                        json.dumps(value) if not isinstance(value, str) else value
+                    )
+                    if ttl > 0:
+                        pipe.setex(key, ttl, serialized)
+                    else:
+                        pipe.set(key, serialized)
+
+                results = await pipe.execute()
+                success_count = sum(1 for r in results if r)
+
+            elif backend == CacheBackend.HYBRID:
+                # Set in memory
+                for key, value in full_values.items():
+                    self._memory_cache[key] = value
+                    self._access_times[key] = time.time()
+                    success_count += 1
+
+                # Set in Redis
+                if self._redis_client:
+                    pipe = self._redis_client.pipeline()
+                    for key, value in full_values.items():
+                        serialized = (
+                            json.dumps(value) if not isinstance(value, str) else value
+                        )
+                        if ttl > 0:
+                            pipe.setex(key, ttl, serialized)
+                        else:
+                            pipe.set(key, serialized)
+                    await pipe.execute()
+
+            else:
+                # Memory cache
+                for key, value in full_values.items():
+                    self._memory_cache[key] = value
+                    self._access_times[key] = time.time()
+                    success_count += 1
+
+            self._cache_stats["sets"] += success_count
+
+            return {
+                "success": True,
+                "set_count": success_count,
+                "total_keys": len(full_values),
+            }
+
+        except Exception as e:
+            self.logger.error(f"Batch set failed: {str(e)}")
+            return {
+                "success": False,
+                "set_count": 0,
+                "total_keys": len(full_values),
+                "error": str(e),
+            }
+
+    async def _memory_get(
+        self, key: str, serialization: SerializationFormat, kwargs: Dict[str, Any]
+    ) -> tuple[Any, bool]:
+        """Get value from memory cache."""
+        if key not in self._memory_cache:
+            return None, False
+
+        # Check TTL if stored with timestamp
+        value_data = self._memory_cache[key]
+        if isinstance(value_data, dict) and "_cache_timestamp" in value_data:
+            timestamp = value_data["_cache_timestamp"]
+            ttl = value_data.get("_cache_ttl", 0)
+            if ttl > 0 and time.time() - timestamp > ttl:
+                # Expired
+                del self._memory_cache[key]
+                del self._access_times[key]
+                self._access_counts.pop(key, None)
+                return None, False
+            value = value_data["_cache_value"]
+        else:
+            value = value_data
+
+        # Update access statistics
+        self._access_times[key] = time.time()
+        self._access_counts[key] = self._access_counts.get(key, 0) + 1
+
+        # Handle decompression if needed
+        if kwargs.get("compression", False) and isinstance(value, bytes):
+            try:
+                value = gzip.decompress(value)
+                if serialization == SerializationFormat.JSON:
+                    value = json.loads(value.decode())
+                elif serialization == SerializationFormat.PICKLE:
+                    value = pickle.loads(value)
+            except Exception:
+                pass  # Use value as-is if decompression fails
+
+        return value, True
+
+    async def _memory_set(self, key: str, value: Any, kwargs: Dict[str, Any]) -> bool:
+        """Set value in memory cache."""
+        try:
+            ttl = kwargs.get("ttl", 0)
+            compression = kwargs.get("compression", False)
+            compression_threshold = kwargs.get("compression_threshold", 1024)
+            max_items = kwargs.get("max_memory_items", 10000)
+
+            # Handle eviction if needed
+            if len(self._memory_cache) >= max_items:
+                await self._evict_memory_items(kwargs)
+
+            # Prepare value for storage
+            stored_value = value
+            compressed = False
+
+            # Apply compression if needed
+            if compression:
+                serialized = (
+                    json.dumps(value) if not isinstance(value, (str, bytes)) else value
+                )
+                if isinstance(serialized, str):
+                    serialized = serialized.encode()
+
+                if len(serialized) >= compression_threshold:
+                    stored_value = gzip.compress(serialized)
+                    compressed = True
+
+            # Store with TTL if specified
+            if ttl > 0:
+                self._memory_cache[key] = {
+                    "_cache_value": stored_value,
+                    "_cache_timestamp": time.time(),
+                    "_cache_ttl": ttl,
+                    "_cache_compressed": compressed,
+                }
+            else:
+                self._memory_cache[key] = stored_value
+
+            self._access_times[key] = time.time()
+            return True
+
+        except Exception as e:
+            self.logger.error(f"Memory cache set failed: {str(e)}")
+            return False
+
+    async def _redis_get(
+        self, key: str, serialization: SerializationFormat, kwargs: Dict[str, Any]
+    ) -> tuple[Any, bool]:
+        """Get value from Redis cache."""
+        try:
+            raw_value = await self._redis_client.get(key)
+            if raw_value is None:
+                return None, False
+
+            # Handle decompression
+            if kwargs.get("compression", False):
+                try:
+                    raw_value = gzip.decompress(raw_value)
+                except Exception:
+                    pass  # Not compressed or failed decompression
+
+            # Deserialize based on format
+            if serialization == SerializationFormat.JSON:
+                value = json.loads(raw_value)
+            elif serialization == SerializationFormat.PICKLE:
+                value = pickle.loads(raw_value)
+            elif serialization == SerializationFormat.STRING:
+                value = (
+                    raw_value.decode() if isinstance(raw_value, bytes) else raw_value
+                )
+            else:  # BYTES
+                value = raw_value
+
+            return value, True
+
+        except Exception as e:
+            self.logger.error(f"Redis get failed: {str(e)}")
+            return None, False
+
+    async def _redis_set(
+        self, key: str, value: Any, ttl: int, kwargs: Dict[str, Any]
+    ) -> bool:
+        """Set value in Redis cache."""
+        try:
+            serialization = SerializationFormat(kwargs.get("serialization", "json"))
+            compression = kwargs.get("compression", False)
+            compression_threshold = kwargs.get("compression_threshold", 1024)
+
+            # Serialize value
+            if serialization == SerializationFormat.JSON:
+                serialized = json.dumps(value)
+            elif serialization == SerializationFormat.PICKLE:
+                serialized = pickle.dumps(value)
+            elif serialization == SerializationFormat.STRING:
+                serialized = str(value)
+            else:  # BYTES
+                serialized = value if isinstance(value, bytes) else str(value).encode()
+
+            # Apply compression if needed
+            if compression and len(serialized) >= compression_threshold:
+                if isinstance(serialized, str):
+                    serialized = serialized.encode()
+                serialized = gzip.compress(serialized)
+
+            # Store in Redis
+            if ttl > 0:
+                await self._redis_client.setex(key, ttl, serialized)
+            else:
+                await self._redis_client.set(key, serialized)
+
+            return True
+
+        except Exception as e:
+            self.logger.error(f"Redis set failed: {str(e)}")
+            return False
+
+    async def _evict_memory_items(self, kwargs: Dict[str, Any]):
+        """Evict items from memory cache based on policy."""
+        eviction_policy = EvictionPolicy(kwargs.get("eviction_policy", "lru"))
+        max_items = kwargs.get("max_memory_items", 10000)
+
+        # Remove 10% of items to make room
+        evict_count = max(1, len(self._memory_cache) // 10)
+
+        if eviction_policy == EvictionPolicy.LRU:
+            # Remove least recently used
+            sorted_by_access = sorted(self._access_times.items(), key=lambda x: x[1])
+            for key, _ in sorted_by_access[:evict_count]:
+                del self._memory_cache[key]
+                del self._access_times[key]
+                self._access_counts.pop(key, None)
+                self._cache_stats["evictions"] += 1
+
+        elif eviction_policy == EvictionPolicy.LFU:
+            # Remove least frequently used
+            sorted_by_frequency = sorted(
+                self._access_counts.items(), key=lambda x: x[1]
+            )
+            for key, _ in sorted_by_frequency[:evict_count]:
+                del self._memory_cache[key]
+                del self._access_times[key]
+                del self._access_counts[key]
+                self._cache_stats["evictions"] += 1
+
+        elif eviction_policy == EvictionPolicy.TTL:
+            # Remove expired items first
+            now = time.time()
+            expired_keys = []
+            for key, value_data in self._memory_cache.items():
+                if isinstance(value_data, dict) and "_cache_timestamp" in value_data:
+                    timestamp = value_data["_cache_timestamp"]
+                    ttl = value_data.get("_cache_ttl", 0)
+                    if ttl > 0 and now - timestamp > ttl:
+                        expired_keys.append(key)
+
+            for key in expired_keys:
+                del self._memory_cache[key]
+                del self._access_times[key]
+                self._access_counts.pop(key, None)
+                self._cache_stats["evictions"] += 1
+
+        elif eviction_policy == EvictionPolicy.FIFO:
+            # Remove oldest inserted items
+            sorted_keys = list(self._memory_cache.keys())[:evict_count]
+            for key in sorted_keys:
+                del self._memory_cache[key]
+                del self._access_times[key]
+                self._access_counts.pop(key, None)
+                self._cache_stats["evictions"] += 1
+
+    def run(self, **kwargs) -> Dict[str, Any]:
+        """Synchronous wrapper for compatibility."""
+        try:
+            # Try to get current event loop
+            loop = asyncio.get_running_loop()
+        except RuntimeError:
+            # No event loop running, safe to use asyncio.run()
+            return asyncio.run(self.async_run(**kwargs))
+        else:
+            # Event loop is running, create a task
+            import concurrent.futures
+
+            with concurrent.futures.ThreadPoolExecutor() as executor:
+                future = executor.submit(asyncio.run, self.async_run(**kwargs))
+                return future.result()
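To round off the new cache module, here is a minimal usage sketch based on the parameters and docstring examples above. It assumes `cache.execute(...)` is the awaitable entry point that drives `async_run()` (as the class docstring's examples suggest) and uses only operations and parameters defined in `get_parameters()`; the workflow-level wiring is not shown in this diff.

import asyncio

from kailash.nodes.cache.cache import CacheNode


async def main():
    cache = CacheNode()

    # Store a value under a namespaced key with a 60-second TTL (default memory backend).
    await cache.execute(
        operation="set",
        key="user:123",
        value={"name": "John"},
        ttl=60,
        namespace="demo",
    )

    # Read it back; "hit" reports whether the key was found before expiry.
    result = await cache.execute(operation="get", key="user:123", namespace="demo")
    print(result["hit"], result["value"])

    # Inspect the hit/miss counters the node collects internally.
    stats = await cache.execute(operation="stats")
    print(stats["stats"]["hit_rate"])


asyncio.run(main())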