kailash 0.4.2__py3-none-any.whl → 0.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. kailash/__init__.py +1 -1
  2. kailash/client/__init__.py +12 -0
  3. kailash/client/enhanced_client.py +306 -0
  4. kailash/core/actors/__init__.py +16 -0
  5. kailash/core/actors/connection_actor.py +566 -0
  6. kailash/core/actors/supervisor.py +364 -0
  7. kailash/edge/__init__.py +16 -0
  8. kailash/edge/compliance.py +834 -0
  9. kailash/edge/discovery.py +659 -0
  10. kailash/edge/location.py +582 -0
  11. kailash/gateway/__init__.py +33 -0
  12. kailash/gateway/api.py +289 -0
  13. kailash/gateway/enhanced_gateway.py +357 -0
  14. kailash/gateway/resource_resolver.py +217 -0
  15. kailash/gateway/security.py +227 -0
  16. kailash/middleware/auth/models.py +2 -2
  17. kailash/middleware/database/base_models.py +1 -7
  18. kailash/middleware/database/repositories.py +3 -1
  19. kailash/middleware/gateway/__init__.py +22 -0
  20. kailash/middleware/gateway/checkpoint_manager.py +398 -0
  21. kailash/middleware/gateway/deduplicator.py +382 -0
  22. kailash/middleware/gateway/durable_gateway.py +417 -0
  23. kailash/middleware/gateway/durable_request.py +498 -0
  24. kailash/middleware/gateway/event_store.py +459 -0
  25. kailash/nodes/admin/audit_log.py +364 -6
  26. kailash/nodes/admin/permission_check.py +817 -33
  27. kailash/nodes/admin/role_management.py +1242 -108
  28. kailash/nodes/admin/schema_manager.py +438 -0
  29. kailash/nodes/admin/user_management.py +1209 -681
  30. kailash/nodes/api/http.py +95 -71
  31. kailash/nodes/base.py +281 -164
  32. kailash/nodes/base_async.py +30 -31
  33. kailash/nodes/code/__init__.py +8 -1
  34. kailash/nodes/code/async_python.py +1035 -0
  35. kailash/nodes/code/python.py +1 -0
  36. kailash/nodes/data/async_sql.py +12 -25
  37. kailash/nodes/data/sql.py +20 -11
  38. kailash/nodes/data/workflow_connection_pool.py +643 -0
  39. kailash/nodes/rag/__init__.py +1 -4
  40. kailash/resources/__init__.py +40 -0
  41. kailash/resources/factory.py +533 -0
  42. kailash/resources/health.py +319 -0
  43. kailash/resources/reference.py +288 -0
  44. kailash/resources/registry.py +392 -0
  45. kailash/runtime/async_local.py +711 -302
  46. kailash/testing/__init__.py +34 -0
  47. kailash/testing/async_test_case.py +353 -0
  48. kailash/testing/async_utils.py +345 -0
  49. kailash/testing/fixtures.py +458 -0
  50. kailash/testing/mock_registry.py +495 -0
  51. kailash/utils/resource_manager.py +420 -0
  52. kailash/workflow/__init__.py +8 -0
  53. kailash/workflow/async_builder.py +621 -0
  54. kailash/workflow/async_patterns.py +766 -0
  55. kailash/workflow/builder.py +93 -10
  56. kailash/workflow/cyclic_runner.py +111 -41
  57. kailash/workflow/graph.py +7 -2
  58. kailash/workflow/resilience.py +11 -1
  59. {kailash-0.4.2.dist-info → kailash-0.6.0.dist-info}/METADATA +12 -7
  60. {kailash-0.4.2.dist-info → kailash-0.6.0.dist-info}/RECORD +64 -28
  61. {kailash-0.4.2.dist-info → kailash-0.6.0.dist-info}/WHEEL +0 -0
  62. {kailash-0.4.2.dist-info → kailash-0.6.0.dist-info}/entry_points.txt +0 -0
  63. {kailash-0.4.2.dist-info → kailash-0.6.0.dist-info}/licenses/LICENSE +0 -0
  64. {kailash-0.4.2.dist-info → kailash-0.6.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,420 @@
1
+ """Resource management utilities for the Kailash SDK.
2
+
3
+ This module provides context managers and utilities for efficient resource
4
+ management across the SDK, ensuring proper cleanup and preventing memory leaks.
5
+ """
6
+
7
+ import asyncio
8
+ import logging
9
+ import threading
10
+ import weakref
11
+ from collections import defaultdict
12
+ from contextlib import asynccontextmanager, contextmanager
13
+ from datetime import UTC, datetime
14
+ from typing import Any, Callable, Dict, Generic, Optional, Set, TypeVar
15
+
16
+ logger = logging.getLogger(__name__)
17
+
18
+ T = TypeVar("T")
19
+
20
+
21
class ResourcePool(Generic[T]):
    """Generic thread-safe resource pool for connection pooling and reuse.

    Manages expensive resources (database connections, HTTP clients, ...)
    behind a bounded pool: at most ``max_size`` resources exist at once,
    and acquisition blocks for up to ``timeout`` seconds when all are in use.
    """

    def __init__(
        self,
        factory: Callable[[], T],
        max_size: int = 10,
        timeout: float = 30.0,
        cleanup: Optional[Callable[[T], None]] = None,
    ):
        """Initialize the resource pool.

        Args:
            factory: Zero-argument callable that creates a new resource.
            max_size: Maximum number of resources that may exist at once.
            timeout: Seconds to wait in acquire() before raising TimeoutError.
            cleanup: Optional callable invoked per resource by cleanup_all().
        """
        self._factory = factory
        self._max_size = max_size
        self._timeout = timeout
        self._cleanup = cleanup

        self._pool: list[T] = []  # idle resources ready for reuse
        self._in_use: Set[T] = set()  # resources currently handed out
        self._lock = threading.Lock()
        # Bounds the number of concurrent holders to max_size.
        self._semaphore = threading.Semaphore(max_size)
        self._created_count = 0

    @contextmanager
    def acquire(self):
        """Acquire a resource from the pool.

        Yields:
            Resource instance (reused from the pool when one is idle).

        Raises:
            TimeoutError: If no resource becomes available within ``timeout``.
        """
        if not self._semaphore.acquire(timeout=self._timeout):
            raise TimeoutError(f"Failed to acquire resource within {self._timeout}s")

        resource = None
        try:
            with self._lock:
                if self._pool:
                    # Reuse an idle resource.
                    resource = self._pool.pop()
                elif self._created_count < self._max_size:
                    # The semaphore should guarantee we are under the limit
                    # here; the guard is a defensive invariant check.
                    resource = self._factory()
                    self._created_count += 1
                else:
                    raise RuntimeError("Pool exhausted")

                self._in_use.add(resource)

            yield resource

        finally:
            # Return the resource (if one was obtained) and always release
            # the semaphore slot so waiting acquirers are not starved.
            if resource is not None:
                with self._lock:
                    self._in_use.discard(resource)
                    self._pool.append(resource)
            self._semaphore.release()

    def cleanup_all(self):
        """Clean up all resources (pooled and, best effort, in-use) and reset."""
        with self._lock:
            if self._cleanup:
                for resource in self._pool:
                    try:
                        self._cleanup(resource)
                    except Exception as e:
                        logger.error(f"Error cleaning up resource: {e}")

                # Best effort: in-use resources may still be active elsewhere.
                for resource in self._in_use:
                    try:
                        self._cleanup(resource)
                    except Exception as e:
                        logger.error(f"Error cleaning up in-use resource: {e}")

            self._pool.clear()
            self._in_use.clear()
            self._created_count = 0
114
+
115
+
116
class AsyncResourcePool(Generic[T]):
    """Async variant of ResourcePool for resources used from an event loop.

    Same bounded-pool semantics as ResourcePool, but acquisition awaits an
    asyncio.Semaphore, and both the factory and the cleanup callable may be
    either plain functions or coroutine functions.
    """

    def __init__(
        self,
        factory: Callable[[], T],
        max_size: int = 10,
        timeout: float = 30.0,
        cleanup: Optional[Callable[[T], Any]] = None,
    ):
        """Initialize the async resource pool.

        Args:
            factory: Sync or async zero-argument callable creating a resource.
            max_size: Maximum number of resources that may exist at once.
            timeout: Seconds to wait in acquire() before raising TimeoutError.
            cleanup: Optional sync or async callable used by cleanup_all().
        """
        self._factory = factory
        self._max_size = max_size
        self._timeout = timeout
        self._cleanup = cleanup

        self._pool: list[T] = []  # idle resources ready for reuse
        self._in_use: Set[T] = set()  # resources currently handed out
        self._lock = asyncio.Lock()
        # Bounds the number of concurrent holders to max_size.
        self._semaphore = asyncio.Semaphore(max_size)
        self._created_count = 0

    @staticmethod
    async def _call(fn, *args):
        """Invoke *fn*, awaiting the result if it is a coroutine function."""
        if asyncio.iscoroutinefunction(fn):
            return await fn(*args)
        return fn(*args)

    @asynccontextmanager
    async def acquire(self):
        """Acquire a resource from the pool asynchronously.

        Yields:
            Resource instance (reused from the pool when one is idle).

        Raises:
            TimeoutError: If no resource becomes available within ``timeout``.
        """
        try:
            await asyncio.wait_for(self._semaphore.acquire(), timeout=self._timeout)
        except asyncio.TimeoutError:
            # Suppress the internal asyncio traceback; the timeout itself
            # is the whole story for callers.
            raise TimeoutError(
                f"Failed to acquire resource within {self._timeout}s"
            ) from None

        resource = None
        try:
            async with self._lock:
                if self._pool:
                    # Reuse an idle resource.
                    resource = self._pool.pop()
                elif self._created_count < self._max_size:
                    # The semaphore should guarantee we are under the limit
                    # here; the guard is a defensive invariant check.
                    resource = await self._call(self._factory)
                    self._created_count += 1
                else:
                    raise RuntimeError("Pool exhausted")

                self._in_use.add(resource)

            yield resource

        finally:
            # Return the resource (if one was obtained) and always release
            # the semaphore slot so waiting acquirers are not starved.
            if resource is not None:
                async with self._lock:
                    self._in_use.discard(resource)
                    self._pool.append(resource)
            self._semaphore.release()

    async def cleanup_all(self):
        """Clean up all resources (pooled and, best effort, in-use) and reset."""
        async with self._lock:
            if self._cleanup:
                for resource in self._pool:
                    try:
                        await self._call(self._cleanup, resource)
                    except Exception as e:
                        logger.error(f"Error cleaning up resource: {e}")

                # Best effort: in-use resources may still be active elsewhere.
                for resource in self._in_use:
                    try:
                        await self._call(self._cleanup, resource)
                    except Exception as e:
                        logger.error(f"Error cleaning up in-use resource: {e}")

            self._pool.clear()
            self._in_use.clear()
            self._created_count = 0
216
+
217
+
218
+ class ResourceTracker:
219
+ """Track and manage resources across the SDK to prevent leaks."""
220
+
221
+ def __init__(self):
222
+ self._resources: Dict[str, weakref.WeakSet] = defaultdict(weakref.WeakSet)
223
+ self._metrics: Dict[str, Dict[str, Any]] = defaultdict(dict)
224
+ self._lock = threading.Lock()
225
+
226
+ def register(self, resource_type: str, resource: Any):
227
+ """Register a resource for tracking.
228
+
229
+ Args:
230
+ resource_type: Type/category of resource
231
+ resource: Resource instance to track
232
+ """
233
+ with self._lock:
234
+ self._resources[resource_type].add(resource)
235
+
236
+ # Update metrics
237
+ if resource_type not in self._metrics:
238
+ self._metrics[resource_type] = {
239
+ "created": 0,
240
+ "active": 0,
241
+ "peak": 0,
242
+ "last_created": None,
243
+ }
244
+
245
+ self._metrics[resource_type]["created"] += 1
246
+ self._metrics[resource_type]["active"] = len(self._resources[resource_type])
247
+ self._metrics[resource_type]["peak"] = max(
248
+ self._metrics[resource_type]["peak"],
249
+ self._metrics[resource_type]["active"],
250
+ )
251
+ self._metrics[resource_type]["last_created"] = datetime.now(UTC)
252
+
253
+ def get_metrics(self) -> Dict[str, Dict[str, Any]]:
254
+ """Get current resource metrics.
255
+
256
+ Returns:
257
+ Dictionary of metrics by resource type
258
+ """
259
+ with self._lock:
260
+ # Update active counts
261
+ for resource_type in self._metrics:
262
+ self._metrics[resource_type]["active"] = len(
263
+ self._resources[resource_type]
264
+ )
265
+
266
+ return dict(self._metrics)
267
+
268
+ def get_active_resources(
269
+ self, resource_type: Optional[str] = None
270
+ ) -> Dict[str, int]:
271
+ """Get count of active resources.
272
+
273
+ Args:
274
+ resource_type: Optional filter by type
275
+
276
+ Returns:
277
+ Dictionary of resource type to active count
278
+ """
279
+ with self._lock:
280
+ if resource_type:
281
+ return {resource_type: len(self._resources.get(resource_type, set()))}
282
+ else:
283
+ return {
284
+ rtype: len(resources)
285
+ for rtype, resources in self._resources.items()
286
+ }
287
+
288
+
289
# Global resource tracker instance shared by the convenience helpers below.
_resource_tracker = ResourceTracker()


def get_resource_tracker() -> ResourceTracker:
    """Get the global resource tracker instance."""
    return _resource_tracker


@contextmanager
def managed_resource(
    resource_type: str, resource: Any, cleanup: Optional[Callable] = None
):
    """Context manager that tracks a resource and cleans it up on exit.

    Args:
        resource_type: Type/category of resource.
        resource: Resource instance.
        cleanup: Optional cleanup function called on exit; its errors are
            logged rather than propagated, so cleanup stays best-effort.

    Yields:
        The resource instance.
    """
    _resource_tracker.register(resource_type, resource)

    try:
        yield resource
    finally:
        if cleanup:
            try:
                cleanup(resource)
            except Exception as e:
                logger.error(f"Error cleaning up {resource_type}: {e}")


@asynccontextmanager
async def async_managed_resource(
    resource_type: str, resource: Any, cleanup: Optional[Callable] = None
):
    """Async context manager that tracks a resource and cleans it up on exit.

    Args:
        resource_type: Type/category of resource.
        resource: Resource instance.
        cleanup: Optional sync or async cleanup function; its errors are
            logged rather than propagated, so cleanup stays best-effort.

    Yields:
        The resource instance.
    """
    _resource_tracker.register(resource_type, resource)

    try:
        yield resource
    finally:
        if cleanup:
            try:
                if asyncio.iscoroutinefunction(cleanup):
                    await cleanup(resource)
                else:
                    cleanup(resource)
            except Exception as e:
                logger.error(f"Error cleaning up {resource_type}: {e}")
351
+
352
+
353
class ConcurrencyLimiter:
    """Limit concurrent operations to prevent resource exhaustion."""

    def __init__(self, max_concurrent: int = 10):
        """Initialize the concurrency limiter.

        Args:
            max_concurrent: Maximum concurrent operations.
        """
        self._semaphore = threading.Semaphore(max_concurrent)
        self._active = 0  # operations currently inside limit()
        self._peak = 0  # high-water mark of _active
        self._lock = threading.Lock()

    @contextmanager
    def limit(self):
        """Context manager that blocks until a concurrency slot is free.

        Everything after the semaphore acquire runs under try/finally so a
        slot can never leak, even if the counter bookkeeping raises.
        """
        self._semaphore.acquire()
        try:
            with self._lock:
                self._active += 1
                self._peak = max(self._peak, self._active)

            try:
                yield
            finally:
                with self._lock:
                    self._active -= 1
        finally:
            self._semaphore.release()

    def get_stats(self) -> Dict[str, int]:
        """Get concurrency statistics: current active count and peak."""
        with self._lock:
            return {"active": self._active, "peak": self._peak}
386
+
387
+
388
class AsyncConcurrencyLimiter:
    """Async version of ConcurrencyLimiter."""

    def __init__(self, max_concurrent: int = 10):
        """Initialize the async concurrency limiter.

        Args:
            max_concurrent: Maximum concurrent operations.
        """
        self._semaphore = asyncio.Semaphore(max_concurrent)
        self._active = 0  # operations currently inside limit()
        self._peak = 0  # high-water mark of _active
        self._lock = asyncio.Lock()

    @asynccontextmanager
    async def limit(self):
        """Async context manager that waits until a concurrency slot is free.

        Everything after the semaphore acquire runs under try/finally so a
        slot can never leak, even if the counter bookkeeping raises.
        """
        await self._semaphore.acquire()
        try:
            async with self._lock:
                self._active += 1
                self._peak = max(self._peak, self._active)

            try:
                yield
            finally:
                async with self._lock:
                    self._active -= 1
        finally:
            self._semaphore.release()

    async def get_stats(self) -> Dict[str, int]:
        """Get concurrency statistics: current active count and peak."""
        async with self._lock:
            return {"active": self._active, "peak": self._peak}
@@ -1,5 +1,8 @@
1
1
  """Workflow system for the Kailash SDK."""
2
2
 
3
+ from kailash.workflow.async_builder import AsyncWorkflowBuilder, ErrorHandler
4
+ from kailash.workflow.async_builder import RetryPolicy as AsyncRetryPolicy
5
+ from kailash.workflow.async_patterns import AsyncPatterns
3
6
  from kailash.workflow.builder import WorkflowBuilder
4
7
  from kailash.workflow.cycle_analyzer import CycleAnalyzer
5
8
  from kailash.workflow.cycle_builder import CycleBuilder
@@ -30,6 +33,11 @@ __all__ = [
30
33
  "WorkflowVisualizer",
31
34
  "MermaidVisualizer",
32
35
  "WorkflowBuilder",
36
+ "AsyncWorkflowBuilder",
37
+ "AsyncPatterns",
38
+ "RetryPolicy",
39
+ "AsyncRetryPolicy",
40
+ "ErrorHandler",
33
41
  "CycleBuilder",
34
42
  "CycleConfig",
35
43
  "CycleTemplates",