kailash 0.5.0__py3-none-any.whl → 0.6.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74)
  1. kailash/__init__.py +1 -1
  2. kailash/access_control/__init__.py +1 -1
  3. kailash/client/__init__.py +12 -0
  4. kailash/client/enhanced_client.py +306 -0
  5. kailash/core/actors/__init__.py +16 -0
  6. kailash/core/actors/adaptive_pool_controller.py +630 -0
  7. kailash/core/actors/connection_actor.py +566 -0
  8. kailash/core/actors/supervisor.py +364 -0
  9. kailash/core/ml/__init__.py +1 -0
  10. kailash/core/ml/query_patterns.py +544 -0
  11. kailash/core/monitoring/__init__.py +19 -0
  12. kailash/core/monitoring/connection_metrics.py +488 -0
  13. kailash/core/optimization/__init__.py +1 -0
  14. kailash/core/resilience/__init__.py +17 -0
  15. kailash/core/resilience/circuit_breaker.py +382 -0
  16. kailash/edge/__init__.py +16 -0
  17. kailash/edge/compliance.py +834 -0
  18. kailash/edge/discovery.py +659 -0
  19. kailash/edge/location.py +582 -0
  20. kailash/gateway/__init__.py +33 -0
  21. kailash/gateway/api.py +289 -0
  22. kailash/gateway/enhanced_gateway.py +357 -0
  23. kailash/gateway/resource_resolver.py +217 -0
  24. kailash/gateway/security.py +227 -0
  25. kailash/middleware/auth/access_control.py +6 -6
  26. kailash/middleware/auth/models.py +2 -2
  27. kailash/middleware/communication/ai_chat.py +7 -7
  28. kailash/middleware/communication/api_gateway.py +5 -15
  29. kailash/middleware/database/base_models.py +1 -7
  30. kailash/middleware/gateway/__init__.py +22 -0
  31. kailash/middleware/gateway/checkpoint_manager.py +398 -0
  32. kailash/middleware/gateway/deduplicator.py +382 -0
  33. kailash/middleware/gateway/durable_gateway.py +417 -0
  34. kailash/middleware/gateway/durable_request.py +498 -0
  35. kailash/middleware/gateway/event_store.py +499 -0
  36. kailash/middleware/mcp/enhanced_server.py +2 -2
  37. kailash/nodes/admin/permission_check.py +817 -33
  38. kailash/nodes/admin/role_management.py +1242 -108
  39. kailash/nodes/admin/schema_manager.py +438 -0
  40. kailash/nodes/admin/user_management.py +1124 -1582
  41. kailash/nodes/code/__init__.py +8 -1
  42. kailash/nodes/code/async_python.py +1035 -0
  43. kailash/nodes/code/python.py +1 -0
  44. kailash/nodes/data/async_sql.py +9 -3
  45. kailash/nodes/data/query_pipeline.py +641 -0
  46. kailash/nodes/data/query_router.py +895 -0
  47. kailash/nodes/data/sql.py +20 -11
  48. kailash/nodes/data/workflow_connection_pool.py +1071 -0
  49. kailash/nodes/monitoring/__init__.py +3 -5
  50. kailash/nodes/monitoring/connection_dashboard.py +822 -0
  51. kailash/nodes/rag/__init__.py +2 -7
  52. kailash/resources/__init__.py +40 -0
  53. kailash/resources/factory.py +533 -0
  54. kailash/resources/health.py +319 -0
  55. kailash/resources/reference.py +288 -0
  56. kailash/resources/registry.py +392 -0
  57. kailash/runtime/async_local.py +711 -302
  58. kailash/testing/__init__.py +34 -0
  59. kailash/testing/async_test_case.py +353 -0
  60. kailash/testing/async_utils.py +345 -0
  61. kailash/testing/fixtures.py +458 -0
  62. kailash/testing/mock_registry.py +495 -0
  63. kailash/workflow/__init__.py +8 -0
  64. kailash/workflow/async_builder.py +621 -0
  65. kailash/workflow/async_patterns.py +766 -0
  66. kailash/workflow/cyclic_runner.py +107 -16
  67. kailash/workflow/graph.py +7 -2
  68. kailash/workflow/resilience.py +11 -1
  69. {kailash-0.5.0.dist-info → kailash-0.6.1.dist-info}/METADATA +19 -4
  70. {kailash-0.5.0.dist-info → kailash-0.6.1.dist-info}/RECORD +74 -28
  71. {kailash-0.5.0.dist-info → kailash-0.6.1.dist-info}/WHEEL +0 -0
  72. {kailash-0.5.0.dist-info → kailash-0.6.1.dist-info}/entry_points.txt +0 -0
  73. {kailash-0.5.0.dist-info → kailash-0.6.1.dist-info}/licenses/LICENSE +0 -0
  74. {kailash-0.5.0.dist-info → kailash-0.6.1.dist-info}/top_level.txt +0 -0
kailash/middleware/gateway/event_store.py
@@ -0,0 +1,499 @@
+ """Event store for request audit trail and event sourcing.
+
+ This module provides:
+ - Append-only event log
+ - Event replay capability
+ - Event projections
+ - Audit trail for compliance
+ """
+
+ import asyncio
+ import json
+ import logging
+ import time
+ import uuid
+ from dataclasses import dataclass, field
+ from datetime import UTC, datetime
+ from enum import Enum
+ from typing import Any, AsyncIterator, Callable, Dict, List, Optional
+
+ logger = logging.getLogger(__name__)
+
+
+ class EventType(Enum):
+     """Standard event types for request lifecycle."""
+
+     REQUEST_CREATED = "request.created"
+     REQUEST_VALIDATED = "request.validated"
+     REQUEST_STARTED = "request.started"
+     REQUEST_CHECKPOINTED = "request.checkpointed"
+     REQUEST_COMPLETED = "request.completed"
+     REQUEST_FAILED = "request.failed"
+     REQUEST_CANCELLED = "request.cancelled"
+     REQUEST_RESUMED = "request.resumed"
+     REQUEST_RETRIED = "request.retried"
+
+     WORKFLOW_CREATED = "workflow.created"
+     WORKFLOW_STARTED = "workflow.started"
+     WORKFLOW_NODE_STARTED = "workflow.node.started"
+     WORKFLOW_NODE_COMPLETED = "workflow.node.completed"
+     WORKFLOW_NODE_FAILED = "workflow.node.failed"
+     WORKFLOW_COMPLETED = "workflow.completed"
+     WORKFLOW_FAILED = "workflow.failed"
+
+     DEDUPLICATION_HIT = "deduplication.hit"
+     DEDUPLICATION_MISS = "deduplication.miss"
+
+     ERROR_OCCURRED = "error.occurred"
+     ERROR_HANDLED = "error.handled"
+
+
+ @dataclass
+ class RequestEvent:
+     """Immutable event in the request lifecycle."""
+
+     event_id: str = field(default_factory=lambda: f"evt_{uuid.uuid4().hex[:12]}")
+     event_type: EventType = EventType.REQUEST_CREATED
+     request_id: str = ""
+     timestamp: datetime = field(default_factory=lambda: datetime.now(UTC))
+     sequence_number: int = 0
+     data: Dict[str, Any] = field(default_factory=dict)
+     metadata: Dict[str, Any] = field(default_factory=dict)
+
+     def to_dict(self) -> Dict[str, Any]:
+         """Convert to dictionary for storage."""
+         return {
+             "event_id": self.event_id,
+             "event_type": self.event_type.value,
+             "request_id": self.request_id,
+             "timestamp": self.timestamp.isoformat(),
+             "sequence_number": self.sequence_number,
+             "data": self.data,
+             "metadata": self.metadata,
+         }
+
+     @classmethod
+     def from_dict(cls, data: Dict[str, Any]) -> "RequestEvent":
+         """Create from dictionary."""
+         return cls(
+             event_id=data["event_id"],
+             event_type=EventType(data["event_type"]),
+             request_id=data["request_id"],
+             timestamp=datetime.fromisoformat(data["timestamp"]),
+             sequence_number=data["sequence_number"],
+             data=data["data"],
+             metadata=data.get("metadata", {}),
+         )
+
+
+ class EventStore:
+     """Append-only event store with replay capability."""
+
+     def __init__(
+         self,
+         storage_backend: Optional[Any] = None,
+         batch_size: int = 100,
+         flush_interval_seconds: float = 1.0,
+     ):
+         """Initialize event store."""
+         self.storage_backend = storage_backend
+         self.batch_size = batch_size
+         self.flush_interval = flush_interval_seconds
+
+         # In-memory buffer
+         self._buffer: List[RequestEvent] = []
+         self._buffer_lock = asyncio.Lock()
+         self._flush_in_progress = False
+
+         # Event stream
+         self._event_stream: List[RequestEvent] = []
+         self._stream_lock = asyncio.Lock()
+
+         # Projections
+         self._projections: Dict[str, Any] = {}
+         self._projection_handlers: Dict[str, Callable] = {}
+
+         # Sequence tracking
+         self._sequences: Dict[str, int] = {}
+
+         # Metrics
+         self.event_count = 0
+         self.flush_count = 0
+
+         # Start flush task
+         try:
+             self._flush_task = asyncio.create_task(self._flush_loop())
+         except RuntimeError:
+             # If no event loop is running, defer task creation
+             self._flush_task = None
+
+     async def _ensure_flush_task(self):
+         """Ensure the flush task is running."""
+         if self._flush_task is None:
+             self._flush_task = asyncio.create_task(self._flush_loop())
+
+     async def append(
+         self,
+         event_type: EventType,
+         request_id: str,
+         data: Dict[str, Any],
+         metadata: Optional[Dict[str, Any]] = None,
+     ) -> RequestEvent:
+         """Append an event to the store."""
+         # Ensure flush task is running
+         await self._ensure_flush_task()
+
+         async with self._buffer_lock:
+             # Get next sequence number
+             sequence = self._sequences.get(request_id, 0)
+             self._sequences[request_id] = sequence + 1
+
+             # Create event
+             event = RequestEvent(
+                 event_type=event_type,
+                 request_id=request_id,
+                 sequence_number=sequence,
+                 data=data,
+                 metadata=metadata or {},
+             )
+
+             # Add to buffer
+             self._buffer.append(event)
+             self.event_count += 1
+
+             # Check if we need to flush (but don't flush inside the lock)
+             needs_flush = len(self._buffer) >= self.batch_size
+
+         # Apply projections outside the lock
+         await self._apply_projections(event)
+
+         # Flush if needed (outside the lock to avoid deadlock)
+         if needs_flush and not self._flush_in_progress:
+             # Set flag to prevent concurrent flushes
+             self._flush_in_progress = True
+             try:
+                 await self._flush_buffer()
+             finally:
+                 self._flush_in_progress = False
+
+         logger.debug(
+             f"Appended event {event.event_type.value} for request {request_id} "
+             f"(seq: {sequence})"
+         )
+
+         return event
+
+     async def get_events(
+         self,
+         request_id: str,
+         start_sequence: int = 0,
+         end_sequence: Optional[int] = None,
+         event_types: Optional[List[EventType]] = None,
+     ) -> List[RequestEvent]:
+         """Get events for a request."""
+         # Ensure buffer is flushed
+         await self._flush_buffer()
+
+         events = []
+
+         # Get from in-memory stream
+         async with self._stream_lock:
+             for event in self._event_stream:
+                 if event.request_id != request_id:
+                     continue
+
+                 if event.sequence_number < start_sequence:
+                     continue
+
+                 if end_sequence is not None and event.sequence_number > end_sequence:
+                     continue
+
+                 if event_types and event.event_type not in event_types:
+                     continue
+
+                 events.append(event)
+
+         # Get from storage if available
+         if self.storage_backend and not events:
+             stored_events = await self._load_from_storage(
+                 request_id,
+                 start_sequence,
+                 end_sequence,
+             )
+             events.extend(stored_events)
+
+         # Sort by sequence
+         events.sort(key=lambda e: e.sequence_number)
+
+         return events
+
+     async def replay(
+         self,
+         request_id: str,
+         handler: Callable[[RequestEvent], Any],
+         start_sequence: int = 0,
+         end_sequence: Optional[int] = None,
+     ) -> None:
+         """Replay events for a request."""
+         events = await self.get_events(
+             request_id,
+             start_sequence,
+             end_sequence,
+         )
+
+         for event in events:
+             if asyncio.iscoroutinefunction(handler):
+                 await handler(event)
+             else:
+                 handler(event)
+
+     async def stream_events(
+         self,
+         request_id: Optional[str] = None,
+         event_types: Optional[List[EventType]] = None,
+         follow: bool = False,
+     ) -> AsyncIterator[RequestEvent]:
+         """Stream events as they occur."""
+         # Ensure buffer is flushed before streaming
+         await self._flush_buffer()
+
+         last_index = 0
+
+         while True:
+             # Get new events
+             async with self._stream_lock:
+                 events = self._event_stream[last_index:]
+                 last_index = len(self._event_stream)
+
+             # Filter and yield
+             for event in events:
+                 if request_id and event.request_id != request_id:
+                     continue
+
+                 if event_types and event.event_type not in event_types:
+                     continue
+
+                 yield event
+
+             if not follow:
+                 break
+
+             # Wait for new events
+             await asyncio.sleep(0.1)
+
+     def register_projection(
+         self,
+         name: str,
+         handler: Callable[[RequestEvent, Dict[str, Any]], Any],
+         initial_state: Optional[Dict[str, Any]] = None,
+     ) -> None:
+         """Register a projection handler."""
+         self._projection_handlers[name] = handler
+         self._projections[name] = initial_state or {}
+
+         logger.info(f"Registered projection: {name}")
+
+     def get_projection(self, name: str) -> Optional[Dict[str, Any]]:
+         """Get current projection state."""
+         return self._projections.get(name)
+
+     async def _apply_projections(self, event: RequestEvent) -> None:
+         """Apply registered projections to an event."""
+         for name, handler in self._projection_handlers.items():
+             try:
+                 state = self._projections[name]
+
+                 if asyncio.iscoroutinefunction(handler):
+                     new_state = await handler(event, state)
+                 else:
+                     new_state = handler(event, state)
+
+                 if new_state is not None:
+                     self._projections[name] = new_state
+
+             except Exception as e:
+                 logger.error(
+                     f"Projection {name} failed for event {event.event_id}: {e}"
+                 )
+
+     async def _flush_buffer(self) -> None:
+         """Flush event buffer to storage."""
+         # Acquire lock with timeout to prevent deadlock
+         try:
+             # Use wait_for to add timeout on lock acquisition
+             async with asyncio.timeout(1.0):  # 1 second timeout
+                 async with self._buffer_lock:
+                     if not self._buffer:
+                         return
+
+                     events_to_flush = self._buffer.copy()
+                     self._buffer.clear()
+         except asyncio.TimeoutError:
+             logger.warning("Timeout acquiring buffer lock during flush")
+             return
+
+         # Add to in-memory stream
+         async with self._stream_lock:
+             self._event_stream.extend(events_to_flush)
+
+         # Store if backend available
+         if self.storage_backend:
+             await self._store_events(events_to_flush)
+
+         self.flush_count += 1
+         logger.debug(f"Flushed {len(events_to_flush)} events")
+
+     async def _flush_loop(self) -> None:
+         """Periodically flush the buffer."""
+         while True:
+             try:
+                 await asyncio.sleep(self.flush_interval)
+                 if not self._flush_in_progress:
+                     self._flush_in_progress = True
+                     try:
+                         await self._flush_buffer()
+                     finally:
+                         self._flush_in_progress = False
+             except asyncio.CancelledError:
+                 # Final flush before shutdown
+                 if not self._flush_in_progress:
+                     await self._flush_buffer()
+                 break
+             except Exception as e:
+                 logger.error(f"Flush error: {e}")
+
+     async def _store_events(self, events: List[RequestEvent]) -> None:
+         """Store events in backend."""
+         try:
+             # Group by request ID for efficient storage
+             by_request = {}
+             for event in events:
+                 if event.request_id not in by_request:
+                     by_request[event.request_id] = []
+                 by_request[event.request_id].append(event.to_dict())
+
+             # Store each request's events
+             for request_id, request_events in by_request.items():
+                 key = f"events:{request_id}"
+                 await self.storage_backend.append(key, request_events)
+
+         except Exception as e:
+             logger.error(f"Failed to store events: {e}")
+
+     async def _load_from_storage(
+         self,
+         request_id: str,
+         start_sequence: int,
+         end_sequence: Optional[int],
+     ) -> List[RequestEvent]:
+         """Load events from storage."""
+         try:
+             key = f"events:{request_id}"
+             stored = await self.storage_backend.get(key)
+
+             if not stored:
+                 return []
+
+             events = []
+             for event_dict in stored:
+                 event = RequestEvent.from_dict(event_dict)
+
+                 if event.sequence_number < start_sequence:
+                     continue
+
+                 if end_sequence is not None and event.sequence_number > end_sequence:
+                     continue
+
+                 events.append(event)
+
+             return events
+
+         except Exception as e:
+             logger.error(f"Failed to load events for {request_id}: {e}")
+             return []
+
+     def get_stats(self) -> Dict[str, Any]:
+         """Get event store statistics."""
+         return {
+             "event_count": self.event_count,
+             "flush_count": self.flush_count,
+             "buffer_size": len(self._buffer),
+             "stream_size": len(self._event_stream),
+             "active_projections": list(self._projection_handlers.keys()),
+             "request_count": len(self._sequences),
+         }
+
+     async def close(self) -> None:
+         """Close event store and flush remaining events."""
+         if self._flush_task is not None:
+             self._flush_task.cancel()
+             try:
+                 await self._flush_task
+             except asyncio.CancelledError:
+                 pass
+         # Final flush
+         await self._flush_buffer()
+
+
+ # Example projection handlers
+ def request_state_projection(
+     event: RequestEvent, state: Dict[str, Any]
+ ) -> Dict[str, Any]:
+     """Track current state of all requests."""
+     request_id = event.request_id
+
+     if request_id not in state:
+         state[request_id] = {
+             "current_state": "initialized",
+             "created_at": event.timestamp,
+             "updated_at": event.timestamp,
+             "event_count": 0,
+         }
+
+     request_state = state[request_id]
+     request_state["event_count"] += 1
+     request_state["updated_at"] = event.timestamp
+
+     # Update state based on event type
+     if event.event_type == EventType.REQUEST_STARTED:
+         request_state["current_state"] = "executing"
+     elif event.event_type == EventType.REQUEST_COMPLETED:
+         request_state["current_state"] = "completed"
+     elif event.event_type == EventType.REQUEST_FAILED:
+         request_state["current_state"] = "failed"
+     elif event.event_type == EventType.REQUEST_CANCELLED:
+         request_state["current_state"] = "cancelled"
+
+     return state
+
+
+ def performance_metrics_projection(
+     event: RequestEvent, state: Dict[str, Any]
+ ) -> Dict[str, Any]:
+     """Track performance metrics across all requests."""
+     if "total_requests" not in state:
+         state.update(
+             {
+                 "total_requests": 0,
+                 "completed_requests": 0,
+                 "failed_requests": 0,
+                 "cancelled_requests": 0,
+                 "total_duration_ms": 0,
+                 "checkpoint_count": 0,
+             }
+         )
+
+     state["total_requests"] += 1
+
+     if event.event_type == EventType.REQUEST_COMPLETED:
+         state["completed_requests"] += 1
+         if "duration_ms" in event.data:
+             state["total_duration_ms"] += event.data["duration_ms"]
+     elif event.event_type == EventType.REQUEST_FAILED:
+         state["failed_requests"] += 1
+     elif event.event_type == EventType.REQUEST_CANCELLED:
+         state["cancelled_requests"] += 1
+     elif event.event_type == EventType.REQUEST_CHECKPOINTED:
+         state["checkpoint_count"] += 1
+
+     return state
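Taken together, the new event_store.py supports a simple append → project → replay workflow. Below is a minimal usage sketch based only on the code in this diff; the import path is inferred from the file listing above, no storage_backend is configured (events stay in the in-memory stream), and the request ID and payloads are illustrative:

import asyncio

from kailash.middleware.gateway.event_store import (
    EventStore,
    EventType,
    request_state_projection,
)


async def main():
    # No storage_backend: events live in the in-memory stream only.
    store = EventStore(batch_size=10, flush_interval_seconds=0.5)
    store.register_projection("request_state", request_state_projection)

    # append() tracks sequence numbers per request_id.
    await store.append(EventType.REQUEST_CREATED, "req_1", {"payload": "demo"})
    await store.append(EventType.REQUEST_STARTED, "req_1", {})
    await store.append(EventType.REQUEST_COMPLETED, "req_1", {"duration_ms": 12})

    # Replay the audit trail in sequence order (sync handlers are allowed).
    await store.replay("req_1", lambda e: print(e.sequence_number, e.event_type.value))

    # Projections are updated on every append.
    state = store.get_projection("request_state")["req_1"]
    print(state["current_state"])  # "completed"
    print(store.get_stats()["event_count"])  # 3

    await store.close()


asyncio.run(main())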
kailash/middleware/mcp/enhanced_server.py
@@ -389,7 +389,7 @@ result = {'execution_result': execution_result}
  tool_node = self.tools[tool_name]

  try:
-     result = tool_node.process({"tool_input": arguments})
+     result = tool_node.execute(tool_input=arguments)

      # Emit middleware event
      if self.event_stream:
@@ -433,7 +433,7 @@ result = {'execution_result': execution_result}
  resource_node = self.resources[uri]

  try:
-     result = resource_node.process({"resource_uri": uri})
+     result = resource_node.execute({"resource_uri": uri})

      # Emit middleware event
      if self.event_stream:
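Both hunks switch MCP node invocation in enhanced_server.py from the legacy process() call, which took a single input dict, to the execute() entry point. A hedged sketch of the call-convention change (the variables are illustrative; note that in this diff the tool path passes keyword arguments while the resource path still passes a positional dict):

# Sketch of the 0.5.0 → 0.6.1 invocation change (names illustrative).
arguments = {"query": "status"}  # hypothetical tool input

# 0.5.0 — nodes were invoked via process() with one input dict:
# result = tool_node.process({"tool_input": arguments})

# 0.6.1 — tools use execute() with keyword arguments:
result = tool_node.execute(tool_input=arguments)

# 0.6.1 — resources call execute() with a positional dict:
# result = resource_node.execute({"resource_uri": uri})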