kailash-0.6.6-py3-none-any.whl → kailash-0.7.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. kailash/__init__.py +35 -5
  2. kailash/adapters/__init__.py +5 -0
  3. kailash/adapters/mcp_platform_adapter.py +273 -0
  4. kailash/channels/__init__.py +21 -0
  5. kailash/channels/api_channel.py +409 -0
  6. kailash/channels/base.py +271 -0
  7. kailash/channels/cli_channel.py +661 -0
  8. kailash/channels/event_router.py +496 -0
  9. kailash/channels/mcp_channel.py +648 -0
  10. kailash/channels/session.py +423 -0
  11. kailash/mcp_server/discovery.py +1 -1
  12. kailash/middleware/mcp/enhanced_server.py +22 -16
  13. kailash/nexus/__init__.py +21 -0
  14. kailash/nexus/factory.py +413 -0
  15. kailash/nexus/gateway.py +545 -0
  16. kailash/nodes/__init__.py +2 -0
  17. kailash/nodes/ai/iterative_llm_agent.py +988 -17
  18. kailash/nodes/ai/llm_agent.py +29 -9
  19. kailash/nodes/api/__init__.py +2 -2
  20. kailash/nodes/api/monitoring.py +1 -1
  21. kailash/nodes/base_async.py +54 -14
  22. kailash/nodes/code/async_python.py +1 -1
  23. kailash/nodes/data/bulk_operations.py +939 -0
  24. kailash/nodes/data/query_builder.py +373 -0
  25. kailash/nodes/data/query_cache.py +512 -0
  26. kailash/nodes/monitoring/__init__.py +10 -0
  27. kailash/nodes/monitoring/deadlock_detector.py +964 -0
  28. kailash/nodes/monitoring/performance_anomaly.py +1078 -0
  29. kailash/nodes/monitoring/race_condition_detector.py +1151 -0
  30. kailash/nodes/monitoring/transaction_metrics.py +790 -0
  31. kailash/nodes/monitoring/transaction_monitor.py +931 -0
  32. kailash/nodes/system/__init__.py +17 -0
  33. kailash/nodes/system/command_parser.py +820 -0
  34. kailash/nodes/transaction/__init__.py +48 -0
  35. kailash/nodes/transaction/distributed_transaction_manager.py +983 -0
  36. kailash/nodes/transaction/saga_coordinator.py +652 -0
  37. kailash/nodes/transaction/saga_state_storage.py +411 -0
  38. kailash/nodes/transaction/saga_step.py +467 -0
  39. kailash/nodes/transaction/transaction_context.py +756 -0
  40. kailash/nodes/transaction/two_phase_commit.py +978 -0
  41. kailash/nodes/transform/processors.py +17 -1
  42. kailash/nodes/validation/__init__.py +21 -0
  43. kailash/nodes/validation/test_executor.py +532 -0
  44. kailash/nodes/validation/validation_nodes.py +447 -0
  45. kailash/resources/factory.py +1 -1
  46. kailash/runtime/async_local.py +84 -21
  47. kailash/runtime/local.py +21 -2
  48. kailash/runtime/parameter_injector.py +187 -31
  49. kailash/security.py +16 -1
  50. kailash/servers/__init__.py +32 -0
  51. kailash/servers/durable_workflow_server.py +430 -0
  52. kailash/servers/enterprise_workflow_server.py +466 -0
  53. kailash/servers/gateway.py +183 -0
  54. kailash/servers/workflow_server.py +290 -0
  55. kailash/utils/data_validation.py +192 -0
  56. kailash/workflow/builder.py +291 -12
  57. kailash/workflow/validation.py +144 -8
  58. {kailash-0.6.6.dist-info → kailash-0.7.0.dist-info}/METADATA +1 -1
  59. {kailash-0.6.6.dist-info → kailash-0.7.0.dist-info}/RECORD +63 -25
  60. {kailash-0.6.6.dist-info → kailash-0.7.0.dist-info}/WHEEL +0 -0
  61. {kailash-0.6.6.dist-info → kailash-0.7.0.dist-info}/entry_points.txt +0 -0
  62. {kailash-0.6.6.dist-info → kailash-0.7.0.dist-info}/licenses/LICENSE +0 -0
  63. {kailash-0.6.6.dist-info → kailash-0.7.0.dist-info}/top_level.txt +0 -0
kailash/servers/durable_workflow_server.py
@@ -0,0 +1,430 @@
+"""Durable workflow server implementation.
+
+This module provides DurableWorkflowServer - a renamed and improved version of
+DurableAPIGateway with request durability and checkpointing capabilities.
+"""
+
+import asyncio
+import logging
+from datetime import UTC, datetime
+from typing import Any, Callable, Dict, List, Optional
+
+from fastapi import HTTPException, Request, Response
+from fastapi.responses import JSONResponse
+
+from ..middleware.gateway.checkpoint_manager import CheckpointManager
+from ..middleware.gateway.deduplicator import RequestDeduplicator
+from ..middleware.gateway.durable_request import (
+    DurableRequest,
+    RequestMetadata,
+    RequestState,
+)
+from ..middleware.gateway.event_store import (
+    EventStore,
+    EventType,
+    performance_metrics_projection,
+    request_state_projection,
+)
+from .workflow_server import WorkflowServer
+
+logger = logging.getLogger(__name__)
+
+
+class DurableWorkflowServer(WorkflowServer):
+    """Workflow server with durable request handling.
+
+    Extends the basic WorkflowServer with:
+    - Request durability and checkpointing
+    - Automatic deduplication
+    - Event sourcing for audit trail
+    - Long-running request support
+    - Recovery mechanisms
+
+    This server provides reliability features for production deployments
+    where request durability is important. For full enterprise features,
+    consider using EnterpriseWorkflowServer.
+    """
+
+    def __init__(
+        self,
+        title: str = "Kailash Durable Workflow Server",
+        description: str = "Durable workflow server with checkpointing",
+        version: str = "1.0.0",
+        max_workers: int = 10,
+        cors_origins: Optional[list[str]] = None,
+        # Durability configuration
+        enable_durability: bool = True,
+        checkpoint_manager: Optional[CheckpointManager] = None,
+        deduplicator: Optional[RequestDeduplicator] = None,
+        event_store: Optional[EventStore] = None,
+        durability_opt_in: bool = True,  # If True, durability is opt-in per endpoint
+        **kwargs,
+    ):
+        """Initialize durable workflow server."""
+        super().__init__(
+            title=title,
+            description=description,
+            version=version,
+            max_workers=max_workers,
+            cors_origins=cors_origins,
+            **kwargs,
+        )
+
+        # Durability components
+        self.enable_durability = enable_durability
+        self.durability_opt_in = durability_opt_in
+        self.checkpoint_manager = checkpoint_manager or CheckpointManager()
+        # Initialize deduplicator lazily to avoid event loop issues
+        self._deduplicator = deduplicator
+        self._event_store = event_store
+
+        # Track active requests
+        self.active_requests: Dict[str, DurableRequest] = {}
+
+        # Track background tasks
+        self._background_tasks: List[asyncio.Task] = []
+
+        # Initialize durability components lazily
+        self._durability_initialized = False
+
+        # Add durability middleware if enabled
+        if self.enable_durability:
+            self._add_durability_middleware()
+
+        # Register durability endpoints
+        self._register_durability_endpoints()
+
+    @property
+    def deduplicator(self) -> RequestDeduplicator:
+        """Get deduplicator instance, initializing if needed."""
+        if self._deduplicator is None:
+            self._deduplicator = RequestDeduplicator()
+        return self._deduplicator
+
+    @property
+    def event_store(self) -> EventStore:
+        """Get event store instance, initializing if needed."""
+        if self._event_store is None:
+            self._event_store = EventStore()
+            # Register event projections
+            self._event_store.register_projection(
+                "request_states",
+                request_state_projection,
+            )
+            self._event_store.register_projection(
+                "performance_metrics",
+                performance_metrics_projection,
+            )
+        return self._event_store
+
+    def _add_durability_middleware(self):
+        """Add middleware for durable request handling."""
+
+        @self.app.middleware("http")
+        async def durability_middleware(request: Request, call_next):
+            """Middleware to handle request durability."""
+            # Check if this endpoint should use durability
+            should_be_durable = self._should_use_durability(request)
+
+            if not should_be_durable:
+                # Pass through without durability
+                return await call_next(request)
+
+            # Extract request metadata
+            request_id = (
+                request.headers.get("X-Request-ID")
+                or f"req_{datetime.now(UTC).timestamp()}"
+            )
+            current_time = datetime.now(UTC)
+            metadata = RequestMetadata(
+                request_id=request_id,
+                method=request.method,
+                path=str(request.url.path),
+                headers=dict(request.headers),
+                query_params=dict(request.query_params),
+                body=None,  # Will be set later if needed
+                client_ip=request.client.host if request.client else "unknown",
+                user_id=None,  # Will be set from auth if available
+                tenant_id=None,  # Will be set from auth if available
+                idempotency_key=request.headers.get("Idempotency-Key"),
+                created_at=current_time,
+                updated_at=current_time,
+            )
+
+            try:
+                # Check for duplicate request
+                cached_response = await self.deduplicator.check_duplicate(
+                    method=request.method,
+                    path=str(request.url.path),
+                    query_params=dict(request.query_params),
+                    body=metadata.body,
+                    headers=dict(request.headers),
+                    idempotency_key=metadata.idempotency_key,
+                )
+                if cached_response:
+                    logger.info(f"Duplicate request detected: {request_id}")
+                    return JSONResponse(content=cached_response)
+
+                # Create durable request
+                durable_request = DurableRequest(
+                    metadata=metadata,
+                )
+                self.active_requests[request_id] = durable_request
+
+                # Emit start event
+                await self.event_store.append(
+                    EventType.REQUEST_STARTED,
+                    request_id,
+                    {
+                        "path": metadata.path,
+                        "method": metadata.method,
+                        "timestamp": metadata.created_at.isoformat(),
+                    },
+                )
+
+                # Create checkpoint before processing
+                from ..middleware.gateway.durable_request import Checkpoint
+
+                checkpoint = Checkpoint(
+                    checkpoint_id=f"ckpt_{request_id}_init",
+                    request_id=request_id,
+                    sequence=0,
+                    name="request_initialized",
+                    state=RequestState.INITIALIZED,
+                    data={
+                        "metadata": {
+                            "request_id": metadata.request_id,
+                            "method": metadata.method,
+                            "path": metadata.path,
+                            "client_ip": metadata.client_ip,
+                            "created_at": metadata.created_at.isoformat(),
+                        },
+                        "created_at": datetime.now(UTC).isoformat(),
+                    },
+                    workflow_state=None,
+                    created_at=datetime.now(UTC),
+                    size_bytes=0,
+                )
+                await self.checkpoint_manager.save_checkpoint(checkpoint)
+
+                # Process request
+                response = await call_next(request)
+
+                # Update state to completed
+                durable_request.state = RequestState.COMPLETED
+
+                # Cache response for deduplication
+                if response.status_code < 400:
+                    # Only cache successful responses
+                    response_body = b"".join(
+                        [chunk async for chunk in response.body_iterator]
+                    )
+                    try:
+                        response_data = {"content": response_body.decode()}
+                    except UnicodeDecodeError:
+                        response_data = {"content": response_body.hex()}
+
+                    await self.deduplicator.cache_response(
+                        method=metadata.method,
+                        path=metadata.path,
+                        query_params=metadata.query_params,
+                        body=metadata.body,
+                        headers=metadata.headers,
+                        idempotency_key=metadata.idempotency_key,
+                        response_data=response_data,
+                        status_code=response.status_code,
+                        response_headers=dict(response.headers),
+                    )
+
+                    # Recreate response with new body
+                    response = Response(
+                        content=response_body,
+                        status_code=response.status_code,
+                        headers=dict(response.headers),
+                        media_type=response.media_type,
+                    )
+
+                # Emit completion event
+                await self.event_store.append(
+                    EventType.REQUEST_COMPLETED,
+                    request_id,
+                    {
+                        "status_code": response.status_code,
+                        "timestamp": datetime.now(UTC).isoformat(),
+                    },
+                )
+
+                return response
+
+            except Exception as e:
+                # Update state to failed
+                if request_id in self.active_requests:
+                    self.active_requests[request_id].state = RequestState.FAILED
+
+                # Emit failure event
+                await self.event_store.append(
+                    EventType.REQUEST_FAILED,
+                    request_id,
+                    {
+                        "error": str(e),
+                        "timestamp": datetime.now(UTC).isoformat(),
+                    },
+                )
+
+                logger.error(f"Request {request_id} failed: {e}")
+                raise
+
+            finally:
+                # Clean up active request
+                if request_id in self.active_requests:
+                    del self.active_requests[request_id]
+
+    def _should_use_durability(self, request: Request) -> bool:
+        """Determine if request should use durability features."""
+        if not self.enable_durability:
+            return False
+
+        if self.durability_opt_in:
+            # Check for durability header
+            return request.headers.get("X-Durable", "").lower() == "true"
+        else:
+            # Durability enabled by default, check for opt-out
+            return request.headers.get("X-Durable", "").lower() != "false"
+
+    def _register_durability_endpoints(self):
+        """Register durability-specific endpoints."""
+
+        @self.app.get("/durability/status")
+        async def durability_status():
+            """Get durability system status."""
+            return {
+                "enabled": self.enable_durability,
+                "opt_in": self.durability_opt_in,
+                "active_requests": len(self.active_requests),
+                "checkpoint_count": len(
+                    getattr(self.checkpoint_manager, "_memory_checkpoints", [])
+                ),
+                "event_count": self.event_store.event_count,
+            }
+
+        @self.app.get("/durability/requests")
+        async def list_active_requests():
+            """List currently active durable requests."""
+            return {
+                request_id: {
+                    "state": req.state.value,
+                    "metadata": {
+                        "request_id": req.metadata.request_id,
+                        "method": req.metadata.method,
+                        "path": req.metadata.path,
+                        "client_ip": req.metadata.client_ip,
+                        "created_at": req.metadata.created_at.isoformat(),
+                    },
+                    "created_at": req.metadata.created_at.isoformat(),
+                }
+                for request_id, req in self.active_requests.items()
+            }
+
+        @self.app.get("/durability/requests/{request_id}")
+        async def get_request_status(request_id: str):
+            """Get status of a specific request."""
+            if request_id in self.active_requests:
+                req = self.active_requests[request_id]
+                return {
+                    "request_id": request_id,
+                    "state": req.state.value,
+                    "metadata": req.metadata.model_dump(),
+                    "active": True,
+                }
+
+            # Check checkpoint storage
+            checkpoint = await self.checkpoint_manager.load_latest_checkpoint(
+                request_id
+            )
+            if checkpoint:
+                return {
+                    "request_id": request_id,
+                    "state": checkpoint.state.value,
+                    "metadata": checkpoint.data.get("metadata", {}),
+                    "active": False,
+                }
+
+            raise HTTPException(status_code=404, detail="Request not found")
+
+        @self.app.post("/durability/requests/{request_id}/recover")
+        async def recover_request(request_id: str):
+            """Attempt to recover a failed request."""
+            checkpoint = await self.checkpoint_manager.load_latest_checkpoint(
+                request_id
+            )
+            if not checkpoint:
+                raise HTTPException(
+                    status_code=404, detail="Request checkpoint not found"
+                )
+
+            # TODO: Implement request recovery logic
+            return {
+                "message": f"Recovery initiated for request {request_id}",
+                "checkpoint": checkpoint.to_dict(),
+            }
+
+        @self.app.get("/durability/events")
+        async def list_events(limit: int = 100, offset: int = 0):
+            """List recent durability events."""
+            events = await self.event_store.get_events(limit=limit, offset=offset)
+            return {
+                "events": [
+                    {
+                        "type": event.type.value,
+                        "data": event.data,
+                        "timestamp": event.timestamp.isoformat(),
+                        "event_id": event.event_id,
+                    }
+                    for event in events
+                ],
+                "total": len(events),
+                "limit": limit,
+                "offset": offset,
+            }
+
+    async def cleanup_completed_requests(self, max_age_hours: int = 24):
+        """Clean up old completed request data."""
+        cutoff_time = datetime.now(UTC).timestamp() - (max_age_hours * 3600)
+
+        # Clean up checkpoints - using garbage collection method
+        await self.checkpoint_manager._garbage_collection()
+
+        # Clean up cached responses - using internal cleanup
+        await self.deduplicator._cleanup_expired()
+
+        logger.info(f"Cleaned up durability data older than {max_age_hours} hours")
+
+    def _register_root_endpoints(self):
+        """Override to add durability info to root endpoint."""
+        super()._register_root_endpoints()
+
+        # Override the root endpoint to include durability info
+        @self.app.get("/")
+        async def root():
+            """Server information with durability details."""
+            base_info = {
+                "name": self.app.title,
+                "version": self.app.version,
+                "workflows": list(self.workflows.keys()),
+                "mcp_servers": list(self.mcp_servers.keys()),
+                "type": "durable_workflow_server",
+            }
+
+            # Add durability info
+            base_info["durability"] = {
+                "enabled": self.enable_durability,
+                "opt_in": self.durability_opt_in,
+                "features": [
+                    "request_checkpointing",
+                    "automatic_deduplication",
+                    "event_sourcing",
+                    "request_recovery",
+                ],
+            }
+
+            return base_info
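
For orientation, here is a minimal usage sketch of the new server. It relies only on what the added file shows: the constructor arguments, the FastAPI instance the class decorates via `self.app`, the `X-Durable`, `X-Request-ID`, and `Idempotency-Key` headers, and the `/durability/*` endpoints. The uvicorn invocation, the `httpx` client calls, and the `/workflows/orders/run` route are illustrative assumptions, not APIs confirmed by this release.

    # Hypothetical usage sketch -- not part of the released package.
    import httpx
    import uvicorn

    from kailash.servers.durable_workflow_server import DurableWorkflowServer

    server = DurableWorkflowServer(
        title="Orders API",
        enable_durability=True,
        durability_opt_in=True,  # clients opt in per request via the X-Durable header
    )

    # Serve the underlying FastAPI app (attribute assumed public; the diff uses
    # @self.app.middleware / @self.app.get internally).
    # uvicorn.run(server.app, host="0.0.0.0", port=8000)

    def submit_order():
        # Client side: mark the request durable and make retries idempotent.
        resp = httpx.post(
            "http://localhost:8000/workflows/orders/run",  # hypothetical workflow route
            json={"item": "widget", "qty": 3},
            headers={
                "X-Durable": "true",              # opt in to checkpointing + deduplication
                "X-Request-ID": "req-0001",       # otherwise a timestamp-based id is generated
                "Idempotency-Key": "order-0001",  # same key => cached response on retry
            },
        )
        resp.raise_for_status()
        return resp.json()

    def durability_status():
        # Inspect the durability subsystem via the endpoints registered above.
        return httpx.get("http://localhost:8000/durability/status").json()

Note the opt-in/opt-out switch in `_should_use_durability`: with `durability_opt_in=False`, every request is treated as durable unless it sends `X-Durable: false`, whereas the default shown here requires clients to send `X-Durable: true` explicitly.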