kailash 0.6.6__py3-none-any.whl → 0.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82)
  1. kailash/__init__.py +35 -5
  2. kailash/access_control.py +64 -46
  3. kailash/adapters/__init__.py +5 -0
  4. kailash/adapters/mcp_platform_adapter.py +273 -0
  5. kailash/api/workflow_api.py +34 -3
  6. kailash/channels/__init__.py +21 -0
  7. kailash/channels/api_channel.py +409 -0
  8. kailash/channels/base.py +271 -0
  9. kailash/channels/cli_channel.py +661 -0
  10. kailash/channels/event_router.py +496 -0
  11. kailash/channels/mcp_channel.py +648 -0
  12. kailash/channels/session.py +423 -0
  13. kailash/mcp_server/discovery.py +57 -18
  14. kailash/middleware/communication/api_gateway.py +23 -3
  15. kailash/middleware/communication/realtime.py +83 -0
  16. kailash/middleware/core/agent_ui.py +1 -1
  17. kailash/middleware/gateway/storage_backends.py +393 -0
  18. kailash/middleware/mcp/enhanced_server.py +22 -16
  19. kailash/nexus/__init__.py +21 -0
  20. kailash/nexus/cli/__init__.py +5 -0
  21. kailash/nexus/cli/__main__.py +6 -0
  22. kailash/nexus/cli/main.py +176 -0
  23. kailash/nexus/factory.py +413 -0
  24. kailash/nexus/gateway.py +545 -0
  25. kailash/nodes/__init__.py +8 -5
  26. kailash/nodes/ai/iterative_llm_agent.py +988 -17
  27. kailash/nodes/ai/llm_agent.py +29 -9
  28. kailash/nodes/api/__init__.py +2 -2
  29. kailash/nodes/api/monitoring.py +1 -1
  30. kailash/nodes/base.py +29 -5
  31. kailash/nodes/base_async.py +54 -14
  32. kailash/nodes/code/async_python.py +1 -1
  33. kailash/nodes/code/python.py +50 -6
  34. kailash/nodes/data/async_sql.py +90 -0
  35. kailash/nodes/data/bulk_operations.py +939 -0
  36. kailash/nodes/data/query_builder.py +373 -0
  37. kailash/nodes/data/query_cache.py +512 -0
  38. kailash/nodes/monitoring/__init__.py +10 -0
  39. kailash/nodes/monitoring/deadlock_detector.py +964 -0
  40. kailash/nodes/monitoring/performance_anomaly.py +1078 -0
  41. kailash/nodes/monitoring/race_condition_detector.py +1151 -0
  42. kailash/nodes/monitoring/transaction_metrics.py +790 -0
  43. kailash/nodes/monitoring/transaction_monitor.py +931 -0
  44. kailash/nodes/security/behavior_analysis.py +414 -0
  45. kailash/nodes/system/__init__.py +17 -0
  46. kailash/nodes/system/command_parser.py +820 -0
  47. kailash/nodes/transaction/__init__.py +48 -0
  48. kailash/nodes/transaction/distributed_transaction_manager.py +983 -0
  49. kailash/nodes/transaction/saga_coordinator.py +652 -0
  50. kailash/nodes/transaction/saga_state_storage.py +411 -0
  51. kailash/nodes/transaction/saga_step.py +467 -0
  52. kailash/nodes/transaction/transaction_context.py +756 -0
  53. kailash/nodes/transaction/two_phase_commit.py +978 -0
  54. kailash/nodes/transform/processors.py +17 -1
  55. kailash/nodes/validation/__init__.py +21 -0
  56. kailash/nodes/validation/test_executor.py +532 -0
  57. kailash/nodes/validation/validation_nodes.py +447 -0
  58. kailash/resources/factory.py +1 -1
  59. kailash/runtime/access_controlled.py +9 -7
  60. kailash/runtime/async_local.py +84 -21
  61. kailash/runtime/local.py +21 -2
  62. kailash/runtime/parameter_injector.py +187 -31
  63. kailash/runtime/runner.py +6 -4
  64. kailash/runtime/testing.py +1 -1
  65. kailash/security.py +22 -3
  66. kailash/servers/__init__.py +32 -0
  67. kailash/servers/durable_workflow_server.py +430 -0
  68. kailash/servers/enterprise_workflow_server.py +522 -0
  69. kailash/servers/gateway.py +183 -0
  70. kailash/servers/workflow_server.py +293 -0
  71. kailash/utils/data_validation.py +192 -0
  72. kailash/workflow/builder.py +382 -15
  73. kailash/workflow/cyclic_runner.py +102 -10
  74. kailash/workflow/validation.py +144 -8
  75. kailash/workflow/visualization.py +99 -27
  76. {kailash-0.6.6.dist-info → kailash-0.8.0.dist-info}/METADATA +3 -2
  77. {kailash-0.6.6.dist-info → kailash-0.8.0.dist-info}/RECORD +81 -40
  78. kailash/workflow/builder_improvements.py +0 -207
  79. {kailash-0.6.6.dist-info → kailash-0.8.0.dist-info}/WHEEL +0 -0
  80. {kailash-0.6.6.dist-info → kailash-0.8.0.dist-info}/entry_points.txt +0 -0
  81. {kailash-0.6.6.dist-info → kailash-0.8.0.dist-info}/licenses/LICENSE +0 -0
  82. {kailash-0.6.6.dist-info → kailash-0.8.0.dist-info}/top_level.txt +0 -0
kailash/servers/enterprise_workflow_server.py (new file, +522 -0)
@@ -0,0 +1,522 @@
+ """Enterprise workflow server implementation.
+
+ This module provides EnterpriseWorkflowServer - a renamed and improved version of
+ EnhancedDurableAPIGateway with full enterprise features enabled by default.
+ """
+
+ import asyncio
+ import json
+ import logging
+ import uuid
+ from dataclasses import dataclass, field
+ from datetime import UTC, datetime
+ from typing import Any, Dict, List, Optional, Set, Union
+
+ from ..gateway.resource_resolver import ResourceReference, ResourceResolver
+ from ..gateway.security import SecretManager
+ from ..resources.registry import ResourceRegistry
+ from ..runtime.async_local import AsyncLocalRuntime, ExecutionContext
+ from ..workflow import Workflow
+ from .durable_workflow_server import DurableWorkflowServer
+
+ logger = logging.getLogger(__name__)
+
+
+ class WorkflowNotFoundError(Exception):
+     """Raised when workflow is not found."""
+
+     pass
+
+
+ @dataclass
+ class WorkflowRequest:
+     """Enhanced workflow request with resource support."""
+
+     request_id: str = field(default_factory=lambda: str(uuid.uuid4()))
+     inputs: Dict[str, Any] = field(default_factory=dict)
+     resources: Dict[str, Union[str, ResourceReference]] = field(default_factory=dict)
+     context: Dict[str, Any] = field(default_factory=dict)
+     timestamp: datetime = field(default_factory=lambda: datetime.now(UTC))
+
+     def to_dict(self) -> Dict[str, Any]:
+         """Convert to JSON-serializable dict."""
+         return {
+             "request_id": self.request_id,
+             "inputs": self.inputs,
+             "resources": {
+                 k: v if isinstance(v, str) else v.to_dict()
+                 for k, v in self.resources.items()
+             },
+             "context": self.context,
+             "timestamp": self.timestamp.isoformat(),
+         }
+
+
+ @dataclass
+ class WorkflowResponse:
+     """Response from workflow execution."""
+
+     request_id: str
+     workflow_id: str
+     status: str  # pending, running, completed, failed
+     result: Optional[Any] = None
+     error: Optional[str] = None
+     started_at: Optional[datetime] = None
+     completed_at: Optional[datetime] = None
+     execution_time: Optional[float] = None
+
+     def to_dict(self) -> Dict[str, Any]:
+         """Convert to JSON-serializable dict."""
+         return {
+             "request_id": self.request_id,
+             "workflow_id": self.workflow_id,
+             "status": self.status,
+             "result": self.result,
+             "error": self.error,
+             "started_at": self.started_at.isoformat() if self.started_at else None,
+             "completed_at": (
+                 self.completed_at.isoformat() if self.completed_at else None
+             ),
+             "execution_time": self.execution_time,
+         }
+
+
+ class EnterpriseWorkflowServer(DurableWorkflowServer):
+     """Enterprise workflow server with full production features.
+
+     This is the recommended server for production deployments, providing:
+
+     **Core Features:**
+     - Multi-workflow hosting with dynamic registration
+     - REST API endpoints for workflow execution
+     - WebSocket support for real-time updates
+     - MCP server integration
+
+     **Durability Features:**
+     - Request durability and checkpointing
+     - Automatic deduplication
+     - Event sourcing for audit trail
+     - Long-running request support
+     - Recovery mechanisms
+
+     **Enterprise Features:**
+     - Resource reference resolution for non-serializable objects
+     - Integration with ResourceRegistry for shared resources
+     - Secret management for credentials
+     - Async workflow execution support
+     - Health checks for resources
+     - Security integrations
+     - Monitoring and metrics
+
+     This server enables all features by default but can be configured
+     to disable specific capabilities for development or testing.
+     """
+
+     def __init__(
+         self,
+         title: str = "Kailash Enterprise Workflow Server",
+         description: str = "Enterprise workflow server with full production features",
+         version: str = "1.0.0",
+         max_workers: int = 20,
+         cors_origins: Optional[list[str]] = None,
+         # Durability configuration (enabled by default)
+         enable_durability: bool = True,
+         durability_opt_in: bool = False,  # Enterprise default: always on
+         # Enterprise feature configuration
+         resource_registry: Optional[ResourceRegistry] = None,
+         secret_manager: Optional[SecretManager] = None,
+         enable_async_execution: bool = True,
+         enable_health_checks: bool = True,
+         enable_resource_management: bool = True,
+         **kwargs,
+     ):
+         """Initialize enterprise workflow server."""
+         super().__init__(
+             title=title,
+             description=description,
+             version=version,
+             max_workers=max_workers,
+             cors_origins=cors_origins,
+             enable_durability=enable_durability,
+             durability_opt_in=durability_opt_in,
+             **kwargs,
+         )
+
+         # Enterprise components
+         self.resource_registry = resource_registry or ResourceRegistry()
+         self.secret_manager = secret_manager or SecretManager()
+         self.enable_async_execution = enable_async_execution
+         self.enable_health_checks = enable_health_checks
+         self.enable_resource_management = enable_resource_management
+
+         # Resource tracking
+         self._workflow_resources: Dict[str, Set[str]] = {}
+         self._async_runtime: Optional[AsyncLocalRuntime] = None
+         self._resource_resolver: Optional[ResourceResolver] = None
+
+         # Initialize enterprise components
+         self._initialize_enterprise_features()
+
+         # Register enterprise endpoints
+         self._register_enterprise_endpoints()
+
+     def _initialize_enterprise_features(self):
+         """Initialize enterprise feature components."""
+         if self.enable_async_execution:
+             self._async_runtime = AsyncLocalRuntime()
+
+         if self.enable_resource_management:
+             self._resource_resolver = ResourceResolver(
+                 resource_registry=self.resource_registry,
+                 secret_manager=self.secret_manager,
+             )
+
+         logger.info("Enterprise features initialized")
+
+     def _register_enterprise_endpoints(self):
+         """Register enterprise-specific endpoints."""
+
+         @self.app.get("/enterprise/features")
+         async def get_enterprise_features():
+             """Get enabled enterprise features."""
+             return {
+                 "durability": self.enable_durability,
+                 "async_execution": self.enable_async_execution,
+                 "resource_management": self.enable_resource_management,
+                 "health_checks": self.enable_health_checks,
+                 "secret_management": True,
+                 "features": [
+                     "request_durability",
+                     "resource_registry",
+                     "secret_management",
+                     "async_workflows",
+                     "health_monitoring",
+                     "resource_resolution",
+                     "enterprise_security",
+                 ],
+             }
+
+         @self.app.get("/enterprise/resources")
+         async def list_resources():
+             """List all registered resources."""
+             if not self.enable_resource_management:
+                 return {"error": "Resource management disabled"}
+
+             return {
+                 "resources": list(self.resource_registry.list_resources()),
+                 "total": len(self.resource_registry.list_resources()),
+             }
+
+         @self.app.get("/enterprise/resources/{resource_name}")
+         async def get_resource_info(resource_name: str):
+             """Get information about a specific resource."""
+             if not self.enable_resource_management:
+                 return {"error": "Resource management disabled"}
+
+             try:
+                 resource = await self.resource_registry.get_resource(resource_name)
+                 health = await self.resource_registry.check_health(resource_name)
+
+                 return {
+                     "name": resource_name,
+                     "type": type(resource).__name__,
+                     "health": health,
+                     "workflows": list(
+                         self._workflow_resources.get(resource_name, set())
+                     ),
+                 }
+             except (KeyError, Exception) as e:
+                 from fastapi import HTTPException
+
+                 raise HTTPException(status_code=404, detail="Resource not found")
+
+         @self.app.get("/enterprise/health")
+         async def enterprise_health_check():
+             """Comprehensive enterprise health check."""
+             health_status = {
+                 "status": "healthy",
+                 "server_type": "enterprise_workflow_server",
+                 "timestamp": datetime.now(UTC).isoformat(),
+                 "components": {},
+             }
+
+             # Check base server health
+             base_health = await self._get_base_health()
+             health_status["components"]["base_server"] = base_health
+
+             # Check resource health
+             if self.enable_resource_management and self.enable_health_checks:
+                 resource_health = await self._check_resource_health()
+                 health_status["components"]["resources"] = resource_health
+
+             # Check async runtime health
+             if self.enable_async_execution and self._async_runtime:
+                 runtime_health = await self._check_runtime_health()
+                 health_status["components"]["async_runtime"] = runtime_health
+
+             # Check secret manager health
+             secret_health = await self._check_secret_manager_health()
+             health_status["components"]["secret_manager"] = secret_health
+
+             # Determine overall status
+             component_statuses = [
+                 comp.get("status", "unhealthy")
+                 for comp in health_status["components"].values()
+             ]
+
+             if all(status == "healthy" for status in component_statuses):
+                 health_status["status"] = "healthy"
+             elif any(status == "healthy" for status in component_statuses):
+                 health_status["status"] = "degraded"
+             else:
+                 health_status["status"] = "unhealthy"
+
+             return health_status
+
+         @self.app.post("/enterprise/workflows/{workflow_id}/execute_async")
+         async def execute_workflow_async(workflow_id: str, request: dict):
+             """Execute workflow asynchronously with resource resolution."""
+             if not self.enable_async_execution:
+                 from fastapi import HTTPException
+
+                 raise HTTPException(status_code=503, detail="Async execution disabled")
+
+             if workflow_id not in self.workflows:
+                 from fastapi import HTTPException
+
+                 raise HTTPException(
+                     status_code=404, detail=f"Workflow '{workflow_id}' not found"
+                 )
+
+             try:
+                 # Create enhanced request
+                 workflow_request = WorkflowRequest(
+                     inputs=request.get("inputs", {}),
+                     resources=request.get("resources", {}),
+                     context=request.get("context", {}),
+                 )
+
+                 # Resolve resources if enabled
+                 resolved_inputs = workflow_request.inputs.copy()
+                 if self.enable_resource_management and workflow_request.resources:
+                     resolved_resources = (
+                         await self._resource_resolver.resolve_resources(
+                             workflow_request.resources
+                         )
+                     )
+                     resolved_inputs.update(resolved_resources)
+
+                 # Execute workflow asynchronously
+                 workflow_obj = self.workflows[workflow_id].workflow
+                 execution_context = ExecutionContext(
+                     request_id=workflow_request.request_id,
+                     workflow_id=workflow_id,
+                     metadata=workflow_request.context,
+                 )
+
+                 result = await self._async_runtime.execute_async(
+                     workflow_obj,
+                     inputs=resolved_inputs,
+                     context=execution_context,
+                 )
+
+                 # Create response
+                 response = WorkflowResponse(
+                     request_id=workflow_request.request_id,
+                     workflow_id=workflow_id,
+                     status="completed",
+                     result=result,
+                     started_at=workflow_request.timestamp,
+                     completed_at=datetime.now(UTC),
+                 )
+
+                 response.execution_time = (
+                     response.completed_at - response.started_at
+                 ).total_seconds()
+
+                 return response.to_dict()
+
+             except Exception as e:
+                 logger.error(f"Async workflow execution failed: {e}")
+
+                 error_response = WorkflowResponse(
+                     request_id=workflow_request.request_id,
+                     workflow_id=workflow_id,
+                     status="failed",
+                     error=str(e),
+                     started_at=workflow_request.timestamp,
+                     completed_at=datetime.now(UTC),
+                 )
+
+                 return error_response.to_dict()
+
+     async def _get_base_health(self) -> Dict[str, Any]:
+         """Get base server health status."""
+         return {
+             "status": "healthy",
+             "workflows": len(self.workflows),
+             "mcp_servers": len(self.mcp_servers),
+             "active_requests": (
+                 len(self.active_requests) if hasattr(self, "active_requests") else 0
+             ),
+         }
+
+     async def _check_resource_health(self) -> Dict[str, Any]:
+         """Check health of all registered resources."""
+         resource_health = {"status": "healthy", "resources": {}}
+
+         try:
+             for resource_name in self.resource_registry.list_resources():
+                 try:
+                     health = await self.resource_registry.check_health(resource_name)
+                     resource_health["resources"][resource_name] = health
+                 except Exception as e:
+                     resource_health["resources"][resource_name] = {
+                         "status": "unhealthy",
+                         "error": str(e),
+                     }
+                     resource_health["status"] = "degraded"
+
+         except Exception as e:
+             resource_health["status"] = "unhealthy"
+             resource_health["error"] = str(e)
+
+         return resource_health
+
+     async def _check_runtime_health(self) -> Dict[str, Any]:
+         """Check async runtime health."""
+         try:
+             # Simple health check - try to access runtime
+             if self._async_runtime:
+                 return {
+                     "status": "healthy",
+                     "type": type(self._async_runtime).__name__,
+                 }
+             else:
+                 return {
+                     "status": "unhealthy",
+                     "error": "Runtime not initialized",
+                 }
+         except Exception as e:
+             return {
+                 "status": "unhealthy",
+                 "error": str(e),
+             }
+
+     async def _check_secret_manager_health(self) -> Dict[str, Any]:
+         """Check secret manager health."""
+         try:
+             # Simple health check for secret manager
+             return {
+                 "status": "healthy",
+                 "type": type(self.secret_manager).__name__,
+             }
+         except Exception as e:
+             return {
+                 "status": "unhealthy",
+                 "error": str(e),
+             }
+
+     def register_resource(self, name: str, resource: Any):
+         """Register a resource for use in workflows."""
+         if not self.enable_resource_management:
+             raise RuntimeError("Resource management disabled")
+
+         self.resource_registry.register_factory(name, lambda: resource)
+         logger.info(f"Registered enterprise resource: {name}")
+
+     def _register_root_endpoints(self):
+         """Override to add enterprise info to root endpoint."""
+
+         # Register the enterprise root endpoint first (before super() to take precedence)
+         @self.app.get("/")
+         async def root():
+             """Server information with enterprise details."""
+             base_info = {
+                 "name": self.app.title,
+                 "version": self.app.version,
+                 "workflows": list(self.workflows.keys()),
+                 "mcp_servers": list(self.mcp_servers.keys()),
+                 "type": "enterprise_workflow_server",
+             }
+
+             # Add enterprise info
+             base_info["enterprise"] = {
+                 "durability": self.enable_durability,
+                 "async_execution": self.enable_async_execution,
+                 "resource_management": self.enable_resource_management,
+                 "health_checks": self.enable_health_checks,
+                 "features": [
+                     "request_durability",
+                     "resource_registry",
+                     "secret_management",
+                     "async_workflows",
+                     "health_monitoring",
+                     "resource_resolution",
+                     "enterprise_security",
+                 ],
+                 "resources": (
+                     len(self.resource_registry.list_resources())
+                     if self.enable_resource_management
+                     else 0
+                 ),
+             }
+
+             return base_info
+
+         # Now call super() to get other endpoints (health, workflows, etc.) but skip root
+         # We'll register them manually to avoid route conflicts
+         @self.app.get("/workflows")
+         async def list_workflows():
+             """List all registered workflows."""
+             return {
+                 name: {
+                     "type": reg.type,
+                     "description": reg.description,
+                     "version": reg.version,
+                     "tags": reg.tags,
+                     "endpoints": self._get_workflow_endpoints(name),
+                 }
+                 for name, reg in self.workflows.items()
+             }
+
+         @self.app.get("/health")
+         async def health_check():
+             """Server health check."""
+             health_status = {
+                 "status": "healthy",
+                 "server_type": "enterprise_workflow_server",
+                 "workflows": {},
+                 "mcp_servers": {},
+             }
+
+             # Check workflow health
+             for name, reg in self.workflows.items():
+                 if reg.type == "embedded":
+                     health_status["workflows"][name] = "healthy"
+                 else:
+                     # TODO: Implement proxy health check
+                     health_status["workflows"][name] = "unknown"
+
+             # Check MCP server health
+             for name, server in self.mcp_servers.items():
+                 # TODO: Implement MCP health check
+                 health_status["mcp_servers"][name] = "unknown"
+
+             return health_status
+
+         @self.app.websocket("/ws")
+         async def websocket_endpoint(websocket):
+             """WebSocket for real-time updates."""
+             from fastapi import WebSocket
+
+             await websocket.accept()
+             try:
+                 while True:
+                     # Basic WebSocket echo - subclasses can override
+                     data = await websocket.receive_text()
+                     await websocket.send_text(f"Echo: {data}")
+             except Exception as e:
+                 logger.error(f"WebSocket error: {e}")
+             finally:
+                 await websocket.close()
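
For orientation, a minimal usage sketch of the new EnterpriseWorkflowServer, based only on the constructor, register_resource(), and the FastAPI app attribute visible in this diff. The import path follows the file list above; the uvicorn invocation and the placeholder resource are illustrative assumptions, not documented API.

import uvicorn

from kailash.servers.enterprise_workflow_server import EnterpriseWorkflowServer

# All enterprise features (durability, async execution, resource management,
# health checks) are enabled by default; only a few options are overridden here.
server = EnterpriseWorkflowServer(
    title="Orders Workflow Server",
    max_workers=10,
)

# register_resource() wraps the object in a factory on the ResourceRegistry so it
# can be resolved for workflows and inspected via GET /enterprise/resources.
server.register_resource("orders_db", {"dsn": "postgresql://example"})  # placeholder resource

# The server exposes a FastAPI app (server.app); serving it with uvicorn is an
# assumed deployment pattern, not something this diff documents.
uvicorn.run(server.app, host="0.0.0.0", port=8000)

Once running, POST /enterprise/workflows/{workflow_id}/execute_async accepts a JSON body with "inputs", "resources", and "context" keys, matching the WorkflowRequest dataclass above, and GET /enterprise/health reports per-component status (base server, resources, async runtime, secret manager).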