kailash 0.6.6__py3-none-any.whl → 0.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kailash/__init__.py +35 -5
- kailash/adapters/__init__.py +5 -0
- kailash/adapters/mcp_platform_adapter.py +273 -0
- kailash/channels/__init__.py +21 -0
- kailash/channels/api_channel.py +409 -0
- kailash/channels/base.py +271 -0
- kailash/channels/cli_channel.py +661 -0
- kailash/channels/event_router.py +496 -0
- kailash/channels/mcp_channel.py +648 -0
- kailash/channels/session.py +423 -0
- kailash/mcp_server/discovery.py +1 -1
- kailash/middleware/mcp/enhanced_server.py +22 -16
- kailash/nexus/__init__.py +21 -0
- kailash/nexus/factory.py +413 -0
- kailash/nexus/gateway.py +545 -0
- kailash/nodes/__init__.py +2 -0
- kailash/nodes/ai/iterative_llm_agent.py +988 -17
- kailash/nodes/ai/llm_agent.py +29 -9
- kailash/nodes/api/__init__.py +2 -2
- kailash/nodes/api/monitoring.py +1 -1
- kailash/nodes/base_async.py +54 -14
- kailash/nodes/code/async_python.py +1 -1
- kailash/nodes/data/bulk_operations.py +939 -0
- kailash/nodes/data/query_builder.py +373 -0
- kailash/nodes/data/query_cache.py +512 -0
- kailash/nodes/monitoring/__init__.py +10 -0
- kailash/nodes/monitoring/deadlock_detector.py +964 -0
- kailash/nodes/monitoring/performance_anomaly.py +1078 -0
- kailash/nodes/monitoring/race_condition_detector.py +1151 -0
- kailash/nodes/monitoring/transaction_metrics.py +790 -0
- kailash/nodes/monitoring/transaction_monitor.py +931 -0
- kailash/nodes/system/__init__.py +17 -0
- kailash/nodes/system/command_parser.py +820 -0
- kailash/nodes/transaction/__init__.py +48 -0
- kailash/nodes/transaction/distributed_transaction_manager.py +983 -0
- kailash/nodes/transaction/saga_coordinator.py +652 -0
- kailash/nodes/transaction/saga_state_storage.py +411 -0
- kailash/nodes/transaction/saga_step.py +467 -0
- kailash/nodes/transaction/transaction_context.py +756 -0
- kailash/nodes/transaction/two_phase_commit.py +978 -0
- kailash/nodes/transform/processors.py +17 -1
- kailash/nodes/validation/__init__.py +21 -0
- kailash/nodes/validation/test_executor.py +532 -0
- kailash/nodes/validation/validation_nodes.py +447 -0
- kailash/resources/factory.py +1 -1
- kailash/runtime/async_local.py +84 -21
- kailash/runtime/local.py +21 -2
- kailash/runtime/parameter_injector.py +187 -31
- kailash/security.py +16 -1
- kailash/servers/__init__.py +32 -0
- kailash/servers/durable_workflow_server.py +430 -0
- kailash/servers/enterprise_workflow_server.py +466 -0
- kailash/servers/gateway.py +183 -0
- kailash/servers/workflow_server.py +290 -0
- kailash/utils/data_validation.py +192 -0
- kailash/workflow/builder.py +291 -12
- kailash/workflow/validation.py +144 -8
- {kailash-0.6.6.dist-info → kailash-0.7.0.dist-info}/METADATA +1 -1
- {kailash-0.6.6.dist-info → kailash-0.7.0.dist-info}/RECORD +63 -25
- {kailash-0.6.6.dist-info → kailash-0.7.0.dist-info}/WHEEL +0 -0
- {kailash-0.6.6.dist-info → kailash-0.7.0.dist-info}/entry_points.txt +0 -0
- {kailash-0.6.6.dist-info → kailash-0.7.0.dist-info}/licenses/LICENSE +0 -0
- {kailash-0.6.6.dist-info → kailash-0.7.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,466 @@
|
|
1
|
+
"""Enterprise workflow server implementation.
|
2
|
+
|
3
|
+
This module provides EnterpriseWorkflowServer - a renamed and improved version of
|
4
|
+
EnhancedDurableAPIGateway with full enterprise features enabled by default.
|
5
|
+
"""
|
6
|
+
|
7
|
+
import asyncio
|
8
|
+
import json
|
9
|
+
import logging
|
10
|
+
import uuid
|
11
|
+
from dataclasses import dataclass, field
|
12
|
+
from datetime import UTC, datetime
|
13
|
+
from typing import Any, Dict, List, Optional, Set, Union
|
14
|
+
|
15
|
+
from ..gateway.resource_resolver import ResourceReference, ResourceResolver
|
16
|
+
from ..gateway.security import SecretManager
|
17
|
+
from ..resources.registry import ResourceRegistry
|
18
|
+
from ..runtime.async_local import AsyncLocalRuntime, ExecutionContext
|
19
|
+
from ..workflow import Workflow
|
20
|
+
from .durable_workflow_server import DurableWorkflowServer
|
21
|
+
|
22
|
+
logger = logging.getLogger(__name__)
|
23
|
+
|
24
|
+
|
25
|
+
class WorkflowNotFoundError(Exception):
    """Raised when a requested workflow id has no registered workflow."""
|
29
|
+
|
30
|
+
|
31
|
+
@dataclass
class WorkflowRequest:
    """Enhanced workflow request with resource support.

    Carries the inputs for one workflow execution together with optional
    resource references (either plain string names or ResourceReference
    objects) and arbitrary caller context.
    """

    request_id: str = field(default_factory=lambda: str(uuid.uuid4()))
    inputs: Dict[str, Any] = field(default_factory=dict)
    resources: Dict[str, Union[str, ResourceReference]] = field(default_factory=dict)
    context: Dict[str, Any] = field(default_factory=dict)
    timestamp: datetime = field(default_factory=lambda: datetime.now(UTC))

    def to_dict(self) -> Dict[str, Any]:
        """Convert to JSON-serializable dict."""
        # String references pass through as-is; ResourceReference objects
        # serialize via their own to_dict().
        serialized_resources: Dict[str, Any] = {}
        for name, ref in self.resources.items():
            serialized_resources[name] = ref if isinstance(ref, str) else ref.to_dict()

        return {
            "request_id": self.request_id,
            "inputs": self.inputs,
            "resources": serialized_resources,
            "context": self.context,
            "timestamp": self.timestamp.isoformat(),
        }
|
53
|
+
|
54
|
+
|
55
|
+
@dataclass
class WorkflowResponse:
    """Response from workflow execution.

    Captures the outcome of a single request: its status, result or error,
    and start/completion timing.
    """

    request_id: str
    workflow_id: str
    status: str  # pending, running, completed, failed
    result: Optional[Any] = None
    error: Optional[str] = None
    started_at: Optional[datetime] = None
    completed_at: Optional[datetime] = None
    execution_time: Optional[float] = None

    def to_dict(self) -> Dict[str, Any]:
        """Convert to JSON-serializable dict."""

        def _iso(ts: Optional[datetime]) -> Optional[str]:
            # Timestamps are optional; serialize as ISO-8601 or None.
            return ts.isoformat() if ts is not None else None

        return {
            "request_id": self.request_id,
            "workflow_id": self.workflow_id,
            "status": self.status,
            "result": self.result,
            "error": self.error,
            "started_at": _iso(self.started_at),
            "completed_at": _iso(self.completed_at),
            "execution_time": self.execution_time,
        }
|
82
|
+
|
83
|
+
|
84
|
+
class EnterpriseWorkflowServer(DurableWorkflowServer):
    """Enterprise workflow server with full production features.

    This is the recommended server for production deployments, providing:

    **Core Features:**
    - Multi-workflow hosting with dynamic registration
    - REST API endpoints for workflow execution
    - WebSocket support for real-time updates
    - MCP server integration

    **Durability Features:**
    - Request durability and checkpointing
    - Automatic deduplication
    - Event sourcing for audit trail
    - Long-running request support
    - Recovery mechanisms

    **Enterprise Features:**
    - Resource reference resolution for non-serializable objects
    - Integration with ResourceRegistry for shared resources
    - Secret management for credentials
    - Async workflow execution support
    - Health checks for resources
    - Security integrations
    - Monitoring and metrics

    This server enables all features by default but can be configured
    to disable specific capabilities for development or testing.
    """

    def __init__(
        self,
        title: str = "Kailash Enterprise Workflow Server",
        description: str = "Enterprise workflow server with full production features",
        version: str = "1.0.0",
        max_workers: int = 20,
        cors_origins: Optional[list[str]] = None,
        # Durability configuration (enabled by default)
        enable_durability: bool = True,
        durability_opt_in: bool = False,  # Enterprise default: always on
        # Enterprise feature configuration
        resource_registry: Optional[ResourceRegistry] = None,
        secret_manager: Optional[SecretManager] = None,
        enable_async_execution: bool = True,
        enable_health_checks: bool = True,
        enable_resource_management: bool = True,
        **kwargs,
    ):
        """Initialize enterprise workflow server.

        Args:
            title: Server title for documentation.
            description: Server description.
            version: Server version string.
            max_workers: Maximum thread pool workers.
            cors_origins: Allowed CORS origins.
            enable_durability: Enable request durability features.
            durability_opt_in: When False (enterprise default) durability
                applies to all requests rather than being opt-in.
            resource_registry: Optional shared ResourceRegistry; a fresh one
                is created when omitted.
            secret_manager: Optional SecretManager; a default instance is
                created when omitted.
            enable_async_execution: Enable the async workflow runtime.
            enable_health_checks: Enable per-resource health checks.
            enable_resource_management: Enable resource registry/resolution.
            **kwargs: Forwarded to DurableWorkflowServer.
        """
        super().__init__(
            title=title,
            description=description,
            version=version,
            max_workers=max_workers,
            cors_origins=cors_origins,
            enable_durability=enable_durability,
            durability_opt_in=durability_opt_in,
            **kwargs,
        )

        # Enterprise components
        self.resource_registry = resource_registry or ResourceRegistry()
        self.secret_manager = secret_manager or SecretManager()
        self.enable_async_execution = enable_async_execution
        self.enable_health_checks = enable_health_checks
        self.enable_resource_management = enable_resource_management

        # Resource tracking: resource name -> set of workflow ids using it.
        self._workflow_resources: Dict[str, Set[str]] = {}
        self._async_runtime: Optional[AsyncLocalRuntime] = None
        self._resource_resolver: Optional[ResourceResolver] = None

        # Initialize enterprise components
        self._initialize_enterprise_features()

        # Register enterprise endpoints
        self._register_enterprise_endpoints()

    def _initialize_enterprise_features(self):
        """Initialize enterprise feature components.

        Only instantiates the async runtime / resource resolver for the
        features that are enabled, leaving the rest as None.
        """
        if self.enable_async_execution:
            self._async_runtime = AsyncLocalRuntime()

        if self.enable_resource_management:
            self._resource_resolver = ResourceResolver(
                resource_registry=self.resource_registry,
                secret_manager=self.secret_manager,
            )

        logger.info("Enterprise features initialized")

    def _register_enterprise_endpoints(self):
        """Register enterprise-specific endpoints."""

        @self.app.get("/enterprise/features")
        async def get_enterprise_features():
            """Get enabled enterprise features."""
            return {
                "durability": self.enable_durability,
                "async_execution": self.enable_async_execution,
                "resource_management": self.enable_resource_management,
                "health_checks": self.enable_health_checks,
                "secret_management": True,
                "features": [
                    "request_durability",
                    "resource_registry",
                    "secret_management",
                    "async_workflows",
                    "health_monitoring",
                    "resource_resolution",
                    "enterprise_security",
                ],
            }

        @self.app.get("/enterprise/resources")
        async def list_resources():
            """List all registered resources."""
            if not self.enable_resource_management:
                return {"error": "Resource management disabled"}

            return {
                "resources": list(self.resource_registry.list_resources()),
                "total": len(self.resource_registry.list_resources()),
            }

        @self.app.get("/enterprise/resources/{resource_name}")
        async def get_resource_info(resource_name: str):
            """Get information about a specific resource."""
            if not self.enable_resource_management:
                return {"error": "Resource management disabled"}

            try:
                resource = await self.resource_registry.get_resource(resource_name)
                health = await self.resource_registry.check_health(resource_name)

                return {
                    "name": resource_name,
                    "type": type(resource).__name__,
                    "health": health,
                    "workflows": list(
                        self._workflow_resources.get(resource_name, set())
                    ),
                }
            # FIX: original caught `(KeyError, Exception)` — Exception already
            # subsumes KeyError, so the tuple was redundant.
            except Exception:
                from fastapi import HTTPException

                raise HTTPException(status_code=404, detail="Resource not found")

        @self.app.get("/enterprise/health")
        async def enterprise_health_check():
            """Comprehensive enterprise health check.

            Aggregates the health of the base server, registered resources,
            the async runtime, and the secret manager into one status:
            healthy / degraded / unhealthy.
            """
            health_status = {
                "status": "healthy",
                "server_type": "enterprise_workflow_server",
                "timestamp": datetime.now(UTC).isoformat(),
                "components": {},
            }

            # Check base server health
            base_health = await self._get_base_health()
            health_status["components"]["base_server"] = base_health

            # Check resource health
            if self.enable_resource_management and self.enable_health_checks:
                resource_health = await self._check_resource_health()
                health_status["components"]["resources"] = resource_health

            # Check async runtime health
            if self.enable_async_execution and self._async_runtime:
                runtime_health = await self._check_runtime_health()
                health_status["components"]["async_runtime"] = runtime_health

            # Check secret manager health
            secret_health = await self._check_secret_manager_health()
            health_status["components"]["secret_manager"] = secret_health

            # Determine overall status: all healthy -> healthy, some healthy
            # -> degraded, none healthy -> unhealthy.
            component_statuses = [
                comp.get("status", "unhealthy")
                for comp in health_status["components"].values()
            ]

            if all(status == "healthy" for status in component_statuses):
                health_status["status"] = "healthy"
            elif any(status == "healthy" for status in component_statuses):
                health_status["status"] = "degraded"
            else:
                health_status["status"] = "unhealthy"

            return health_status

        @self.app.post("/enterprise/workflows/{workflow_id}/execute_async")
        async def execute_workflow_async(workflow_id: str, request: dict):
            """Execute workflow asynchronously with resource resolution."""
            if not self.enable_async_execution:
                from fastapi import HTTPException

                raise HTTPException(status_code=503, detail="Async execution disabled")

            if workflow_id not in self.workflows:
                from fastapi import HTTPException

                raise HTTPException(
                    status_code=404, detail=f"Workflow '{workflow_id}' not found"
                )

            # FIX: build the request object BEFORE the try block — the
            # except handler below references workflow_request, so creating
            # it inside the try could raise UnboundLocalError and mask the
            # original error.
            workflow_request = WorkflowRequest(
                inputs=request.get("inputs", {}),
                resources=request.get("resources", {}),
                context=request.get("context", {}),
            )

            try:
                # Resolve resources if enabled
                resolved_inputs = workflow_request.inputs.copy()
                if self.enable_resource_management and workflow_request.resources:
                    resolved_resources = (
                        await self._resource_resolver.resolve_resources(
                            workflow_request.resources
                        )
                    )
                    resolved_inputs.update(resolved_resources)

                # Execute workflow asynchronously
                workflow_obj = self.workflows[workflow_id].workflow
                execution_context = ExecutionContext(
                    request_id=workflow_request.request_id,
                    workflow_id=workflow_id,
                    metadata=workflow_request.context,
                )

                result = await self._async_runtime.execute_async(
                    workflow_obj,
                    inputs=resolved_inputs,
                    context=execution_context,
                )

                # Create response
                response = WorkflowResponse(
                    request_id=workflow_request.request_id,
                    workflow_id=workflow_id,
                    status="completed",
                    result=result,
                    started_at=workflow_request.timestamp,
                    completed_at=datetime.now(UTC),
                )

                response.execution_time = (
                    response.completed_at - response.started_at
                ).total_seconds()

                return response.to_dict()

            except Exception as e:
                logger.error(f"Async workflow execution failed: {e}")

                error_response = WorkflowResponse(
                    request_id=workflow_request.request_id,
                    workflow_id=workflow_id,
                    status="failed",
                    error=str(e),
                    started_at=workflow_request.timestamp,
                    completed_at=datetime.now(UTC),
                )

                return error_response.to_dict()

    async def _get_base_health(self) -> Dict[str, Any]:
        """Get base server health status."""
        return {
            "status": "healthy",
            "workflows": len(self.workflows),
            "mcp_servers": len(self.mcp_servers),
            "active_requests": (
                len(self.active_requests) if hasattr(self, "active_requests") else 0
            ),
        }

    async def _check_resource_health(self) -> Dict[str, Any]:
        """Check health of all registered resources.

        A single failing resource degrades (not fails) the overall status;
        a failure enumerating resources marks the whole component unhealthy.
        """
        resource_health = {"status": "healthy", "resources": {}}

        try:
            for resource_name in self.resource_registry.list_resources():
                try:
                    health = await self.resource_registry.check_health(resource_name)
                    resource_health["resources"][resource_name] = health
                except Exception as e:
                    resource_health["resources"][resource_name] = {
                        "status": "unhealthy",
                        "error": str(e),
                    }
                    resource_health["status"] = "degraded"

        except Exception as e:
            resource_health["status"] = "unhealthy"
            resource_health["error"] = str(e)

        return resource_health

    async def _check_runtime_health(self) -> Dict[str, Any]:
        """Check async runtime health."""
        try:
            # Simple health check - try to access runtime
            if self._async_runtime:
                return {
                    "status": "healthy",
                    "type": type(self._async_runtime).__name__,
                }
            else:
                return {
                    "status": "unhealthy",
                    "error": "Runtime not initialized",
                }
        except Exception as e:
            return {
                "status": "unhealthy",
                "error": str(e),
            }

    async def _check_secret_manager_health(self) -> Dict[str, Any]:
        """Check secret manager health."""
        try:
            # Simple health check for secret manager
            return {
                "status": "healthy",
                "type": type(self.secret_manager).__name__,
            }
        except Exception as e:
            return {
                "status": "unhealthy",
                "error": str(e),
            }

    def register_resource(self, name: str, resource: Any):
        """Register a resource for use in workflows.

        Args:
            name: Name under which the resource is registered.
            resource: The resource instance; wrapped in a factory closure.

        Raises:
            RuntimeError: If resource management is disabled.
        """
        if not self.enable_resource_management:
            raise RuntimeError("Resource management disabled")

        self.resource_registry.register_factory(name, lambda: resource)
        logger.info(f"Registered enterprise resource: {name}")

    def _register_root_endpoints(self):
        """Override to add enterprise info to root endpoint."""
        # Don't call super() to avoid duplicate endpoint registration

        # Register the enterprise root endpoint
        @self.app.get("/")
        async def root():
            """Server information with enterprise details."""
            base_info = {
                "name": self.app.title,
                "version": self.app.version,
                "workflows": list(self.workflows.keys()),
                "mcp_servers": list(self.mcp_servers.keys()),
                "type": "enterprise_workflow_server",
            }

            # Add enterprise info
            base_info["enterprise"] = {
                "durability": self.enable_durability,
                "async_execution": self.enable_async_execution,
                "resource_management": self.enable_resource_management,
                "health_checks": self.enable_health_checks,
                "features": [
                    "request_durability",
                    "resource_registry",
                    "secret_management",
                    "async_workflows",
                    "health_monitoring",
                    "resource_resolution",
                    "enterprise_security",
                ],
                "resources": (
                    len(self.resource_registry.list_resources())
                    if self.enable_resource_management
                    else 0
                ),
            }

            return base_info
|
@@ -0,0 +1,183 @@
|
|
1
|
+
"""Gateway creation utilities with enterprise defaults.
|
2
|
+
|
3
|
+
This module provides the main create_gateway function that creates
|
4
|
+
production-ready servers with enterprise features enabled by default.
|
5
|
+
"""
|
6
|
+
|
7
|
+
import logging
|
8
|
+
from typing import Any, List, Optional
|
9
|
+
|
10
|
+
from ..gateway.security import SecretManager
|
11
|
+
from ..resources.registry import ResourceRegistry
|
12
|
+
from .durable_workflow_server import DurableWorkflowServer
|
13
|
+
from .enterprise_workflow_server import EnterpriseWorkflowServer
|
14
|
+
from .workflow_server import WorkflowServer
|
15
|
+
|
16
|
+
logger = logging.getLogger(__name__)
|
17
|
+
|
18
|
+
|
19
|
+
def create_gateway(
    title: str = "Kailash Enterprise Gateway",
    description: str = "Production-ready workflow server with enterprise features",
    version: str = "1.0.0",
    # Server type selection
    server_type: str = "enterprise",  # "enterprise", "durable", "basic"
    # Basic configuration
    max_workers: int = 20,
    cors_origins: Optional[List[str]] = None,
    # Enterprise features (enabled by default)
    enable_durability: bool = True,
    enable_resource_management: bool = True,
    enable_async_execution: bool = True,
    enable_health_checks: bool = True,
    # Enterprise components
    resource_registry: Optional[ResourceRegistry] = None,
    secret_manager: Optional[SecretManager] = None,
    # Backward compatibility
    **kwargs,
    # FIX: the original annotation claimed EnterpriseWorkflowServer, but
    # server_type="durable"/"basic" return other server classes; the string
    # union is accurate and needs no new imports.
) -> "EnterpriseWorkflowServer | DurableWorkflowServer | WorkflowServer":
    """Create a production-ready workflow server.

    By default, creates an EnterpriseWorkflowServer with all enterprise
    features enabled. This is the recommended configuration for production
    deployments.

    Args:
        title: Server title for documentation
        description: Server description
        version: Server version
        server_type: Type of server to create ("enterprise", "durable", "basic")
        max_workers: Maximum thread pool workers (default: 20 for enterprise)
        cors_origins: Allowed CORS origins
        enable_durability: Enable request durability features
        enable_resource_management: Enable resource registry
        enable_async_execution: Enable async workflow execution
        enable_health_checks: Enable comprehensive health checks
        resource_registry: Optional ResourceRegistry instance
        secret_manager: Optional SecretManager instance
        **kwargs: Additional arguments passed to server constructor

    Returns:
        Configured workflow server instance

    Raises:
        ValueError: If server_type is not one of "enterprise", "durable",
            or "basic".

    Examples:
        >>> # Enterprise server with all features (recommended)
        >>> gateway = create_gateway()

        >>> # Enterprise server with custom configuration
        >>> gateway = create_gateway(
        ...     title="My Application",
        ...     cors_origins=["http://localhost:3000"],
        ...     max_workers=50
        ... )

        >>> # Durable server without full enterprise features
        >>> gateway = create_gateway(
        ...     server_type="durable",
        ...     enable_resource_management=False
        ... )

        >>> # Basic server for development
        >>> gateway = create_gateway(
        ...     server_type="basic",
        ...     enable_durability=False
        ... )
    """
    # Log server creation
    logger.info(f"Creating {server_type} workflow server: {title}")

    # Configuration shared by all server types.
    common_config = {
        "title": title,
        "description": description,
        "version": version,
        "max_workers": max_workers,
        "cors_origins": cors_origins,
        **kwargs,
    }

    # Create server based on type
    if server_type == "enterprise":
        server = EnterpriseWorkflowServer(
            enable_durability=enable_durability,
            enable_resource_management=enable_resource_management,
            enable_async_execution=enable_async_execution,
            enable_health_checks=enable_health_checks,
            resource_registry=resource_registry,
            secret_manager=secret_manager,
            **common_config,
        )

    elif server_type == "durable":
        server = DurableWorkflowServer(
            enable_durability=enable_durability, **common_config
        )

    elif server_type == "basic":
        server = WorkflowServer(**common_config)

    else:
        raise ValueError(f"Unknown server type: {server_type}")

    logger.info(
        f"Created {type(server).__name__} with features: durability={enable_durability}, "
        f"resources={enable_resource_management}, async={enable_async_execution}"
    )

    return server
|
128
|
+
|
129
|
+
|
130
|
+
def create_enterprise_gateway(**kwargs) -> EnterpriseWorkflowServer:
    """Build an EnterpriseWorkflowServer via :func:`create_gateway`.

    Convenience wrapper that pins ``server_type="enterprise"`` so callers
    can state explicitly that they want the full enterprise feature set.
    All other keyword arguments are forwarded unchanged.
    """
    return create_gateway(server_type="enterprise", **kwargs)
|
137
|
+
|
138
|
+
|
139
|
+
def create_durable_gateway(**kwargs) -> DurableWorkflowServer:
    """Build a DurableWorkflowServer via :func:`create_gateway`.

    Produces a server with durability (checkpointing, deduplication) but
    without resource management and the other enterprise capabilities.
    All keyword arguments are forwarded unchanged.
    """
    return create_gateway(server_type="durable", **kwargs)
|
146
|
+
|
147
|
+
|
148
|
+
def create_basic_gateway(**kwargs) -> WorkflowServer:
    """Build a minimal WorkflowServer via :func:`create_gateway`.

    No durability or enterprise features — intended for development and
    testing scenarios. All keyword arguments are forwarded unchanged.
    """
    return create_gateway(server_type="basic", **kwargs)
|
155
|
+
|
156
|
+
|
157
|
+
# Backward compatibility - maintain the existing create_gateway signature
|
158
|
+
# but issue deprecation warning for old usage patterns
|
159
|
+
def create_gateway_legacy(agent_ui_middleware=None, auth_manager=None, **kwargs):
    """Legacy create_gateway function for backward compatibility.

    Maintains compatibility with the old APIGateway-based create_gateway.
    New code should call the new create_gateway(), which builds an
    EnterpriseWorkflowServer by default. Emits a DeprecationWarning and
    delegates to the old implementation.
    """
    import warnings

    warnings.warn(
        "Legacy create_gateway usage detected. Consider migrating to the new "
        "create_gateway() function which creates EnterpriseWorkflowServer by default. "
        "See migration guide for details.",
        DeprecationWarning,
        stacklevel=2,
    )

    # Delegate to the previous APIGateway-based factory; imported lazily so
    # modern callers never pay for (or depend on) the legacy module.
    from ..middleware.communication.api_gateway import (
        create_gateway as old_create_gateway,
    )

    return old_create_gateway(
        agent_ui_middleware=agent_ui_middleware,
        auth_manager=auth_manager,
        **kwargs,
    )
|