kailash 0.5.0__py3-none-any.whl → 0.6.0__py3-none-any.whl
This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as published in their public registry.
- kailash/__init__.py +1 -1
- kailash/client/__init__.py +12 -0
- kailash/client/enhanced_client.py +306 -0
- kailash/core/actors/__init__.py +16 -0
- kailash/core/actors/connection_actor.py +566 -0
- kailash/core/actors/supervisor.py +364 -0
- kailash/edge/__init__.py +16 -0
- kailash/edge/compliance.py +834 -0
- kailash/edge/discovery.py +659 -0
- kailash/edge/location.py +582 -0
- kailash/gateway/__init__.py +33 -0
- kailash/gateway/api.py +289 -0
- kailash/gateway/enhanced_gateway.py +357 -0
- kailash/gateway/resource_resolver.py +217 -0
- kailash/gateway/security.py +227 -0
- kailash/middleware/auth/models.py +2 -2
- kailash/middleware/database/base_models.py +1 -7
- kailash/middleware/gateway/__init__.py +22 -0
- kailash/middleware/gateway/checkpoint_manager.py +398 -0
- kailash/middleware/gateway/deduplicator.py +382 -0
- kailash/middleware/gateway/durable_gateway.py +417 -0
- kailash/middleware/gateway/durable_request.py +498 -0
- kailash/middleware/gateway/event_store.py +459 -0
- kailash/nodes/admin/permission_check.py +817 -33
- kailash/nodes/admin/role_management.py +1242 -108
- kailash/nodes/admin/schema_manager.py +438 -0
- kailash/nodes/admin/user_management.py +1124 -1582
- kailash/nodes/code/__init__.py +8 -1
- kailash/nodes/code/async_python.py +1035 -0
- kailash/nodes/code/python.py +1 -0
- kailash/nodes/data/async_sql.py +9 -3
- kailash/nodes/data/sql.py +20 -11
- kailash/nodes/data/workflow_connection_pool.py +643 -0
- kailash/nodes/rag/__init__.py +1 -4
- kailash/resources/__init__.py +40 -0
- kailash/resources/factory.py +533 -0
- kailash/resources/health.py +319 -0
- kailash/resources/reference.py +288 -0
- kailash/resources/registry.py +392 -0
- kailash/runtime/async_local.py +711 -302
- kailash/testing/__init__.py +34 -0
- kailash/testing/async_test_case.py +353 -0
- kailash/testing/async_utils.py +345 -0
- kailash/testing/fixtures.py +458 -0
- kailash/testing/mock_registry.py +495 -0
- kailash/workflow/__init__.py +8 -0
- kailash/workflow/async_builder.py +621 -0
- kailash/workflow/async_patterns.py +766 -0
- kailash/workflow/cyclic_runner.py +107 -16
- kailash/workflow/graph.py +7 -2
- kailash/workflow/resilience.py +11 -1
- {kailash-0.5.0.dist-info → kailash-0.6.0.dist-info}/METADATA +7 -4
- {kailash-0.5.0.dist-info → kailash-0.6.0.dist-info}/RECORD +57 -22
- {kailash-0.5.0.dist-info → kailash-0.6.0.dist-info}/WHEEL +0 -0
- {kailash-0.5.0.dist-info → kailash-0.6.0.dist-info}/entry_points.txt +0 -0
- {kailash-0.5.0.dist-info → kailash-0.6.0.dist-info}/licenses/LICENSE +0 -0
- {kailash-0.5.0.dist-info → kailash-0.6.0.dist-info}/top_level.txt +0 -0
kailash/workflow/async_builder.py (new file)
@@ -0,0 +1,621 @@
"""
AsyncWorkflowBuilder - Async-first workflow development with enhanced ergonomics.

This module provides an async-optimized workflow builder with built-in patterns,
resource management integration, and type-safe construction helpers.
"""

import ast
import asyncio
import inspect
import textwrap
import uuid
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Set, TypeVar, Union

from ..nodes.base import Node
from ..resources.registry import ResourceFactory, ResourceRegistry
from .builder import WorkflowBuilder
from .graph import Workflow

T = TypeVar("T")


@dataclass
class RetryPolicy:
    """Retry policy configuration for async nodes."""

    max_attempts: int = 3
    initial_delay: float = 1.0
    backoff_factor: float = 2.0
    max_delay: float = 60.0
    retry_exceptions: Optional[List[str]] = None

    def to_dict(self) -> Dict[str, Any]:
        return {
            "max_attempts": self.max_attempts,
            "initial_delay": self.initial_delay,
            "backoff_factor": self.backoff_factor,
            "max_delay": self.max_delay,
            "retry_exceptions": self.retry_exceptions,
        }


@dataclass
class ErrorHandler:
    """Error handler configuration for async nodes."""

    handler_type: str  # 'log', 'ignore', 'fallback', 'custom'
    fallback_value: Optional[Any] = None
    custom_handler: Optional[str] = None  # Code string for custom handler
    log_level: str = "error"


class AsyncWorkflowBuilder(WorkflowBuilder):
    """Async-optimized workflow builder with enhanced features."""

    def __init__(
        self,
        name: str = None,
        resource_registry: ResourceRegistry = None,
        description: str = None,
    ):
        super().__init__()
        self.name = name or f"async_workflow_{uuid.uuid4().hex[:8]}"
        self.description = description
        self._resource_registry = resource_registry or ResourceRegistry()
        self._resource_requirements: Set[str] = set()
        self._error_handlers: Dict[str, ErrorHandler] = {}
        self._retry_policies: Dict[str, RetryPolicy] = {}
        self._node_metadata: Dict[str, Dict[str, Any]] = {}
        self._workflow_metadata: Dict[str, Any] = {
            "async_workflow": True,
            "builder_version": "1.0",
            "name": self.name,
            "description": description,
        }

    def add_async_code(
        self,
        node_id: str,
        code: str,
        *,
        timeout: int = 30,
        max_concurrent_tasks: int = 10,
        retry_policy: RetryPolicy = None,
        error_handler: ErrorHandler = None,
        required_resources: List[str] = None,
        description: str = None,
        **kwargs,
    ) -> "AsyncWorkflowBuilder":
        """Add async Python code node with enhanced configuration."""
        # Clean up code indentation
        code = textwrap.dedent(code).strip()

        # Validate code
        self._validate_async_code(code)

        # Track resource requirements
        if required_resources:
            self._resource_requirements.update(required_resources)
            self._node_metadata.setdefault(node_id, {})[
                "required_resources"
            ] = required_resources

        # Configure node
        config = {
            "code": code,
            "timeout": timeout,
            "max_concurrent_tasks": max_concurrent_tasks,
            **kwargs,
        }

        # Add description if provided
        if description:
            config["description"] = description
            self._node_metadata.setdefault(node_id, {})["description"] = description

        # Add node using base builder
        self.add_node("AsyncPythonCodeNode", node_id, config)

        # Configure error handling
        if retry_policy:
            self._retry_policies[node_id] = retry_policy
            self._node_metadata.setdefault(node_id, {})[
                "retry_policy"
            ] = retry_policy.to_dict()
        if error_handler:
            self._error_handlers[node_id] = error_handler
            self._node_metadata.setdefault(node_id, {})["error_handler"] = {
                "type": error_handler.handler_type,
                "fallback_value": error_handler.fallback_value,
            }

        return self  # Fluent interface

    def add_parallel_map(
        self,
        node_id: str,
        map_function: str,
        *,
        input_field: str = "items",
        output_field: str = "results",
        max_workers: int = 10,
        batch_size: int = None,
        timeout_per_item: int = 5,
        continue_on_error: bool = False,
        description: str = None,
    ) -> "AsyncWorkflowBuilder":
        """Add node that processes items in parallel using asyncio.gather."""
        # Validate function
        self._validate_async_function(map_function)

        code = f"""
import asyncio
from asyncio import Semaphore
import time

# Define processing function
{map_function}

# Validate function is defined
if 'process_item' not in locals():
    raise ValueError("map_function must define 'process_item' function")

# Create semaphore for concurrency control
semaphore = Semaphore({max_workers})

async def process_with_timeout(item, index):
    async with semaphore:
        start_time = time.time()
        try:
            # Check if process_item is async
            if asyncio.iscoroutinefunction(process_item):
                result = await asyncio.wait_for(
                    process_item(item),
                    timeout={timeout_per_item}
                )
            else:
                result = await asyncio.wait_for(
                    asyncio.create_task(asyncio.to_thread(process_item, item)),
                    timeout={timeout_per_item}
                )
            return {{
                "index": index,
                "success": True,
                "result": result,
                "duration": time.time() - start_time
            }}
        except asyncio.TimeoutError:
            return {{
                "index": index,
                "success": False,
                "error": "timeout",
                "item": item,
                "duration": time.time() - start_time
            }}
        except Exception as e:
            return {{
                "index": index,
                "success": False,
                "error": str(e),
                "error_type": type(e).__name__,
                "item": item,
                "duration": time.time() - start_time
            }}

# Get input items - check both direct field name and generate_output
input_items = None
if '{input_field}' in locals():
    input_items = {input_field}
elif 'generate_output' in locals() and isinstance(generate_output, dict):
    # When connected from another node, input might be in generate_output
    input_items = generate_output.get('{input_field}')
elif 'generate_output' in locals() and '{input_field}' == 'items':
    # Special case: if the output is directly the items list
    input_items = generate_output

if input_items is None:
    available_vars = list(locals().keys())
    raise ValueError(f"Input field '{input_field}' not found. Available: {{available_vars}}")

if not isinstance(input_items, (list, tuple)):
    raise ValueError(f"'{input_field}' must be a list or tuple, got {{type(input_items).__name__}}")

# Process items
total_start = time.time()
all_results = []

# Process in batches if specified
batch_size_val = {batch_size}
if batch_size_val:
    for i in range(0, len(input_items), batch_size_val):
        batch = input_items[i:i+batch_size_val]
        batch_results = await asyncio.gather(
            *[process_with_timeout(item, i+j) for j, item in enumerate(batch)],
            return_exceptions=True
        )
        # Filter out exceptions and convert to error results
        for j, result in enumerate(batch_results):
            if isinstance(result, Exception):
                all_results.append({{
                    "index": i+j,
                    "success": False,
                    "error": str(result),
                    "error_type": type(result).__name__,
                    "item": batch[j],
                    "duration": 0
                }})
            else:
                all_results.append(result)
else:
    batch_results = await asyncio.gather(
        *[process_with_timeout(item, i) for i, item in enumerate(input_items)],
        return_exceptions=True
    )
    # Filter out exceptions and convert to error results
    for i, result in enumerate(batch_results):
        if isinstance(result, Exception):
            all_results.append({{
                "index": i,
                "success": False,
                "error": str(result),
                "error_type": type(result).__name__,
                "item": input_items[i],
                "duration": 0
            }})
        else:
            all_results.append(result)

# Organize results
successful = [r for r in all_results if r.get("success", False)]
failed = [r for r in all_results if not r.get("success", False)]

# Extract processed items
processed_items = [r["result"] for r in successful]

# Continue on error flag
if not {continue_on_error} and failed:
    error_summary = {{
        "total_errors": len(failed),
        "error_types": {{}}
    }}
    for f in failed:
        error_type = f.get("error", "unknown")
        error_summary["error_types"][error_type] = error_summary["error_types"].get(error_type, 0) + 1

    raise RuntimeError(f"Processing failed for {{len(failed)}} items: {{error_summary}}")

result = {{
    "{output_field}": processed_items,
    "statistics": {{
        "total": len(input_items),
        "successful": len(successful),
        "failed": len(failed),
        "total_duration": time.time() - total_start,
        "average_duration": sum(r["duration"] for r in all_results) / len(all_results) if all_results else 0
    }},
    "errors": failed if failed else []
}}
"""

        return self.add_async_code(
            node_id,
            code,
            max_concurrent_tasks=max_workers,
            timeout=(
                timeout_per_item * len(input_field)
                if hasattr(input_field, "__len__")
                else 300
            ),
            description=description
            or f"Parallel map processing with {max_workers} workers",
        )

    def add_resource_node(
        self,
        node_id: str,
        resource_name: str,
        operation: str,
        params: Dict[str, Any] = None,
        *,
        output_field: str = "result",
        description: str = None,
        **kwargs,
    ) -> "AsyncWorkflowBuilder":
        """Add node that interacts with a registered resource."""
        # Track resource requirement
        self._resource_requirements.add(resource_name)

        # Build parameter string for operation call
        param_parts = []
        if params:
            for key, value in params.items():
                if isinstance(value, str):
                    param_parts.append(f'{key}="{value}"')
                else:
                    param_parts.append(f"{key}={repr(value)}")

        param_str = ", ".join(param_parts)

        code = f"""
# Access resource (function is provided by runtime)
if 'get_resource' in globals():
    resource = await get_resource("{resource_name}")
else:
    # Fallback for testing - resource should be in inputs
    resource = locals().get("{resource_name}")
    if resource is None:
        raise RuntimeError(f"Resource '{resource_name}' not available")

# Validate resource has the operation
if not hasattr(resource, "{operation}"):
    raise AttributeError(f"Resource '{resource_name}' does not have operation '{operation}'")

# Execute operation
operation_result = await resource.{operation}({param_str})

# Wrap result
result = {{
    "{output_field}": operation_result,
    "resource": "{resource_name}",
    "operation": "{operation}"
}}
"""

        return self.add_async_code(
            node_id,
            code,
            required_resources=[resource_name],
            description=description or f"Execute {operation} on {resource_name}",
            **kwargs,
        )

    def add_scatter_gather(
        self,
        scatter_id: str,
        process_id_prefix: str,
        gather_id: str,
        process_function: str,
        *,
        worker_count: int = 4,
        scatter_field: str = "items",
        gather_field: str = "results",
        description: str = None,
    ) -> "AsyncWorkflowBuilder":
        """Add scatter-gather pattern for parallel processing."""
        # Use parallel_map which is simpler and more reliable
        return self.add_parallel_map(
            scatter_id,
            process_function,
            input_field=scatter_field,
            output_field=gather_field,
            max_workers=worker_count,
            description=description
            or f"Scatter-gather processing with {worker_count} workers",
        )

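    # Illustrative note, not part of the diffed source: the `map_function` string
    # passed to add_parallel_map/add_scatter_gather must define `process_item`;
    # the generated node supplies the semaphore, per-item timeout, and error
    # collection. Assuming dict-shaped items, a conforming function could look like:
    #
    #     async def process_item(item):
    #         # transform one record; a plain `def` is also accepted and is
    #         # executed via asyncio.to_thread
    #         return {"id": item["id"], "value": item["value"] * 2}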
    def _validate_async_code(self, code: str):
        """Validate async Python code."""
        try:
            # Try to compile the code - but allow module-level await
            # by wrapping in an async function for validation
            wrapped_code = "async def __validate_wrapper():\n"
            for line in code.split("\n"):
                wrapped_code += f" {line}\n"

            # Try to compile the wrapped version first
            try:
                compile(wrapped_code, "<string>", "exec")
            except SyntaxError:
                # If wrapped version fails, try original (might be valid module-level code)
                compile(code, "<string>", "exec")

        except SyntaxError as e:
            # Only reject if it's a true syntax error, not await-related
            if "await" not in str(e):
                raise ValueError(f"Invalid Python code: {e}")
            # For await-related errors, we'll allow them since AsyncPythonCodeNode handles module-level await

    def _validate_async_function(self, function_code: str):
        """Validate async function definition."""
        # Check if it defines process_item function
        if "def process_item" not in function_code:
            raise ValueError(
                "Function must define 'def process_item(item)' or 'async def process_item(item)'"
            )

        # Validate syntax
        self._validate_async_code(function_code)

    # Resource management methods
    def require_resource(
        self,
        name: str,
        factory: ResourceFactory,
        health_check: Callable = None,
        cleanup_handler: Callable = None,
        description: str = None,
    ) -> "AsyncWorkflowBuilder":
        """Declare a required resource for this workflow."""
        # Register with resource registry
        self._resource_registry.register_factory(
            name, factory, health_check=health_check, cleanup_handler=cleanup_handler
        )

        # Track requirement
        self._resource_requirements.add(name)

        # Add to workflow metadata
        self._workflow_metadata.setdefault("resources", {})[name] = {
            "factory_type": type(factory).__name__,
            "description": description or f"Resource: {name}",
            "has_health_check": health_check is not None,
            "has_cleanup": cleanup_handler is not None,
        }

        return self

    def with_database(
        self,
        name: str = "db",
        host: str = "localhost",
        port: int = 5432,
        database: str = None,
        user: str = None,
        password: str = None,
        min_size: int = 10,
        max_size: int = 20,
        **kwargs,
    ) -> "AsyncWorkflowBuilder":
        """Add database resource requirement."""
        from ..resources.factory import DatabasePoolFactory

        config = {
            "host": host,
            "port": port,
            "min_size": min_size,
            "max_size": max_size,
            **kwargs,
        }

        # Only add non-None values
        if database:
            config["database"] = database
        if user:
            config["user"] = user
        if password:
            config["password"] = password

        factory = DatabasePoolFactory(**config)

        # Health check for PostgreSQL
        async def pg_health_check(pool):
            try:
                async with pool.acquire() as conn:
                    await conn.fetchval("SELECT 1")
                return True
            except Exception:
                return False

        # Cleanup handler
        async def pg_cleanup(pool):
            await pool.close()

        return self.require_resource(
            name,
            factory,
            health_check=pg_health_check,
            cleanup_handler=pg_cleanup,
            description=f"PostgreSQL database connection pool to {host}:{port}/{database or 'default'}",
        )

    def with_http_client(
        self,
        name: str = "http",
        base_url: str = None,
        headers: Dict[str, str] = None,
        timeout: int = 30,
        **kwargs,
    ) -> "AsyncWorkflowBuilder":
        """Add HTTP client resource requirement."""
        from ..resources.factory import HttpClientFactory

        config = {"timeout": timeout, **kwargs}

        if headers:
            config["headers"] = headers

        factory = HttpClientFactory(base_url=base_url, **config)

        # Cleanup handler for aiohttp
        async def http_cleanup(session):
            await session.close()

        return self.require_resource(
            name,
            factory,
            cleanup_handler=http_cleanup,
            description="HTTP client session"
            + (f" for {base_url}" if base_url else ""),
        )

    def with_cache(
        self,
        name: str = "cache",
        backend: str = "redis",
        host: str = "localhost",
        port: int = 6379,
        **kwargs,
    ) -> "AsyncWorkflowBuilder":
        """Add cache resource requirement."""
        if backend == "redis":
            from ..resources.factory import CacheFactory

            factory = CacheFactory(backend=backend, host=host, port=port, **kwargs)

            # Health check for Redis
            async def redis_health_check(cache):
                try:
                    await cache.ping() if hasattr(cache, "ping") else True
                    return True
                except Exception:
                    return False

            # Cleanup handler
            async def redis_cleanup(cache):
                if hasattr(cache, "close"):
                    cache.close()
                if hasattr(cache, "wait_closed"):
                    await cache.wait_closed()

            return self.require_resource(
                name,
                factory,
                health_check=redis_health_check,
                cleanup_handler=redis_cleanup,
                description=f"Redis cache connection to {host}:{port}",
            )
        else:
            raise ValueError(f"Unsupported cache backend: {backend}")

    def build(self) -> Workflow:
        """Build the async workflow with enhanced metadata."""
        # Add resource requirements to workflow metadata
        self._workflow_metadata["required_resources"] = list(
            self._resource_requirements
        )
        self._workflow_metadata["node_metadata"] = self._node_metadata

        # Build base workflow
        workflow = super().build()

        # Enhance workflow with async metadata
        if hasattr(workflow, "metadata"):
            workflow.metadata.update(self._workflow_metadata)
        else:
            workflow.metadata = self._workflow_metadata

        # Attach resource registry to workflow
        workflow.resource_registry = self._resource_registry

        return workflow

    def get_resource_registry(self) -> ResourceRegistry:
        """Get the resource registry for this workflow."""
        return self._resource_registry

    def list_required_resources(self) -> List[str]:
        """List all required resources for this workflow."""
        return list(self._resource_requirements)

    def get_node_metadata(self, node_id: str) -> Dict[str, Any]:
        """Get metadata for a specific node."""
        return self._node_metadata.get(node_id, {})

    def add_connection(
        self, from_node: str, from_output: str, to_node: str, to_input: str
    ) -> "AsyncWorkflowBuilder":
        """Connect two nodes in the workflow (fluent interface version)."""
        super().add_connection(from_node, from_output, to_node, to_input)
        return self
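For orientation, here is a minimal usage sketch of the AsyncWorkflowBuilder added in this release. It uses only methods defined in the file above; the database settings, the node code bodies, and the "items"/"results" field names are illustrative, and the output-to-input mapping behaviour of add_connection comes from the base WorkflowBuilder, which is not part of this hunk.

import textwrap

from kailash.workflow.async_builder import AsyncWorkflowBuilder, RetryPolicy

builder = (
    AsyncWorkflowBuilder(name="orders_etl", description="Illustrative async workflow")
    # Declare a PostgreSQL pool resource; host/database/user are placeholders.
    .with_database(name="db", database="orders", user="etl_user")
    # Fetch rows through the pool; get_resource is the accessor that the
    # generated resource-node code above also relies on.
    .add_async_code(
        "fetch_orders",
        """
        db = await get_resource("db")
        async with db.acquire() as conn:
            rows = await conn.fetch("SELECT id, total FROM orders")
        result = {"items": [dict(r) for r in rows]}
        """,
        required_resources=["db"],
        retry_policy=RetryPolicy(max_attempts=3),
    )
    # Fan out over the fetched items; the string must define process_item.
    .add_parallel_map(
        "enrich_orders",
        textwrap.dedent(
            """
            async def process_item(item):
                return {**item, "total_with_tax": item["total"] * 1.2}
            """
        ),
        input_field="items",
        output_field="results",
        max_workers=5,
    )
    .add_connection("fetch_orders", "items", "enrich_orders", "items")
)

workflow = builder.build()                    # async metadata and resource_registry attached
required = builder.list_required_resources()  # ["db"]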