kailash 0.5.0__py3-none-any.whl → 0.6.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74)
  1. kailash/__init__.py +1 -1
  2. kailash/access_control/__init__.py +1 -1
  3. kailash/client/__init__.py +12 -0
  4. kailash/client/enhanced_client.py +306 -0
  5. kailash/core/actors/__init__.py +16 -0
  6. kailash/core/actors/adaptive_pool_controller.py +630 -0
  7. kailash/core/actors/connection_actor.py +566 -0
  8. kailash/core/actors/supervisor.py +364 -0
  9. kailash/core/ml/__init__.py +1 -0
  10. kailash/core/ml/query_patterns.py +544 -0
  11. kailash/core/monitoring/__init__.py +19 -0
  12. kailash/core/monitoring/connection_metrics.py +488 -0
  13. kailash/core/optimization/__init__.py +1 -0
  14. kailash/core/resilience/__init__.py +17 -0
  15. kailash/core/resilience/circuit_breaker.py +382 -0
  16. kailash/edge/__init__.py +16 -0
  17. kailash/edge/compliance.py +834 -0
  18. kailash/edge/discovery.py +659 -0
  19. kailash/edge/location.py +582 -0
  20. kailash/gateway/__init__.py +33 -0
  21. kailash/gateway/api.py +289 -0
  22. kailash/gateway/enhanced_gateway.py +357 -0
  23. kailash/gateway/resource_resolver.py +217 -0
  24. kailash/gateway/security.py +227 -0
  25. kailash/middleware/auth/access_control.py +6 -6
  26. kailash/middleware/auth/models.py +2 -2
  27. kailash/middleware/communication/ai_chat.py +7 -7
  28. kailash/middleware/communication/api_gateway.py +5 -15
  29. kailash/middleware/database/base_models.py +1 -7
  30. kailash/middleware/gateway/__init__.py +22 -0
  31. kailash/middleware/gateway/checkpoint_manager.py +398 -0
  32. kailash/middleware/gateway/deduplicator.py +382 -0
  33. kailash/middleware/gateway/durable_gateway.py +417 -0
  34. kailash/middleware/gateway/durable_request.py +498 -0
  35. kailash/middleware/gateway/event_store.py +499 -0
  36. kailash/middleware/mcp/enhanced_server.py +2 -2
  37. kailash/nodes/admin/permission_check.py +817 -33
  38. kailash/nodes/admin/role_management.py +1242 -108
  39. kailash/nodes/admin/schema_manager.py +438 -0
  40. kailash/nodes/admin/user_management.py +1124 -1582
  41. kailash/nodes/code/__init__.py +8 -1
  42. kailash/nodes/code/async_python.py +1035 -0
  43. kailash/nodes/code/python.py +1 -0
  44. kailash/nodes/data/async_sql.py +9 -3
  45. kailash/nodes/data/query_pipeline.py +641 -0
  46. kailash/nodes/data/query_router.py +895 -0
  47. kailash/nodes/data/sql.py +20 -11
  48. kailash/nodes/data/workflow_connection_pool.py +1071 -0
  49. kailash/nodes/monitoring/__init__.py +3 -5
  50. kailash/nodes/monitoring/connection_dashboard.py +822 -0
  51. kailash/nodes/rag/__init__.py +2 -7
  52. kailash/resources/__init__.py +40 -0
  53. kailash/resources/factory.py +533 -0
  54. kailash/resources/health.py +319 -0
  55. kailash/resources/reference.py +288 -0
  56. kailash/resources/registry.py +392 -0
  57. kailash/runtime/async_local.py +711 -302
  58. kailash/testing/__init__.py +34 -0
  59. kailash/testing/async_test_case.py +353 -0
  60. kailash/testing/async_utils.py +345 -0
  61. kailash/testing/fixtures.py +458 -0
  62. kailash/testing/mock_registry.py +495 -0
  63. kailash/workflow/__init__.py +8 -0
  64. kailash/workflow/async_builder.py +621 -0
  65. kailash/workflow/async_patterns.py +766 -0
  66. kailash/workflow/cyclic_runner.py +107 -16
  67. kailash/workflow/graph.py +7 -2
  68. kailash/workflow/resilience.py +11 -1
  69. {kailash-0.5.0.dist-info → kailash-0.6.1.dist-info}/METADATA +19 -4
  70. {kailash-0.5.0.dist-info → kailash-0.6.1.dist-info}/RECORD +74 -28
  71. {kailash-0.5.0.dist-info → kailash-0.6.1.dist-info}/WHEEL +0 -0
  72. {kailash-0.5.0.dist-info → kailash-0.6.1.dist-info}/entry_points.txt +0 -0
  73. {kailash-0.5.0.dist-info → kailash-0.6.1.dist-info}/licenses/LICENSE +0 -0
  74. {kailash-0.5.0.dist-info → kailash-0.6.1.dist-info}/top_level.txt +0 -0
--- /dev/null
+++ b/kailash/workflow/async_patterns.py
@@ -0,0 +1,766 @@
+ """
+ AsyncPatterns - Common async workflow patterns for the AsyncWorkflowBuilder.
+
+ This module provides reusable patterns for common async scenarios like
+ retry with backoff, rate limiting, timeout with fallback, and batch processing.
+ """
+
+ from typing import Any, Callable, Dict, List, Optional, Union
+
+ from .async_builder import AsyncWorkflowBuilder
+
+
+ class AsyncPatterns:
+     """Common async workflow patterns."""
+
+     @staticmethod
+     def retry_with_backoff(
+         builder: AsyncWorkflowBuilder,
+         node_id: str,
+         operation_code: str,
+         *,
+         max_retries: int = 3,
+         initial_backoff: float = 1.0,
+         backoff_factor: float = 2.0,
+         max_backoff: float = 60.0,
+         retry_on: List[str] = None,
+         description: str = None,
+     ) -> AsyncWorkflowBuilder:
+         """Add node with exponential backoff retry logic."""
+         # Build retry exception list
+         if retry_on:
+             exception_checks = " or ".join(f"isinstance(e, {exc})" for exc in retry_on)
+         else:
+             exception_checks = "True"  # Retry on any exception
+
+         # Indent the operation code properly
+         indented_operation = "\n".join(
+             f"        {line}" if line.strip() else ""
+             for line in operation_code.strip().split("\n")
+         )
+
+         code = f"""
+ import asyncio
+ import random
+ import time
+
+ max_retries = {max_retries}
+ initial_backoff = {initial_backoff}
+ backoff_factor = {backoff_factor}
+ max_backoff = {max_backoff}
+
+ result = None
+ last_error = None
+ attempts = []
+
+ for attempt in range(max_retries):
+     attempt_start = time.time()
+     try:
+         # Attempt operation
+ {indented_operation}
+
+         # Record successful attempt
+         attempts.append({{
+             "attempt": attempt + 1,
+             "success": True,
+             "duration": time.time() - attempt_start
+         }})
+         break  # Success, exit retry loop
+
+     except Exception as e:
+         last_error = e
+
+         # Check if we should retry this exception
+         should_retry = {exception_checks}
+
+         if not should_retry:
+             # Don't retry this exception type
+             raise
+
+         # Record failed attempt
+         attempts.append({{
+             "attempt": attempt + 1,
+             "success": False,
+             "error": str(e),
+             "error_type": type(e).__name__,
+             "duration": time.time() - attempt_start
+         }})
+
+         if attempt == max_retries - 1:
+             # Final attempt failed
+             result = {{
+                 "success": False,
+                 "error": str(last_error),
+                 "error_type": type(last_error).__name__,
+                 "attempts": attempts,
+                 "total_attempts": len(attempts)
+             }}
+             raise RuntimeError(f"Operation failed after {{max_retries}} attempts: {{last_error}}")
+         else:
+             # Calculate backoff with jitter
+             backoff = min(
+                 initial_backoff * (backoff_factor ** attempt) + random.uniform(0, 1),
+                 max_backoff
+             )
+             await asyncio.sleep(backoff)
+
+ # If we get here, operation succeeded
+ # Merge retry metadata with user result
+ if result is None:
+     result = {{}}
+ elif not isinstance(result, dict):
+     result = {{"value": result}}
+
+ # Always add retry metadata
+ result["success"] = True
+ result["attempts"] = attempts
+ result["total_attempts"] = len(attempts)
+ """
+
+         return builder.add_async_code(
+             node_id,
+             code,
+             description=description
+             or f"Retry operation with exponential backoff (max {max_retries} attempts)",
+         )
+
+     @staticmethod
+     def rate_limited(
+         builder: AsyncWorkflowBuilder,
+         node_id: str,
+         operation_code: str,
+         *,
+         requests_per_second: float = 10,
+         burst_size: int = None,
+         description: str = None,
+     ) -> AsyncWorkflowBuilder:
+         """Add node with rate limiting using token bucket algorithm."""
+         if burst_size is None:
+             burst_size = int(requests_per_second * 2)
+
+         # Indent the operation code properly
+         indented_operation = "\n".join(
+             f"{line}" if line.strip() else ""
+             for line in operation_code.strip().split("\n")
+         )
+
+         code = f"""
+ import asyncio
+ import time
+ from collections import deque
+
+ # Rate limiting configuration
+ requests_per_second = {requests_per_second}
+ burst_size = {burst_size}
+ min_interval = 1.0 / requests_per_second
+
+ # Initialize rate limiter state (global for persistence across calls)
+ if '_rate_limiter_state' not in globals():
+     globals()['_rate_limiter_state'] = {{
+         'tokens': burst_size,
+         'last_update': time.time(),
+         'request_times': deque(maxlen=100)
+     }}
+
+ state = globals()['_rate_limiter_state']
+
+ # Update tokens based on time passed
+ current_time = time.time()
+ time_passed = current_time - state['last_update']
+ state['tokens'] = min(burst_size, state['tokens'] + time_passed * requests_per_second)
+ state['last_update'] = current_time
+
+ # Wait if no tokens available
+ while state['tokens'] < 1:
+     wait_time = (1 - state['tokens']) / requests_per_second
+     await asyncio.sleep(wait_time)
+
+     # Update tokens again
+     current_time = time.time()
+     time_passed = current_time - state['last_update']
+     state['tokens'] = min(burst_size, state['tokens'] + time_passed * requests_per_second)
+     state['last_update'] = current_time
+
+ # Consume a token
+ state['tokens'] -= 1
+ state['request_times'].append(current_time)
+
+ # Execute operation
+ operation_start = time.time()
+ {indented_operation}
+ operation_duration = time.time() - operation_start
+
+ # Add rate limiting info to result
+ if isinstance(result, dict):
+     result['_rate_limit_info'] = {{
+         'tokens_remaining': state['tokens'],
+         'requests_in_window': len([t for t in state['request_times'] if current_time - t < 1]),
+         'operation_duration': operation_duration
+     }}
+ """
+
+         return builder.add_async_code(
+             node_id,
+             code,
+             description=description
+             or f"Rate-limited operation ({requests_per_second} req/s)",
+         )
+
+     @staticmethod
+     def timeout_with_fallback(
+         builder: AsyncWorkflowBuilder,
+         primary_node_id: str,
+         fallback_node_id: str,
+         primary_code: str,
+         fallback_code: str,
+         *,
+         timeout_seconds: float = 5.0,
+         description: str = None,
+     ) -> AsyncWorkflowBuilder:
+         """Add primary operation with timeout and fallback."""
+         # Indent the primary code properly
+         indented_primary = "\n".join(
+             f"        {line}" if line.strip() else ""
+             for line in primary_code.strip().split("\n")
+         )
+
+         # Primary node with timeout
+         primary_with_timeout = f"""
+ import asyncio
+
+ try:
+     # Run primary operation with timeout
+     async def primary_operation():
+ {indented_primary}
+         return result
+
+     result = await asyncio.wait_for(primary_operation(), timeout={timeout_seconds})
+     if isinstance(result, dict):
+         result['_source'] = 'primary'
+     else:
+         result = {{"value": result, "_source": "primary"}}
+
+ except asyncio.TimeoutError:
+     # Primary timed out, will use fallback
+     result = {{
+         "_timeout": True,
+         "_source": "timeout",
+         "_timeout_seconds": {timeout_seconds}
+     }}
+ except Exception as e:
+     # Primary failed with error
+     result = {{
+         "_error": True,
+         "_source": "error",
+         "_error_message": str(e),
+         "_error_type": type(e).__name__
+     }}
+ """
+
+         builder.add_async_code(
+             primary_node_id,
+             primary_with_timeout,
+             timeout=int(timeout_seconds) + 5,  # Add buffer to node timeout
+             description=f"Primary operation with {timeout_seconds}s timeout",
+         )
+
+         # Indent the fallback code properly
+         indented_fallback = "\n".join(
+             f"    {line}" if line.strip() else ""
+             for line in fallback_code.strip().split("\n")
+         )
+
+         # Fallback node
+         fallback_with_check = f"""
+ # Check if we need fallback
+ primary_failed = False
+ if isinstance(primary_result, dict):
+     primary_failed = primary_result.get("_timeout", False) or primary_result.get("_error", False)
+
+ if primary_failed:
+     # Execute fallback
+ {indented_fallback}
+     if isinstance(result, dict):
+         result['_source'] = 'fallback'
+         result['_primary_timeout'] = primary_result.get("_timeout", False)
+         result['_primary_error'] = primary_result.get("_error", False)
+     else:
+         result = {{
+             "value": result,
+             "_source": "fallback",
+             "_primary_timeout": primary_result.get("_timeout", False),
+             "_primary_error": primary_result.get("_error", False)
+         }}
+ else:
+     # Primary succeeded, pass through
+     result = primary_result
+ """
+
+         builder.add_async_code(
+             fallback_node_id, fallback_with_check, description="Fallback operation"
+         )
+
+         # Connect primary to fallback
+         builder.add_connection(
+             primary_node_id, "result", fallback_node_id, "primary_result"
+         )
+
+         return builder
+
+     @staticmethod
+     def batch_processor(
+         builder: AsyncWorkflowBuilder,
+         node_id: str,
+         process_batch_code: str,
+         *,
+         batch_size: int = 100,
+         flush_interval: float = 5.0,
+         description: str = None,
+     ) -> AsyncWorkflowBuilder:
+         """Add batch processing node with time-based flushing."""
+         # Indent the process batch code properly
+         indented_batch_code = "\n".join(
+             f"    {line}" if line.strip() else ""
+             for line in process_batch_code.strip().split("\n")
+         )
+
+         code = f"""
+ import asyncio
+ import time
+ from typing import List
+
+ # Batch configuration
+ batch_size = {batch_size}
+ flush_interval = {flush_interval}
+
+ # Initialize batch state (global for persistence)
+ if '_batch_state' not in globals():
+     globals()['_batch_state'] = {{
+         'items': [],
+         'last_flush': time.time()
+     }}
+
+ batch_state = globals()['_batch_state']
+
+ # Add items to batch
+ new_items = items if 'items' in locals() else []
+ if isinstance(new_items, (list, tuple)):
+     batch_state['items'].extend(new_items)
+ elif new_items is not None:
+     batch_state['items'].append(new_items)
+
+ # Check if we should process batch
+ should_process = False
+ reason = None
+
+ if len(batch_state['items']) >= batch_size:
+     should_process = True
+     reason = "batch_full"
+ elif time.time() - batch_state['last_flush'] >= flush_interval and batch_state['items']:
+     should_process = True
+     reason = "time_based"
+ elif locals().get('force_flush', False) and batch_state['items']:  # Allow forced flush
+     should_process = True
+     reason = "forced"
+
+ results = []
+ if should_process:
+     # Process batch
+     batch_to_process = batch_state['items'][:batch_size]
+     remaining_items = batch_state['items'][batch_size:]
+
+     # User-defined batch processing
+     items = batch_to_process  # Make available to process code
+ {indented_batch_code}
+
+     # Update state
+     batch_state['items'] = remaining_items
+     batch_state['last_flush'] = time.time()
+
+     # Results should be set by process_batch_code
+     if 'batch_results' in locals():
+         results = batch_results
+
+ result = {{
+     "processed_count": len(results),
+     "results": results,
+     "remaining_in_batch": len(batch_state['items']),
+     "flush_reason": reason,
+     "next_flush_in": max(0, flush_interval - (time.time() - batch_state['last_flush']))
+ }}
+ """
+
+         return builder.add_async_code(
+             node_id,
+             code,
+             description=description
+             or f"Batch processor (size={batch_size}, interval={flush_interval}s)",
+         )
+
+     @staticmethod
+     def circuit_breaker(
+         builder: AsyncWorkflowBuilder,
+         node_id: str,
+         operation_code: str,
+         *,
+         failure_threshold: int = 5,
+         reset_timeout: float = 60.0,
+         description: str = None,
+     ) -> AsyncWorkflowBuilder:
+         """Add circuit breaker pattern for fault tolerance."""
+         # Indent the operation code properly
+         indented_operation = "\n".join(
+             f"        {line}" if line.strip() else ""
+             for line in operation_code.strip().split("\n")
+         )
+
+         code = f"""
+ import time
+
+ # Use string constants instead of Enum to avoid __build_class__ issues
+ CIRCUIT_CLOSED = "closed"
+ CIRCUIT_OPEN = "open"
+ CIRCUIT_HALF_OPEN = "half_open"
+
+ # Initialize circuit breaker state
+ if '_circuit_breaker_state' not in globals():
+     globals()['_circuit_breaker_state'] = {{
+         'state': CIRCUIT_CLOSED,
+         'failure_count': 0,
+         'last_failure_time': None,
+         'success_count': 0
+     }}
+
+ cb_state = globals()['_circuit_breaker_state']
+ failure_threshold = {failure_threshold}
+ reset_timeout = {reset_timeout}
+
+ # Check if we should attempt reset
+ current_time = time.time()
+ if (cb_state['state'] == CIRCUIT_OPEN and
+     cb_state['last_failure_time'] and
+     current_time - cb_state['last_failure_time'] >= reset_timeout):
+     cb_state['state'] = CIRCUIT_HALF_OPEN
+     cb_state['success_count'] = 0
+
+ # Handle circuit breaker states
+ if cb_state['state'] == CIRCUIT_OPEN:
+     result = {{
+         "success": False,
+         "error": "Circuit breaker is OPEN",
+         "circuit_state": cb_state['state'],
+         "failure_count": cb_state['failure_count'],
+         "time_until_retry": reset_timeout - (current_time - cb_state['last_failure_time']) if cb_state['last_failure_time'] else 0
+     }}
+ else:
+     try:
+         # Execute operation
+         operation_start = time.time()
+ {indented_operation}
+         operation_duration = time.time() - operation_start
+
+         # Operation succeeded
+         cb_state['failure_count'] = 0
+         if cb_state['state'] == CIRCUIT_HALF_OPEN:
+             cb_state['success_count'] += 1
+             if cb_state['success_count'] >= 3:  # Require multiple successes to fully close
+                 cb_state['state'] = CIRCUIT_CLOSED
+
+         # Add circuit breaker info to result
+         if isinstance(result, dict):
+             result['_circuit_breaker_info'] = {{
+                 'state': cb_state['state'],
+                 'failure_count': cb_state['failure_count'],
+                 'operation_duration': operation_duration
+             }}
+
+     except Exception as e:
+         # Operation failed
+         cb_state['failure_count'] += 1
+         cb_state['last_failure_time'] = current_time
+
+         if cb_state['failure_count'] >= failure_threshold:
+             cb_state['state'] = CIRCUIT_OPEN
+
+         result = {{
+             "success": False,
+             "error": str(e),
+             "error_type": type(e).__name__,
+             "circuit_state": cb_state['state'],
+             "failure_count": cb_state['failure_count']
+         }}
+
+         # Re-raise the exception unless circuit is now open
+         if cb_state['state'] != CIRCUIT_OPEN:
+             raise
+ """
+
+         return builder.add_async_code(
+             node_id,
+             code,
+             description=description
+             or f"Circuit breaker protected operation (threshold={failure_threshold})",
+         )
+
+     @staticmethod
+     def parallel_fetch(
+         builder: AsyncWorkflowBuilder,
+         node_id: str,
+         fetch_operations: Dict[str, str],
+         *,
+         timeout_per_operation: float = 10.0,
+         continue_on_error: bool = True,
+         description: str = None,
+     ) -> AsyncWorkflowBuilder:
+         """Add node that performs multiple async operations in parallel."""
+         # Build the fetch operations
+         operations = []
+         for key, operation_code in fetch_operations.items():
+             # Indent the operation code properly
+             indented_op_code = "\n".join(
+                 f"        {line}" if line.strip() else ""
+                 for line in operation_code.strip().split("\n")
+             )
+             operations.append(
+                 f"""
+ async def fetch_{key}():
+     try:
+ {indented_op_code}
+         return ("{key}", True, result, None)
+     except Exception as e:
+         return ("{key}", False, None, str(e))
+ """
+             )
+
+         code = f"""
+ import asyncio
+
+ # Define all fetch operations
+ {chr(10).join(operations)}
+
+ # Get all fetch functions
+ fetch_functions = []
+ local_vars = list(locals().keys())  # Create a copy to avoid modification during iteration
+ for name in local_vars:
+     if name.startswith('fetch_') and callable(locals().get(name)):
+         fetch_functions.append(locals()[name])
+
+ # Execute all operations in parallel with timeout
+ try:
+     results = await asyncio.wait_for(
+         asyncio.gather(*[func() for func in fetch_functions]),
+         timeout={timeout_per_operation}
+     )
+ except asyncio.TimeoutError:
+     # Handle timeout
+     results = [(f"operation_{{i}}", False, None, "timeout") for i in range(len(fetch_functions))]
+
+ # Process results
+ successful = {{}}
+ failed = {{}}
+
+ for key, success, data, error in results:
+     if success:
+         successful[key] = data
+     else:
+         failed[key] = error
+
+ # Check if we should fail on any errors
+ if not {continue_on_error} and failed:
+     raise RuntimeError(f"{{len(failed)}} operations failed: {{list(failed.keys())}}")
+
+ result = {{
+     "successful": successful,
+     "failed": failed,
+     "statistics": {{
+         "total_operations": len(results),
+         "successful_count": len(successful),
+         "failed_count": len(failed),
+         "success_rate": len(successful) / len(results) if results else 0
+     }}
+ }}
+ """
+
+         return builder.add_async_code(
+             node_id,
+             code,
+             timeout=int(timeout_per_operation) + 10,
+             description=description
+             or f"Parallel fetch of {len(fetch_operations)} operations",
+         )
+
+     @staticmethod
+     def cache_aside(
+         builder: AsyncWorkflowBuilder,
+         cache_check_id: str,
+         data_fetch_id: str,
+         cache_store_id: str,
+         fetch_code: str,
+         *,
+         cache_resource: str = "cache",
+         cache_key_template: str = "key_{item_id}",
+         ttl_seconds: int = 3600,
+         description: str = None,
+     ) -> AsyncWorkflowBuilder:
+         """Add cache-aside pattern with cache check, fetch, and store."""
+
+         # Cache check node
+         builder.add_async_code(
+             cache_check_id,
+             f"""
+ import json
+
+ # Get cache resource
+ if 'get_resource' in globals():
+     cache = await get_resource("{cache_resource}")
+ else:
+     # Fallback for testing
+     cache = locals().get("{cache_resource}")
+     if cache is None:
+         raise RuntimeError(f"Cache resource '{cache_resource}' not available")
+
+ # Get variables for cache key generation
+ cache_key_vars = {{k: v for k, v in locals().items() if not k.startswith('_')}}
+ # Generate cache key
+ cache_key = "{cache_key_template}".format(**cache_key_vars)
+
+ # Try to get from cache
+ try:
+     cached_data = await cache.get(cache_key)
+     if cached_data:
+         if isinstance(cached_data, (str, bytes)):
+             try:
+                 data = json.loads(cached_data)
+             except (json.JSONDecodeError, TypeError):
+                 data = cached_data
+         else:
+             data = cached_data
+
+         result = {{
+             "found_in_cache": True,
+             "cache_key": cache_key,
+             "data": data
+         }}
+     else:
+         result = {{
+             "found_in_cache": False,
+             "cache_key": cache_key,
+             "data": None
+         }}
+ except Exception as e:
+     # Cache error, proceed without cache
+     result = {{
+         "found_in_cache": False,
+         "cache_key": cache_key,
+         "data": None,
+         "cache_error": str(e)
+     }}
+ """,
+             required_resources=[cache_resource],
+             description="Check cache for existing data",
+         )
+
+         # Data fetch node (only runs if cache miss)
+         # Indent the fetch code properly
+         indented_fetch = "\n".join(
+             f"    {line}" if line.strip() else ""
+             for line in fetch_code.strip().split("\n")
+         )
+
+         builder.add_async_code(
+             data_fetch_id,
+             f"""
+ # Only fetch if not found in cache
+ if not cache_result.get("found_in_cache", False):
+     # Get all variables that were passed to cache_check (like item_id)
+     # Extract them from cache_key if needed
+     cache_key = cache_result.get("cache_key", "")
+
+     # Try to extract variables from the cache key
+     # This is a simple approach - in production you'd want more robust parsing
+     import re
+     matches = re.findall(r'(\\d+)', cache_key)
+     if matches and 'item_id' not in locals():
+         item_id = int(matches[0])
+
+     # Execute fetch operation
+ {indented_fetch}
+
+     fetch_result = {{
+         "needs_caching": True,
+         "cache_key": cache_result.get("cache_key"),
+         "data": result
+     }}
+ else:
+     # Use cached data
+     fetch_result = {{
+         "needs_caching": False,
+         "cache_key": cache_result.get("cache_key"),
+         "data": cache_result.get("data")
+     }}
+
+ result = fetch_result
+ """,
+             description="Fetch data if cache miss",
+         )
+
+         # Cache store node
+         builder.add_async_code(
+             cache_store_id,
+             f"""
+ import json
+
+ # Store in cache if needed
+ if fetch_data.get("needs_caching", False):
+     try:
+         # Get cache resource
+         if 'get_resource' in globals():
+             cache = await get_resource("{cache_resource}")
+         else:
+             # Fallback for testing
+             cache = locals().get("{cache_resource}")
+             if cache is None:
+                 raise RuntimeError(f"Cache resource '{cache_resource}' not available")
+
+         cache_key = fetch_data.get("cache_key")
+         data_to_cache = fetch_data.get("data")
+
+         # Serialize data for caching
+         if isinstance(data_to_cache, (dict, list)):
+             cache_value = json.dumps(data_to_cache)
+         else:
+             cache_value = data_to_cache
+
+         # Store with TTL
+         await cache.setex(cache_key, {ttl_seconds}, cache_value)
+
+         result = {{
+             "data": data_to_cache,
+             "cached": True,
+             "cache_key": cache_key,
+             "ttl": {ttl_seconds}
+         }}
+     except Exception as e:
+         # Cache store failed, return data anyway
+         result = {{
+             "data": fetch_data.get("data"),
+             "cached": False,
+             "cache_error": str(e)
+         }}
+ else:
+     # Data was from cache
+     result = {{
+         "data": fetch_data.get("data"),
+         "cached": False,
+         "from_cache": True
+     }}
+ """,
+             required_resources=[cache_resource],
+             description="Store fetched data in cache",
+         )
+
+         # Connect the nodes
+         builder.add_connection(cache_check_id, "result", data_fetch_id, "cache_result")
+         builder.add_connection(data_fetch_id, "result", cache_store_id, "fetch_data")
+
+         return builder
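
For orientation, here is a minimal usage sketch of the new module. Only the AsyncPatterns signatures and the add_connection wiring shown in the hunk above come from the package; the AsyncWorkflowBuilder constructor call, the node names, and the db/http calls inside the operation strings are hypothetical placeholders.

from kailash.workflow.async_builder import AsyncWorkflowBuilder
from kailash.workflow.async_patterns import AsyncPatterns

# Hypothetical builder construction; the AsyncWorkflowBuilder API is not part of this hunk.
builder = AsyncWorkflowBuilder("user_activity_report")

# Wrap a flaky query in exponential-backoff retries, limited to the listed exception types.
AsyncPatterns.retry_with_backoff(
    builder,
    "fetch_user",
    "result = await db.fetch_user(user_id)",  # placeholder operation code
    max_retries=5,
    retry_on=["ConnectionError", "TimeoutError"],
)

# Throttle an outbound call with the token-bucket rate limiter.
AsyncPatterns.rate_limited(
    builder,
    "fetch_activity",
    "result = await http.get_activity(user_id)",  # placeholder operation code
    requests_per_second=5,
)

# Route the retried fetch into the rate-limited call, mirroring the add_connection usage above.
builder.add_connection("fetch_user", "result", "fetch_activity", "user")

Each helper is annotated to return the builder, so the calls can also be chained fluently.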