kailash 0.1.4__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83)
  1. kailash/__init__.py +1 -1
  2. kailash/access_control.py +740 -0
  3. kailash/api/__main__.py +6 -0
  4. kailash/api/auth.py +668 -0
  5. kailash/api/custom_nodes.py +285 -0
  6. kailash/api/custom_nodes_secure.py +377 -0
  7. kailash/api/database.py +620 -0
  8. kailash/api/studio.py +915 -0
  9. kailash/api/studio_secure.py +893 -0
  10. kailash/mcp/__init__.py +53 -0
  11. kailash/mcp/__main__.py +13 -0
  12. kailash/mcp/ai_registry_server.py +712 -0
  13. kailash/mcp/client.py +447 -0
  14. kailash/mcp/client_new.py +334 -0
  15. kailash/mcp/server.py +293 -0
  16. kailash/mcp/server_new.py +336 -0
  17. kailash/mcp/servers/__init__.py +12 -0
  18. kailash/mcp/servers/ai_registry.py +289 -0
  19. kailash/nodes/__init__.py +4 -2
  20. kailash/nodes/ai/__init__.py +38 -0
  21. kailash/nodes/ai/a2a.py +1790 -0
  22. kailash/nodes/ai/agents.py +116 -2
  23. kailash/nodes/ai/ai_providers.py +206 -8
  24. kailash/nodes/ai/intelligent_agent_orchestrator.py +2108 -0
  25. kailash/nodes/ai/iterative_llm_agent.py +1280 -0
  26. kailash/nodes/ai/llm_agent.py +324 -1
  27. kailash/nodes/ai/self_organizing.py +1623 -0
  28. kailash/nodes/api/http.py +106 -25
  29. kailash/nodes/api/rest.py +116 -21
  30. kailash/nodes/base.py +15 -2
  31. kailash/nodes/base_async.py +45 -0
  32. kailash/nodes/base_cycle_aware.py +374 -0
  33. kailash/nodes/base_with_acl.py +338 -0
  34. kailash/nodes/code/python.py +135 -27
  35. kailash/nodes/data/readers.py +116 -53
  36. kailash/nodes/data/writers.py +16 -6
  37. kailash/nodes/logic/__init__.py +8 -0
  38. kailash/nodes/logic/async_operations.py +48 -9
  39. kailash/nodes/logic/convergence.py +642 -0
  40. kailash/nodes/logic/loop.py +153 -0
  41. kailash/nodes/logic/operations.py +212 -27
  42. kailash/nodes/logic/workflow.py +26 -18
  43. kailash/nodes/mixins/__init__.py +11 -0
  44. kailash/nodes/mixins/mcp.py +228 -0
  45. kailash/nodes/mixins.py +387 -0
  46. kailash/nodes/transform/__init__.py +8 -1
  47. kailash/nodes/transform/processors.py +119 -4
  48. kailash/runtime/__init__.py +2 -1
  49. kailash/runtime/access_controlled.py +458 -0
  50. kailash/runtime/local.py +106 -33
  51. kailash/runtime/parallel_cyclic.py +529 -0
  52. kailash/sdk_exceptions.py +90 -5
  53. kailash/security.py +845 -0
  54. kailash/tracking/manager.py +38 -15
  55. kailash/tracking/models.py +1 -1
  56. kailash/tracking/storage/filesystem.py +30 -2
  57. kailash/utils/__init__.py +8 -0
  58. kailash/workflow/__init__.py +18 -0
  59. kailash/workflow/convergence.py +270 -0
  60. kailash/workflow/cycle_analyzer.py +768 -0
  61. kailash/workflow/cycle_builder.py +573 -0
  62. kailash/workflow/cycle_config.py +709 -0
  63. kailash/workflow/cycle_debugger.py +760 -0
  64. kailash/workflow/cycle_exceptions.py +601 -0
  65. kailash/workflow/cycle_profiler.py +671 -0
  66. kailash/workflow/cycle_state.py +338 -0
  67. kailash/workflow/cyclic_runner.py +985 -0
  68. kailash/workflow/graph.py +500 -39
  69. kailash/workflow/migration.py +768 -0
  70. kailash/workflow/safety.py +365 -0
  71. kailash/workflow/templates.py +744 -0
  72. kailash/workflow/validation.py +693 -0
  73. {kailash-0.1.4.dist-info → kailash-0.2.0.dist-info}/METADATA +446 -13
  74. kailash-0.2.0.dist-info/RECORD +125 -0
  75. kailash/nodes/mcp/__init__.py +0 -11
  76. kailash/nodes/mcp/client.py +0 -554
  77. kailash/nodes/mcp/resource.py +0 -682
  78. kailash/nodes/mcp/server.py +0 -577
  79. kailash-0.1.4.dist-info/RECORD +0 -85
  80. {kailash-0.1.4.dist-info → kailash-0.2.0.dist-info}/WHEEL +0 -0
  81. {kailash-0.1.4.dist-info → kailash-0.2.0.dist-info}/entry_points.txt +0 -0
  82. {kailash-0.1.4.dist-info → kailash-0.2.0.dist-info}/licenses/LICENSE +0 -0
  83. {kailash-0.1.4.dist-info → kailash-0.2.0.dist-info}/top_level.txt +0 -0
kailash/workflow/templates.py (new file)
@@ -0,0 +1,744 @@
+ """
+ Pre-Built Workflow Templates for Common Cyclic Patterns.
+
+ This module provides a comprehensive collection of pre-built cycle templates
+ and patterns that dramatically simplify the creation of common workflow
+ structures. It eliminates boilerplate code and ensures best practices are
+ followed automatically for standard cyclic workflow patterns.
+
+ Design Philosophy:
+     Provides curated, battle-tested templates for common cycle patterns,
+     reducing development time and ensuring optimal configurations. Each
+     template encapsulates best practices and proven patterns for specific
+     use cases with sensible defaults and customization options.
+
+ Key Features:
+     - Pre-built templates for common patterns
+     - Automatic best-practice configuration
+     - Customizable parameters with validation
+     - Generated helper nodes for complex patterns
+     - Workflow class extensions for seamless integration
+
+ Template Categories:
+     - Optimization Cycles: Iterative improvement patterns
+     - Retry Cycles: Error recovery and fault tolerance
+     - Data Quality Cycles: Iterative data cleaning and validation
+     - Learning Cycles: Machine learning training patterns
+     - Convergence Cycles: Numerical convergence patterns
+     - Batch Processing Cycles: Large dataset processing patterns
+
+ Core Components:
+     - CycleTemplate: Configuration dataclass for templates
+     - CycleTemplates: Static factory methods for template creation
+     - Generated helper nodes for pattern-specific logic
+     - Workflow class extensions for direct integration
+
+ Automatic Optimizations:
+     - Pattern-specific convergence conditions
+     - Appropriate safety limits and timeouts
+     - Optimal iteration limits for each pattern
+     - Memory management for data-intensive patterns
+     - Error handling and recovery strategies
+
+ Upstream Dependencies:
+     - Core workflow and node implementations
+     - PythonCodeNode for generated helper logic
+     - SwitchNode for conditional routing patterns
+     - Convergence and safety systems
+
+ Downstream Consumers:
+     - Workflow builders and automation tools
+     - Template-based workflow generation systems
+     - Development tools and IDEs
+     - Educational and training materials
+
+ Examples:
+     Optimization cycle template:
+
+     >>> from kailash.workflow.templates import CycleTemplates
+     >>> workflow = Workflow("optimization", "Quality Optimization")
+     >>> workflow.add_node("processor", ProcessorNode())
+     >>> workflow.add_node("evaluator", EvaluatorNode())
+     >>> cycle_id = CycleTemplates.optimization_cycle(
+     ...     workflow,
+     ...     processor_node="processor",
+     ...     evaluator_node="evaluator",
+     ...     convergence="quality > 0.95",
+     ...     max_iterations=100
+     ... )
+
+     Retry cycle with backoff:
+
+     >>> cycle_id = CycleTemplates.retry_cycle(
+     ...     workflow,
+     ...     target_node="api_call",
+     ...     max_retries=5,
+     ...     backoff_strategy="exponential",
+     ...     success_condition="success == True"
+     ... )
+
+     Direct workflow integration:
+
+     >>> # Templates extend Workflow class
+     >>> workflow = Workflow("ml_training", "Model Training")
+     >>> cycle_id = workflow.add_learning_cycle(
+     ...     trainer_node="trainer",
+     ...     evaluator_node="evaluator",
+     ...     target_accuracy=0.98,
+     ...     early_stopping_patience=10
+     ... )
+
+     Custom template configuration:
+
+     >>> # Numerical convergence with custom tolerance
+     >>> cycle_id = workflow.add_convergence_cycle(
+     ...     processor_node="newton_raphson",
+     ...     tolerance=0.0001,
+     ...     max_iterations=1000
+     ... )
+
+     >>> # Batch processing for large datasets
+     >>> cycle_id = workflow.add_batch_processing_cycle(
+     ...     processor_node="data_processor",
+     ...     batch_size=1000,
+     ...     total_items=1000000
+     ... )
+
+ See Also:
+     - :mod:`kailash.workflow.cycle_config` for advanced configuration
+     - :mod:`kailash.workflow.cycle_builder` for custom cycle creation
+     - :doc:`/examples/patterns` for comprehensive pattern examples
+ """
+
+ from typing import Dict, Any, List, Optional
+ from dataclasses import dataclass
+ import time
+ import math
+
+ from ..nodes.code import PythonCodeNode
+ from . import Workflow
+
+
+ @dataclass
+ class CycleTemplate:
+     """Configuration for a cycle template."""
+     name: str
+     description: str
+     nodes: List[str]
+     convergence_condition: Optional[str] = None
+     max_iterations: int = 100
+     timeout: Optional[float] = None
+     parameters: Optional[Dict[str, Any]] = None
+
+
+ class CycleTemplates:
+     """Collection of pre-built cycle templates for common patterns."""
+
+     @staticmethod
+     def optimization_cycle(
+         workflow: Workflow,
+         processor_node: str,
+         evaluator_node: str,
+         convergence: str = "quality > 0.9",
+         max_iterations: int = 50,
+         cycle_id: Optional[str] = None
+     ) -> str:
+         """
+         Add an optimization cycle pattern to a workflow.
+
+         Creates a cycle where a processor generates solutions and an evaluator
+         assesses quality, continuing until the convergence criterion is met.
+
+         Args:
+             workflow: Target workflow
+             processor_node: Node that generates/improves solutions
+             evaluator_node: Node that evaluates solution quality
+             convergence: Convergence condition (e.g., "quality > 0.9")
+             max_iterations: Maximum iterations before stopping
+             cycle_id: Optional custom cycle identifier
+
+         Returns:
+             str: The cycle identifier for reference
+
+         Example:
+             >>> workflow = Workflow("optimization", "Optimization Example")
+             >>> workflow.add_node("processor", PythonCodeNode(name="processor", code="..."))
+             >>> workflow.add_node("evaluator", PythonCodeNode(name="evaluator", code="..."))
+             >>> cycle_id = CycleTemplates.optimization_cycle(
+             ...     workflow, "processor", "evaluator",
+             ...     convergence="quality > 0.95", max_iterations=100
+             ... )
+         """
+         if cycle_id is None:
+             cycle_id = f"optimization_cycle_{int(time.time())}"
+
+         # Connect processor to evaluator
+         workflow.connect(processor_node, evaluator_node)
+
+         # Close the cycle with the convergence condition
+         workflow.connect(
+             evaluator_node,
+             processor_node,
+             cycle=True,
+             max_iterations=max_iterations,
+             convergence_check=convergence,
+             cycle_id=cycle_id
+         )
+
+         return cycle_id
+
+     @staticmethod
+     def retry_cycle(
+         workflow: Workflow,
+         target_node: str,
+         max_retries: int = 3,
+         backoff_strategy: str = "exponential",
+         success_condition: str = "success == True",
+         cycle_id: Optional[str] = None
+     ) -> str:
+         """
+         Add a retry cycle pattern to a workflow.
+
+         Creates a cycle that retries a node operation with a configurable
+         backoff strategy until it succeeds or the maximum number of retries
+         is reached.
+
+         Args:
+             workflow: Target workflow
+             target_node: Node to retry on failure
+             max_retries: Maximum number of retry attempts
+             backoff_strategy: Backoff strategy ("linear", "exponential", "fixed")
+             success_condition: Condition that indicates success
+             cycle_id: Optional custom cycle identifier
+
+         Returns:
+             str: The cycle identifier for reference
+
+         Example:
+             >>> workflow = Workflow("retry", "Retry Example")
+             >>> workflow.add_node("api_call", PythonCodeNode(name="api_call", code="..."))
+             >>> cycle_id = CycleTemplates.retry_cycle(
+             ...     workflow, "api_call", max_retries=5,
+             ...     backoff_strategy="exponential"
+             ... )
+         """
+         if cycle_id is None:
+             cycle_id = f"retry_cycle_{int(time.time())}"
+
+         # Create retry controller node
+         retry_controller_id = f"{target_node}_retry_controller"
+
+         retry_code = f'''
+ import time
+ import random
+
+ # Initialize retry state (variables persist across cycle iterations;
+ # on the first iteration the names are undefined, so fall back to defaults)
+ try:
+     attempt = attempt
+     backoff_time = backoff_time
+ except NameError:
+     attempt = 0
+     backoff_time = 1.0
+
+ attempt += 1
+
+ # Check if we should retry
+ should_retry = attempt <= {max_retries}
+ final_attempt = attempt >= {max_retries}
+
+ # Calculate backoff delay
+ if "{backoff_strategy}" == "exponential":
+     backoff_time = min(60, 2 ** (attempt - 1))
+ elif "{backoff_strategy}" == "linear":
+     backoff_time = attempt * 1.0
+ else:  # fixed
+     backoff_time = 1.0
+
+ # Add jitter to prevent thundering herd
+ jitter = random.uniform(0.1, 0.3) * backoff_time
+ actual_delay = backoff_time + jitter
+
+ print(f"Retry attempt {{attempt}}/{max_retries}, delay: {{actual_delay:.2f}}s")
+
+ # Simulate delay (in a real scenario this would be handled by the scheduler)
+ if attempt > 1:
+     time.sleep(min(actual_delay, 5.0))  # Cap delay for examples
+
+ result = {{
+     "attempt": attempt,
+     "should_retry": should_retry,
+     "final_attempt": final_attempt,
+     "backoff_time": backoff_time,
+     "retry_exhausted": attempt > {max_retries}
+ }}
+ '''
+
+         workflow.add_node(retry_controller_id, PythonCodeNode(name=retry_controller_id, code=retry_code))
+
+         # Connect retry controller to target node
+         workflow.connect(retry_controller_id, target_node)
+
+         # Close the cycle with retry logic
+         workflow.connect(
+             target_node,
+             retry_controller_id,
+             cycle=True,
+             max_iterations=max_retries + 1,
+             convergence_check=f"({success_condition}) or (retry_exhausted == True)",
+             cycle_id=cycle_id
+         )
+
+         return cycle_id
+
+     @staticmethod
+     def data_quality_cycle(
+         workflow: Workflow,
+         cleaner_node: str,
+         validator_node: str,
+         quality_threshold: float = 0.95,
+         max_iterations: int = 10,
+         cycle_id: Optional[str] = None
+     ) -> str:
+         """
+         Add a data quality improvement cycle to a workflow.
+
+         Creates a cycle in which data is cleaned and validated iteratively
+         until the quality threshold is met.
+
+         Args:
+             workflow: Target workflow
+             cleaner_node: Node that cleans/improves data
+             validator_node: Node that validates data quality
+             quality_threshold: Minimum quality score to achieve
+             max_iterations: Maximum cleaning iterations
+             cycle_id: Optional custom cycle identifier
+
+         Returns:
+             str: The cycle identifier for reference
+
+         Example:
+             >>> workflow = Workflow("data_quality", "Data Quality Example")
+             >>> workflow.add_node("cleaner", PythonCodeNode(name="cleaner", code="..."))
+             >>> workflow.add_node("validator", PythonCodeNode(name="validator", code="..."))
+             >>> cycle_id = CycleTemplates.data_quality_cycle(
+             ...     workflow, "cleaner", "validator", quality_threshold=0.98
+             ... )
+         """
+         if cycle_id is None:
+             cycle_id = f"data_quality_cycle_{int(time.time())}"
+
+         # Connect cleaner to validator
+         workflow.connect(cleaner_node, validator_node)
+
+         # Close the cycle with the quality threshold
+         workflow.connect(
+             validator_node,
+             cleaner_node,
+             cycle=True,
+             max_iterations=max_iterations,
+             convergence_check=f"quality_score >= {quality_threshold}",
+             cycle_id=cycle_id
+         )
+
+         return cycle_id
+
+     @staticmethod
+     def learning_cycle(
+         workflow: Workflow,
+         trainer_node: str,
+         evaluator_node: str,
+         target_accuracy: float = 0.95,
+         max_epochs: int = 100,
+         early_stopping_patience: int = 10,
+         cycle_id: Optional[str] = None
+     ) -> str:
+         """
+         Add a machine learning training cycle to a workflow.
+
+         Creates a cycle for iterative model training with early stopping
+         based on validation performance.
+
+         Args:
+             workflow: Target workflow
+             trainer_node: Node that trains the model
+             evaluator_node: Node that evaluates model performance
+             target_accuracy: Target accuracy to achieve
+             max_epochs: Maximum training epochs
+             early_stopping_patience: Epochs to wait without improvement
+             cycle_id: Optional custom cycle identifier
+
+         Returns:
+             str: The cycle identifier for reference
+
+         Example:
+             >>> workflow = Workflow("ml_training", "ML Training Example")
+             >>> workflow.add_node("trainer", PythonCodeNode(name="trainer", code="..."))
+             >>> workflow.add_node("evaluator", PythonCodeNode(name="evaluator", code="..."))
+             >>> cycle_id = CycleTemplates.learning_cycle(
+             ...     workflow, "trainer", "evaluator", target_accuracy=0.98
+             ... )
+         """
+         if cycle_id is None:
+             cycle_id = f"learning_cycle_{int(time.time())}"
+
+         # Create early stopping controller
+         early_stop_controller_id = f"{trainer_node}_early_stop"
+
+         early_stop_code = f'''
+ # Initialize early stopping state (carried across cycle iterations)
+ try:
+     best_accuracy = best_accuracy
+     epochs_without_improvement = epochs_without_improvement
+     epoch = epoch
+ except NameError:
+     best_accuracy = 0.0
+     epochs_without_improvement = 0
+     epoch = 0
+
+ epoch += 1
+
+ # Get current accuracy from the evaluator
+ current_accuracy = accuracy if 'accuracy' in locals() else 0.0
+
+ # Check for improvement
+ if current_accuracy > best_accuracy:
+     best_accuracy = current_accuracy
+     epochs_without_improvement = 0
+     improved = True
+ else:
+     epochs_without_improvement += 1
+     improved = False
+
+ # Determine whether training should continue
+ target_reached = current_accuracy >= {target_accuracy}
+ early_stop = epochs_without_improvement >= {early_stopping_patience}
+ max_epochs_reached = epoch >= {max_epochs}
+
+ should_continue = not (target_reached or early_stop or max_epochs_reached)
+
+ print(f"Epoch {{epoch}}: accuracy={{current_accuracy:.4f}}, best={{best_accuracy:.4f}}")
+ if not improved:
+     print(f"No improvement for {{epochs_without_improvement}} epochs")
+
+ result = {{
+     "epoch": epoch,
+     "current_accuracy": current_accuracy,
+     "best_accuracy": best_accuracy,
+     "epochs_without_improvement": epochs_without_improvement,
+     "should_continue": should_continue,
+     "target_reached": target_reached,
+     "early_stopped": early_stop,
+     "training_complete": not should_continue
+ }}
+ '''
+
+         workflow.add_node(early_stop_controller_id, PythonCodeNode(name=early_stop_controller_id, code=early_stop_code))
+
+         # Connect the training cycle
+         workflow.connect(trainer_node, evaluator_node)
+         workflow.connect(evaluator_node, early_stop_controller_id)
+
+         # Close the cycle with early stopping logic
+         workflow.connect(
+             early_stop_controller_id,
+             trainer_node,
+             cycle=True,
+             max_iterations=max_epochs,
+             convergence_check="training_complete == True",
+             cycle_id=cycle_id
+         )
+
+         return cycle_id
+
+     @staticmethod
+     def convergence_cycle(
+         workflow: Workflow,
+         processor_node: str,
+         tolerance: float = 0.001,
+         max_iterations: int = 1000,
+         cycle_id: Optional[str] = None
+     ) -> str:
+         """
+         Add a numerical convergence cycle to a workflow.
+
+         Creates a cycle that continues until successive iterations
+         produce values within a specified tolerance.
+
+         Args:
+             workflow: Target workflow
+             processor_node: Node that produces values to check for convergence
+             tolerance: Maximum difference between iterations for convergence
+             max_iterations: Maximum iterations before forced termination
+             cycle_id: Optional custom cycle identifier
+
+         Returns:
+             str: The cycle identifier for reference
+
+         Example:
+             >>> workflow = Workflow("convergence", "Convergence Example")
+             >>> workflow.add_node("processor", PythonCodeNode(name="processor", code="..."))
+             >>> cycle_id = CycleTemplates.convergence_cycle(
+             ...     workflow, "processor", tolerance=0.0001
+             ... )
+         """
+         if cycle_id is None:
+             cycle_id = f"convergence_cycle_{int(time.time())}"
+
+         # Create convergence checker node
+         convergence_checker_id = f"{processor_node}_convergence_checker"
+
+         convergence_code = f'''
+ import math
+
+ # Initialize convergence state (carried across cycle iterations)
+ try:
+     previous_value = previous_value
+     iteration = iteration
+ except NameError:
+     previous_value = None
+     iteration = 0
+
+ iteration += 1
+
+ # Get the current value (the processor is assumed to output a 'value' field)
+ current_value = value if 'value' in locals() else 0.0
+
+ # Check convergence
+ if previous_value is not None:
+     difference = abs(current_value - previous_value)
+     converged = difference <= {tolerance}
+     relative_change = difference / abs(previous_value) if previous_value != 0 else float('inf')
+ else:
+     difference = float('inf')
+     converged = False
+     relative_change = float('inf')
+
+ print(f"Iteration {{iteration}}: value={{current_value:.6f}}, diff={{difference:.6f}}, converged={{converged}}")
+
+ result = {{
+     "iteration": iteration,
+     "current_value": current_value,
+     "previous_value": previous_value,
+     "difference": difference,
+     "relative_change": relative_change,
+     "converged": converged,
+     "tolerance": {tolerance}
+ }}
+
+ # Update for the next iteration
+ previous_value = current_value
+ '''
+
+         workflow.add_node(convergence_checker_id, PythonCodeNode(name=convergence_checker_id, code=convergence_code))
+
+         # Connect processor to convergence checker
+         workflow.connect(processor_node, convergence_checker_id)
+
+         # Close the cycle with the convergence condition
+         workflow.connect(
+             convergence_checker_id,
+             processor_node,
+             cycle=True,
+             max_iterations=max_iterations,
+             convergence_check="converged == True",
+             cycle_id=cycle_id
+         )
+
+         return cycle_id
+
+     @staticmethod
+     def batch_processing_cycle(
+         workflow: Workflow,
+         processor_node: str,
+         batch_size: int = 100,
+         total_items: Optional[int] = None,
+         cycle_id: Optional[str] = None
+     ) -> str:
+         """
+         Add a batch processing cycle to a workflow.
+
+         Creates a cycle that processes data in batches, continuing
+         until all items are processed.
+
+         Args:
+             workflow: Target workflow
+             processor_node: Node that processes batches
+             batch_size: Number of items to process per batch
+             total_items: Total number of items to process (if known)
+             cycle_id: Optional custom cycle identifier
+
+         Returns:
+             str: The cycle identifier for reference
+
+         Example:
+             >>> workflow = Workflow("batch", "Batch Processing Example")
+             >>> workflow.add_node("processor", PythonCodeNode(name="processor", code="..."))
+             >>> cycle_id = CycleTemplates.batch_processing_cycle(
+             ...     workflow, "processor", batch_size=50, total_items=1000
+             ... )
+         """
+         if cycle_id is None:
+             cycle_id = f"batch_cycle_{int(time.time())}"
+
+         # Create batch controller node
+         batch_controller_id = f"{processor_node}_batch_controller"
+
+         batch_code = f'''
+ # Initialize batch state (carried across cycle iterations)
+ try:
+     batch_number = batch_number
+     items_processed = items_processed
+     start_index = start_index
+ except NameError:
+     batch_number = 0
+     items_processed = 0
+     start_index = 0
+
+ total_items = {total_items}  # None when the total is unknown
+ batch_number += 1
+ end_index = start_index + {batch_size}
+
+ # Calculate progress
+ if total_items is not None:
+     remaining_items = max(0, total_items - items_processed)
+     actual_batch_size = min({batch_size}, remaining_items)
+     progress_percentage = (items_processed / total_items) * 100
+     all_processed = items_processed >= total_items
+ else:
+     # If the total is unknown, rely on the processor to indicate completion
+     actual_batch_size = {batch_size}
+     progress_percentage = None
+     all_processed = False  # Will be determined by the processor
+
+ print(f"Processing batch {{batch_number}}: items {{start_index}}-{{end_index - 1}}")
+ if progress_percentage is not None:
+     print(f"Progress: {{progress_percentage:.1f}}% ({{items_processed}}/{{total_items}})")
+
+ result = {{
+     "batch_number": batch_number,
+     "start_index": start_index,
+     "end_index": end_index,
+     "batch_size": actual_batch_size,
+     "items_processed": items_processed,
+     "all_processed": all_processed,
+     "progress_percentage": progress_percentage
+ }}
+
+ # Update for the next iteration
+ start_index = end_index
+ items_processed += actual_batch_size
+ '''
+
+         workflow.add_node(batch_controller_id, PythonCodeNode(name=batch_controller_id, code=batch_code))
+
+         # Connect batch controller to processor
+         workflow.connect(batch_controller_id, processor_node)
+
+         # Calculate max iterations based on total items
+         if total_items is not None:
+             max_iterations = math.ceil(total_items / batch_size) + 1
+         else:
+             max_iterations = 1000  # Default upper bound
+
+         # Close the cycle with the completion condition
+         workflow.connect(
+             processor_node,
+             batch_controller_id,
+             cycle=True,
+             max_iterations=max_iterations,
+             convergence_check="all_processed == True",
+             cycle_id=cycle_id
+         )
+
+         return cycle_id
+
+
+ # Convenience methods to add to Workflow class
+ def add_optimization_cycle(
+     self,
+     processor_node: str,
+     evaluator_node: str,
+     convergence: str = "quality > 0.9",
+     max_iterations: int = 50,
+     cycle_id: Optional[str] = None
+ ) -> str:
+     """Add an optimization cycle pattern to this workflow."""
+     return CycleTemplates.optimization_cycle(
+         self, processor_node, evaluator_node, convergence, max_iterations, cycle_id
+     )
+
+
+ def add_retry_cycle(
+     self,
+     target_node: str,
+     max_retries: int = 3,
+     backoff_strategy: str = "exponential",
+     success_condition: str = "success == True",
+     cycle_id: Optional[str] = None
+ ) -> str:
+     """Add a retry cycle pattern to this workflow."""
+     return CycleTemplates.retry_cycle(
+         self, target_node, max_retries, backoff_strategy, success_condition, cycle_id
+     )
+
+
+ def add_data_quality_cycle(
+     self,
+     cleaner_node: str,
+     validator_node: str,
+     quality_threshold: float = 0.95,
+     max_iterations: int = 10,
+     cycle_id: Optional[str] = None
+ ) -> str:
+     """Add a data quality improvement cycle to this workflow."""
+     return CycleTemplates.data_quality_cycle(
+         self, cleaner_node, validator_node, quality_threshold, max_iterations, cycle_id
+     )
+
+
+ def add_learning_cycle(
+     self,
+     trainer_node: str,
+     evaluator_node: str,
+     target_accuracy: float = 0.95,
+     max_epochs: int = 100,
+     early_stopping_patience: int = 10,
+     cycle_id: Optional[str] = None
+ ) -> str:
+     """Add a machine learning training cycle to this workflow."""
+     return CycleTemplates.learning_cycle(
+         self, trainer_node, evaluator_node, target_accuracy, max_epochs, early_stopping_patience, cycle_id
+     )
+
+
+ def add_convergence_cycle(
+     self,
+     processor_node: str,
+     tolerance: float = 0.001,
+     max_iterations: int = 1000,
+     cycle_id: Optional[str] = None
+ ) -> str:
+     """Add a numerical convergence cycle to this workflow."""
+     return CycleTemplates.convergence_cycle(
+         self, processor_node, tolerance, max_iterations, cycle_id
+     )
+
+
+ def add_batch_processing_cycle(
+     self,
+     processor_node: str,
+     batch_size: int = 100,
+     total_items: Optional[int] = None,
+     cycle_id: Optional[str] = None
+ ) -> str:
+     """Add a batch processing cycle to this workflow."""
+     return CycleTemplates.batch_processing_cycle(
+         self, processor_node, batch_size, total_items, cycle_id
+     )
+
+
+ # Add convenience methods to Workflow class
+ Workflow.add_optimization_cycle = add_optimization_cycle
+ Workflow.add_retry_cycle = add_retry_cycle
+ Workflow.add_data_quality_cycle = add_data_quality_cycle
+ Workflow.add_learning_cycle = add_learning_cycle
+ Workflow.add_convergence_cycle = add_convergence_cycle
+ Workflow.add_batch_processing_cycle = add_batch_processing_cycle
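
Usage note: the module above exposes each template twice, once as a CycleTemplates static factory and once as a method it attaches to Workflow at import time. A minimal sketch of both call forms, mirroring the doctests in the module docstring; the PythonCodeNode body here is a hypothetical placeholder, not taken from this diff:

    # Sketch only: assumes the Workflow / PythonCodeNode APIs as used in
    # kailash/workflow/templates.py above.
    from kailash.nodes.code import PythonCodeNode
    from kailash.workflow import Workflow
    from kailash.workflow.templates import CycleTemplates  # import also patches Workflow

    workflow = Workflow("retry_demo", "Retry Demo")
    # Hypothetical node body; a real node would perform the fallible call.
    workflow.add_node(
        "api_call",
        PythonCodeNode(name="api_call", code="result = {'success': True}"),
    )

    # Static factory form:
    cycle_id = CycleTemplates.retry_cycle(workflow, "api_call", max_retries=5)

    # Equivalent method attached to Workflow by this module:
    # cycle_id = workflow.add_retry_cycle("api_call", max_retries=5)

Because the methods are bound by plain attribute assignment at the bottom of the module, merely importing kailash.workflow.templates is enough to make add_retry_cycle and its siblings available on every Workflow instance.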