kailash 0.1.5__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. kailash/__init__.py +1 -1
  2. kailash/access_control.py +740 -0
  3. kailash/api/__main__.py +6 -0
  4. kailash/api/auth.py +668 -0
  5. kailash/api/custom_nodes.py +285 -0
  6. kailash/api/custom_nodes_secure.py +377 -0
  7. kailash/api/database.py +620 -0
  8. kailash/api/studio.py +915 -0
  9. kailash/api/studio_secure.py +893 -0
  10. kailash/mcp/__init__.py +53 -0
  11. kailash/mcp/__main__.py +13 -0
  12. kailash/mcp/ai_registry_server.py +712 -0
  13. kailash/mcp/client.py +447 -0
  14. kailash/mcp/client_new.py +334 -0
  15. kailash/mcp/server.py +293 -0
  16. kailash/mcp/server_new.py +336 -0
  17. kailash/mcp/servers/__init__.py +12 -0
  18. kailash/mcp/servers/ai_registry.py +289 -0
  19. kailash/nodes/__init__.py +4 -2
  20. kailash/nodes/ai/__init__.py +2 -0
  21. kailash/nodes/ai/a2a.py +714 -67
  22. kailash/nodes/ai/intelligent_agent_orchestrator.py +31 -37
  23. kailash/nodes/ai/iterative_llm_agent.py +1280 -0
  24. kailash/nodes/ai/llm_agent.py +324 -1
  25. kailash/nodes/ai/self_organizing.py +5 -6
  26. kailash/nodes/base.py +15 -2
  27. kailash/nodes/base_async.py +45 -0
  28. kailash/nodes/base_cycle_aware.py +374 -0
  29. kailash/nodes/base_with_acl.py +338 -0
  30. kailash/nodes/code/python.py +135 -27
  31. kailash/nodes/data/__init__.py +1 -2
  32. kailash/nodes/data/readers.py +16 -6
  33. kailash/nodes/data/sql.py +699 -256
  34. kailash/nodes/data/writers.py +16 -6
  35. kailash/nodes/logic/__init__.py +8 -0
  36. kailash/nodes/logic/convergence.py +642 -0
  37. kailash/nodes/logic/loop.py +153 -0
  38. kailash/nodes/logic/operations.py +187 -27
  39. kailash/nodes/mixins/__init__.py +11 -0
  40. kailash/nodes/mixins/mcp.py +228 -0
  41. kailash/nodes/mixins.py +387 -0
  42. kailash/runtime/__init__.py +2 -1
  43. kailash/runtime/access_controlled.py +458 -0
  44. kailash/runtime/local.py +106 -33
  45. kailash/runtime/parallel_cyclic.py +529 -0
  46. kailash/sdk_exceptions.py +90 -5
  47. kailash/security.py +845 -0
  48. kailash/tracking/manager.py +38 -15
  49. kailash/tracking/models.py +1 -1
  50. kailash/tracking/storage/filesystem.py +30 -2
  51. kailash/utils/__init__.py +8 -0
  52. kailash/workflow/__init__.py +18 -0
  53. kailash/workflow/convergence.py +270 -0
  54. kailash/workflow/cycle_analyzer.py +889 -0
  55. kailash/workflow/cycle_builder.py +579 -0
  56. kailash/workflow/cycle_config.py +725 -0
  57. kailash/workflow/cycle_debugger.py +860 -0
  58. kailash/workflow/cycle_exceptions.py +615 -0
  59. kailash/workflow/cycle_profiler.py +741 -0
  60. kailash/workflow/cycle_state.py +338 -0
  61. kailash/workflow/cyclic_runner.py +985 -0
  62. kailash/workflow/graph.py +500 -39
  63. kailash/workflow/migration.py +809 -0
  64. kailash/workflow/safety.py +365 -0
  65. kailash/workflow/templates.py +763 -0
  66. kailash/workflow/validation.py +751 -0
  67. {kailash-0.1.5.dist-info → kailash-0.2.1.dist-info}/METADATA +259 -12
  68. kailash-0.2.1.dist-info/RECORD +125 -0
  69. kailash/nodes/mcp/__init__.py +0 -11
  70. kailash/nodes/mcp/client.py +0 -554
  71. kailash/nodes/mcp/resource.py +0 -682
  72. kailash/nodes/mcp/server.py +0 -577
  73. kailash-0.1.5.dist-info/RECORD +0 -88
  74. {kailash-0.1.5.dist-info → kailash-0.2.1.dist-info}/WHEEL +0 -0
  75. {kailash-0.1.5.dist-info → kailash-0.2.1.dist-info}/entry_points.txt +0 -0
  76. {kailash-0.1.5.dist-info → kailash-0.2.1.dist-info}/licenses/LICENSE +0 -0
  77. {kailash-0.1.5.dist-info → kailash-0.2.1.dist-info}/top_level.txt +0 -0
kailash/runtime/parallel_cyclic.py ADDED
@@ -0,0 +1,529 @@
+ """Enhanced parallel runtime with cyclic workflow support."""
+
+ import logging
+ from concurrent.futures import ThreadPoolExecutor, as_completed
+ from datetime import datetime, timezone
+ from typing import Any, Dict, List, Optional, Set, Tuple
+
+ import networkx as nx
+
+ from kailash.nodes.base import Node
+ from kailash.runtime.local import LocalRuntime
+ from kailash.sdk_exceptions import RuntimeExecutionError, WorkflowExecutionError
+ from kailash.tracking import TaskManager, TaskStatus
+ from kailash.tracking.metrics_collector import MetricsCollector
+ from kailash.tracking.models import TaskMetrics
+ from kailash.workflow import Workflow
+ from kailash.workflow.cyclic_runner import CyclicWorkflowExecutor
+
+ logger = logging.getLogger(__name__)
+
+
+ class ParallelCyclicRuntime:
+     """Enhanced parallel runtime with support for cyclic workflows and concurrent execution."""
+
+     def __init__(
+         self,
+         debug: bool = False,
+         max_workers: int = 4,
+         enable_cycles: bool = True,
+         enable_async: bool = True,
+     ):
+         """Initialize the parallel cyclic runtime.
+
+         Args:
+             debug: Whether to enable debug logging
+             max_workers: Maximum number of worker threads for parallel execution
+             enable_cycles: Whether to enable cyclic workflow support
+             enable_async: Whether to enable async execution features
+         """
+         self.debug = debug
+         self.max_workers = max_workers
+         self.enable_cycles = enable_cycles
+         self.enable_async = enable_async
+         self.logger = logger
+
+         # Initialize components
+         self.local_runtime = LocalRuntime(debug=debug, enable_cycles=enable_cycles)
+         if enable_cycles:
+             self.cyclic_executor = CyclicWorkflowExecutor()
+
+         if debug:
+             self.logger.setLevel(logging.DEBUG)
+         else:
+             self.logger.setLevel(logging.INFO)
+
+     def execute(
+         self,
+         workflow: Workflow,
+         task_manager: Optional[TaskManager] = None,
+         parameters: Optional[Dict[str, Dict[str, Any]]] = None,
+         parallel_nodes: Optional[Set[str]] = None,
+     ) -> Tuple[Dict[str, Any], Optional[str]]:
+         """Execute a workflow with parallel and cyclic support.
+
+         Args:
+             workflow: Workflow to execute
+             task_manager: Optional task manager for tracking
+             parameters: Optional parameter overrides per node
+             parallel_nodes: Set of node IDs that can be executed in parallel
+
+         Returns:
+             Tuple of (results dict, run_id)
+
+         Raises:
+             RuntimeExecutionError: If execution fails
+             WorkflowValidationError: If workflow is invalid
+         """
+         if not workflow:
+             raise RuntimeExecutionError("No workflow provided")
+
+         try:
+             # Validate workflow
+             workflow.validate()
+
+             # Check for cycles first
+             if self.enable_cycles and workflow.has_cycles():
+                 self.logger.info(
+                     "Cyclic workflow detected, checking for parallel execution opportunities"
+                 )
+                 return self._execute_cyclic_workflow(workflow, task_manager, parameters)
+
+             # Check for parallel execution opportunities in DAG workflows
+             if parallel_nodes or self._can_execute_in_parallel(workflow):
+                 self.logger.info("Parallel execution opportunities detected")
+                 return self._execute_parallel_dag(
+                     workflow, task_manager, parameters, parallel_nodes
+                 )
+
+             # Fall back to standard local runtime
+             self.logger.info("Using standard local runtime execution")
+             return self.local_runtime.execute(workflow, task_manager, parameters)
+
+         except Exception as e:
+             raise RuntimeExecutionError(
+                 f"Parallel runtime execution failed: {e}"
+             ) from e
+
+     def _execute_cyclic_workflow(
+         self,
+         workflow: Workflow,
+         task_manager: Optional[TaskManager],
+         parameters: Optional[Dict[str, Dict[str, Any]]],
+     ) -> Tuple[Dict[str, Any], str]:
+         """Execute a cyclic workflow with potential parallel optimizations.
+
+         Args:
+             workflow: Cyclic workflow to execute
+             task_manager: Optional task manager
+             parameters: Optional parameters
+
+         Returns:
+             Tuple of (results dict, run_id)
+         """
+         # For now, delegate to cyclic executor
+         # Future enhancement: identify parallelizable parts within cycles
+         self.logger.info("Executing cyclic workflow with CyclicWorkflowExecutor")
+
+         try:
+             results, run_id = self.cyclic_executor.execute(workflow, parameters)
+
+             # TODO: Add cycle-aware parallel execution optimizations
+             # - Parallel execution of independent cycles
+             # - Parallel execution of DAG portions between cycles
+             # - Async cycle monitoring and resource management
+
+             return results, run_id
+
+         except Exception as e:
+             raise RuntimeExecutionError(f"Cyclic workflow execution failed: {e}") from e
+
+     def _execute_parallel_dag(
+         self,
+         workflow: Workflow,
+         task_manager: Optional[TaskManager],
+         parameters: Optional[Dict[str, Dict[str, Any]]],
+         parallel_nodes: Optional[Set[str]],
+     ) -> Tuple[Dict[str, Any], Optional[str]]:
+         """Execute a DAG workflow with parallel node execution.
+
+         Args:
+             workflow: DAG workflow to execute
+             task_manager: Optional task manager
+             parameters: Optional parameters
+             parallel_nodes: Optional set of nodes that can be executed in parallel
+
+         Returns:
+             Tuple of (results dict, run_id)
+         """
+         import uuid
+
+         run_id = str(uuid.uuid4())
+
+         self.logger.info(
+             f"Starting parallel DAG execution: {workflow.name} (run_id: {run_id})"
+         )
+
+         # Initialize tracking
+         if task_manager:
+             try:
+                 run_id = task_manager.create_run(
+                     workflow_name=workflow.name,
+                     metadata={
+                         "parameters": parameters,
+                         "debug": self.debug,
+                         "runtime": "parallel_cyclic",
+                         "max_workers": self.max_workers,
+                     },
+                 )
+             except Exception as e:
+                 self.logger.warning(f"Failed to create task run: {e}")
+
+         try:
+             # Analyze workflow for parallel execution groups
+             execution_groups = self._analyze_parallel_groups(workflow, parallel_nodes)
+
+             # Execute groups sequentially, but nodes within groups in parallel
+             results = {}
+
+             with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
+                 for group_index, node_group in enumerate(execution_groups):
+                     self.logger.info(
+                         f"Executing parallel group {group_index + 1}/{len(execution_groups)}: {node_group}"
+                     )
+
+                     # Submit all nodes in this group for parallel execution
+                     future_to_node = {}
+                     for node_id in node_group:
+                         future = executor.submit(
+                             self._execute_single_node,
+                             workflow,
+                             node_id,
+                             results,
+                             parameters,
+                             task_manager,
+                             run_id,
+                         )
+                         future_to_node[future] = node_id
+
+                     # Wait for all nodes in this group to complete
+                     group_results = {}
+                     for future in as_completed(future_to_node):
+                         node_id = future_to_node[future]
+                         try:
+                             node_result = future.result()
+                             group_results[node_id] = node_result
+                             self.logger.debug(f"Node {node_id} completed successfully")
+                         except Exception as e:
+                             self.logger.error(f"Node {node_id} failed: {e}")
+                             # Decide whether to continue or fail the entire workflow
+                             if self._should_stop_on_group_error(
+                                 workflow, node_id, node_group
+                             ):
+                                 raise WorkflowExecutionError(
+                                     f"Critical node {node_id} failed: {e}"
+                                 ) from e
+                             else:
+                                 group_results[node_id] = {
+                                     "error": str(e),
+                                     "error_type": type(e).__name__,
+                                     "failed": True,
+                                 }
+
+                     # Update results with this group's outputs
+                     results.update(group_results)
+
+             # Mark run as completed
+             if task_manager and run_id:
+                 try:
+                     task_manager.update_run_status(run_id, "completed")
+                 except Exception as e:
+                     self.logger.warning(f"Failed to update run status: {e}")
+
+             return results, run_id
+
+         except Exception as e:
+             # Mark run as failed
+             if task_manager and run_id:
+                 try:
+                     task_manager.update_run_status(run_id, "failed", error=str(e))
+                 except Exception:
+                     pass
+             raise
+
+     def _analyze_parallel_groups(
+         self, workflow: Workflow, parallel_nodes: Optional[Set[str]]
+     ) -> List[List[str]]:
+         """Analyze workflow to identify groups of nodes that can be executed in parallel.
+
+         Args:
+             workflow: Workflow to analyze
+             parallel_nodes: Optional hint for nodes that can be parallelized
+
+         Returns:
+             List of execution groups, each containing nodes that can run in parallel
+         """
+         # Get topological ordering to respect dependencies
+         try:
+             topo_order = list(nx.topological_sort(workflow.graph))
+         except nx.NetworkXError as e:
+             raise WorkflowExecutionError(
+                 f"Failed to determine execution order: {e}"
+             ) from e
+
+         # Group nodes by their dependency level
+         # Nodes at the same level can potentially be executed in parallel
+         levels = {}
+         for node in topo_order:
+             # Find the maximum level of all predecessors
+             max_pred_level = -1
+             for pred in workflow.graph.predecessors(node):
+                 max_pred_level = max(max_pred_level, levels.get(pred, 0))
+             levels[node] = max_pred_level + 1
+
+         # Group nodes by level
+         level_groups = {}
+         for node, level in levels.items():
+             if level not in level_groups:
+                 level_groups[level] = []
+             level_groups[level].append(node)
+
+         # Convert to execution groups
+         execution_groups = []
+         for level in sorted(level_groups.keys()):
+             nodes_at_level = level_groups[level]
+
+             # If parallel_nodes hint is provided, only parallelize those nodes
+             if parallel_nodes:
+                 parallel_subset = [n for n in nodes_at_level if n in parallel_nodes]
+                 sequential_subset = [
+                     n for n in nodes_at_level if n not in parallel_nodes
+                 ]
+
+                 # Add parallel subset as a group
+                 if parallel_subset:
+                     execution_groups.append(parallel_subset)
+
+                 # Add sequential nodes one by one
+                 for node in sequential_subset:
+                     execution_groups.append([node])
+             else:
+                 # All nodes at this level can be parallelized
+                 if len(nodes_at_level) > 1:
+                     execution_groups.append(nodes_at_level)
+                 else:
+                     execution_groups.append(nodes_at_level)
+
+         return execution_groups
+
+     def _execute_single_node(
+         self,
+         workflow: Workflow,
+         node_id: str,
+         previous_results: Dict[str, Any],
+         parameters: Optional[Dict[str, Dict[str, Any]]],
+         task_manager: Optional[TaskManager],
+         run_id: Optional[str],
+     ) -> Dict[str, Any]:
+         """Execute a single node in isolation.
+
+         Args:
+             workflow: Workflow containing the node
+             node_id: ID of node to execute
+             previous_results: Results from previously executed nodes
+             parameters: Optional parameter overrides
+             task_manager: Optional task manager
+             run_id: Optional run ID for tracking
+
+         Returns:
+             Node execution results
+
+         Raises:
+             WorkflowExecutionError: If node execution fails
+         """
+         # Get node instance
+         node_instance = workflow._node_instances.get(node_id)
+         if not node_instance:
+             raise WorkflowExecutionError(
+                 f"Node instance '{node_id}' not found in workflow"
+             )
+
+         # Start task tracking
+         task = None
+         if task_manager and run_id:
+             try:
+                 task = task_manager.create_task(
+                     run_id=run_id,
+                     node_id=node_id,
+                     node_type=node_instance.__class__.__name__,
+                     started_at=datetime.now(timezone.utc),
+                     metadata={},
+                 )
+                 if task:
+                     task_manager.update_task_status(task.task_id, TaskStatus.RUNNING)
+             except Exception as e:
+                 self.logger.warning(f"Failed to create task for node '{node_id}': {e}")
+
+         try:
+             # Prepare inputs
+             inputs = self._prepare_node_inputs_parallel(
+                 workflow,
+                 node_id,
+                 node_instance,
+                 previous_results,
+                 parameters.get(node_id, {}) if parameters else {},
+             )
+
+             if self.debug:
+                 self.logger.debug(f"Node {node_id} inputs: {inputs}")
+
+             # Execute node with metrics collection
+             collector = MetricsCollector()
+             with collector.collect(node_id=node_id) as metrics_context:
+                 outputs = node_instance.execute(**inputs)
+
+             # Get performance metrics
+             performance_metrics = metrics_context.result()
+
+             if self.debug:
+                 self.logger.debug(f"Node {node_id} outputs: {outputs}")
+
+             # Update task status
+             if task and task_manager:
+                 task_metrics_data = performance_metrics.to_task_metrics()
+                 task_metrics = TaskMetrics(**task_metrics_data)
+
+                 task_manager.update_task_status(
+                     task.task_id,
+                     TaskStatus.COMPLETED,
+                     result=outputs,
+                     ended_at=datetime.now(timezone.utc),
+                     metadata={"execution_time": performance_metrics.duration},
+                 )
+                 task_manager.update_task_metrics(task.task_id, task_metrics)
+
+             self.logger.info(
+                 f"Node {node_id} completed successfully in {performance_metrics.duration:.3f}s"
+             )
+
+             return outputs
+
+         except Exception as e:
+             # Update task status
+             if task and task_manager:
+                 task_manager.update_task_status(
+                     task.task_id,
+                     TaskStatus.FAILED,
+                     error=str(e),
+                     ended_at=datetime.now(timezone.utc),
+                 )
+
+             self.logger.error(f"Node {node_id} failed: {e}", exc_info=self.debug)
+             raise WorkflowExecutionError(
+                 f"Node '{node_id}' execution failed: {e}"
+             ) from e
+
+     def _prepare_node_inputs_parallel(
+         self,
+         workflow: Workflow,
+         node_id: str,
+         node_instance: Node,
+         previous_results: Dict[str, Any],
+         parameters: Dict[str, Any],
+     ) -> Dict[str, Any]:
+         """Prepare inputs for a node execution in parallel context.
+
+         Args:
+             workflow: The workflow being executed
+             node_id: Current node ID
+             node_instance: Current node instance
+             previous_results: Results from previously executed nodes
+             parameters: Parameter overrides
+
+         Returns:
+             Dictionary of inputs for the node
+         """
+         inputs = {}
+
+         # Start with node configuration
+         inputs.update(node_instance.config)
+
+         # Add connected inputs from other nodes
+         for edge in workflow.graph.in_edges(node_id, data=True):
+             source_node_id = edge[0]
+             mapping = edge[2].get("mapping", {})
+
+             if source_node_id in previous_results:
+                 source_outputs = previous_results[source_node_id]
+
+                 # Check if the source node failed
+                 if isinstance(source_outputs, dict) and source_outputs.get("failed"):
+                     raise WorkflowExecutionError(
+                         f"Cannot use outputs from failed node '{source_node_id}'"
+                     )
+
+                 for source_key, target_key in mapping.items():
+                     if source_key in source_outputs:
+                         inputs[target_key] = source_outputs[source_key]
+                     else:
+                         self.logger.warning(
+                             f"Source output '{source_key}' not found in node '{source_node_id}'. "
+                             f"Available outputs: {list(source_outputs.keys())}"
+                         )
+
+         # Apply parameter overrides
+         inputs.update(parameters)
+
+         return inputs
+
+     def _can_execute_in_parallel(self, workflow: Workflow) -> bool:
+         """Determine if workflow has opportunities for parallel execution.
+
+         Args:
+             workflow: Workflow to analyze
+
+         Returns:
+             True if parallel execution is beneficial
+         """
+         # Simple heuristic: if there are nodes at the same dependency level
+         try:
+             topo_order = list(nx.topological_sort(workflow.graph))
+
+             # Calculate dependency levels
+             levels = {}
+             for node in topo_order:
+                 max_pred_level = -1
+                 for pred in workflow.graph.predecessors(node):
+                     max_pred_level = max(max_pred_level, levels.get(pred, 0))
+                 levels[node] = max_pred_level + 1
+
+             # Check if any level has multiple nodes
+             level_counts = {}
+             for level in levels.values():
+                 level_counts[level] = level_counts.get(level, 0) + 1
+
+             # If any level has more than one node, parallel execution is beneficial
+             return any(count > 1 for count in level_counts.values())
+
+         except nx.NetworkXError:
+             return False
+
+     def _should_stop_on_group_error(
+         self, workflow: Workflow, failed_node: str, node_group: List[str]
+     ) -> bool:
+         """Determine if execution should stop when a node in a parallel group fails.
+
+         Args:
+             workflow: The workflow being executed
+             failed_node: Failed node ID
+             node_group: The parallel group containing the failed node
+
+         Returns:
+             Whether to stop execution
+         """
+         # Check if any other nodes in the workflow depend on this failed node
+         has_dependents = workflow.graph.out_degree(failed_node) > 0
+
+         # If the failed node has dependents, we should stop
+         # Future enhancement: implement more sophisticated error handling policies
+         return has_dependents
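
Usage note: a minimal sketch of driving the new runtime, based only on the ParallelCyclicRuntime.execute() signature added above. The workflow object, node IDs, and parameter values here are hypothetical placeholders; how the workflow itself is built is not part of this diff.

    from kailash.runtime.parallel_cyclic import ParallelCyclicRuntime

    runtime = ParallelCyclicRuntime(debug=False, max_workers=8)

    # 'workflow' is assumed to be a kailash.workflow.Workflow built elsewhere.
    # parallel_nodes is an optional hint: only these node IDs are grouped for
    # concurrent execution; other nodes at the same level run one at a time.
    results, run_id = runtime.execute(
        workflow,
        parameters={"loader": {"path": "data.csv"}},  # hypothetical per-node overrides
        parallel_nodes={"loader", "fetcher"},  # hypothetical node IDs
    )

Without the parallel_nodes hint, _analyze_parallel_groups() assigns each node a dependency level (one more than its deepest predecessor) and groups nodes by level; for a diamond-shaped graph A -> {B, C} -> D it yields [["A"], ["B", "C"], ["D"]], so B and C execute concurrently in the thread pool.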
kailash/sdk_exceptions.py CHANGED
@@ -1,8 +1,76 @@
- """Exception classes for the Kailash SDK.
-
- This module defines all custom exceptions used throughout the Kailash SDK.
- Each exception includes helpful error messages and context to guide users
- toward correct usage.
+ """Comprehensive Exception System for the Kailash SDK.
+
+ This module provides a comprehensive hierarchy of custom exceptions designed to
+ provide clear, actionable error information throughout the Kailash SDK. Each
+ exception includes detailed context, suggestions for resolution, and integration
+ with debugging and monitoring systems.
+
+ Design Philosophy:
+     Provides a clear, hierarchical exception system that enables precise error
+     handling and debugging. Each exception includes comprehensive context,
+     actionable suggestions, and integration points for monitoring and logging
+     systems.
+
+ Key Features:
+     - Hierarchical exception structure for precise error handling
+     - Rich context information with actionable suggestions
+     - Integration with logging and monitoring systems
+     - Cycle-specific exceptions for advanced workflow patterns
+     - Security and safety violation reporting
+     - Performance and resource-related error tracking
+
+ Exception Categories:
+     - **Core Exceptions**: Fundamental SDK operations
+     - **Workflow Exceptions**: Workflow creation and validation
+     - **Execution Exceptions**: Runtime execution errors
+     - **Cycle Exceptions**: Cyclic workflow-specific errors
+     - **Security Exceptions**: Safety and security violations
+     - **Configuration Exceptions**: Parameter and setup errors
+
+ Core Exception Hierarchy:
+     - KailashException: Base exception for all SDK errors
+         - NodeException: Node-related errors
+             - NodeValidationError: Validation failures
+             - NodeExecutionError: Runtime execution issues
+             - NodeConfigurationError: Configuration problems
+         - WorkflowException: Workflow-related errors
+             - WorkflowValidationError: Validation failures
+             - WorkflowExecutionError: Execution failures
+         - RuntimeException: Runtime execution errors
+         - SecurityException: Security and safety violations
+
+ Cycle-Specific Exceptions (v0.2.0):
+     Enhanced exception handling for cyclic workflows with detailed context
+     and resolution guidance for cycle-specific issues.
+
+ Examples:
+     Basic exception handling:
+
+     >>> from kailash.sdk_exceptions import WorkflowValidationError, NodeExecutionError
+     >>> try:
+     ...     workflow.validate()
+     ... except WorkflowValidationError as e:
+     ...     print(f"Validation failed: {e}")
+     ...     # Exception includes helpful context and suggestions
+
+     Production error monitoring:
+
+     >>> import logging
+     >>> from kailash.sdk_exceptions import KailashException
+     >>> logger = logging.getLogger(__name__)
+     >>> try:
+     ...     runtime.execute(workflow)
+     ... except KailashException as e:
+     ...     logger.error(f"SDK Error: {e}", extra={
+     ...         'error_type': type(e).__name__,
+     ...         'workflow_id': getattr(workflow, 'workflow_id', None)
+     ...     })
+     ...     raise
+
+ See Also:
+     - :mod:`kailash.workflow.cycle_exceptions` for cycle-specific errors
+     - :mod:`kailash.security` for security validation and exceptions
+     - :doc:`/guides/error_handling` for comprehensive error handling patterns
  """
  
  
@@ -120,6 +188,23 @@ class ConnectionError(WorkflowException):
      pass
  
  
+ class CycleConfigurationError(WorkflowException):
+     """Raised when cycle configuration is invalid.
+
+     This exception is thrown by the CycleBuilder API when cycle parameters
+     are missing, invalid, or conflicting. It provides actionable error messages
+     to guide developers toward correct cycle configuration.
+
+     Common scenarios:
+     - Missing required cycle parameters (max_iterations or convergence_check)
+     - Invalid parameter values (negative iterations, empty conditions)
+     - Unsafe expressions in convergence conditions
+     - Missing source/target nodes before build()
+     """
+
+     pass
+
+
  # Runtime-related exceptions
  class RuntimeException(KailashException):
      """Base exception for runtime-related errors."""