kailash 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kailash/__init__.py +31 -0
- kailash/__main__.py +11 -0
- kailash/cli/__init__.py +5 -0
- kailash/cli/commands.py +563 -0
- kailash/manifest.py +778 -0
- kailash/nodes/__init__.py +23 -0
- kailash/nodes/ai/__init__.py +26 -0
- kailash/nodes/ai/agents.py +417 -0
- kailash/nodes/ai/models.py +488 -0
- kailash/nodes/api/__init__.py +52 -0
- kailash/nodes/api/auth.py +567 -0
- kailash/nodes/api/graphql.py +480 -0
- kailash/nodes/api/http.py +598 -0
- kailash/nodes/api/rate_limiting.py +572 -0
- kailash/nodes/api/rest.py +665 -0
- kailash/nodes/base.py +1032 -0
- kailash/nodes/base_async.py +128 -0
- kailash/nodes/code/__init__.py +32 -0
- kailash/nodes/code/python.py +1021 -0
- kailash/nodes/data/__init__.py +125 -0
- kailash/nodes/data/readers.py +496 -0
- kailash/nodes/data/sharepoint_graph.py +623 -0
- kailash/nodes/data/sql.py +380 -0
- kailash/nodes/data/streaming.py +1168 -0
- kailash/nodes/data/vector_db.py +964 -0
- kailash/nodes/data/writers.py +529 -0
- kailash/nodes/logic/__init__.py +6 -0
- kailash/nodes/logic/async_operations.py +702 -0
- kailash/nodes/logic/operations.py +551 -0
- kailash/nodes/transform/__init__.py +5 -0
- kailash/nodes/transform/processors.py +379 -0
- kailash/runtime/__init__.py +6 -0
- kailash/runtime/async_local.py +356 -0
- kailash/runtime/docker.py +697 -0
- kailash/runtime/local.py +434 -0
- kailash/runtime/parallel.py +557 -0
- kailash/runtime/runner.py +110 -0
- kailash/runtime/testing.py +347 -0
- kailash/sdk_exceptions.py +307 -0
- kailash/tracking/__init__.py +7 -0
- kailash/tracking/manager.py +885 -0
- kailash/tracking/metrics_collector.py +342 -0
- kailash/tracking/models.py +535 -0
- kailash/tracking/storage/__init__.py +0 -0
- kailash/tracking/storage/base.py +113 -0
- kailash/tracking/storage/database.py +619 -0
- kailash/tracking/storage/filesystem.py +543 -0
- kailash/utils/__init__.py +0 -0
- kailash/utils/export.py +924 -0
- kailash/utils/templates.py +680 -0
- kailash/visualization/__init__.py +62 -0
- kailash/visualization/api.py +732 -0
- kailash/visualization/dashboard.py +951 -0
- kailash/visualization/performance.py +808 -0
- kailash/visualization/reports.py +1471 -0
- kailash/workflow/__init__.py +15 -0
- kailash/workflow/builder.py +245 -0
- kailash/workflow/graph.py +827 -0
- kailash/workflow/mermaid_visualizer.py +628 -0
- kailash/workflow/mock_registry.py +63 -0
- kailash/workflow/runner.py +302 -0
- kailash/workflow/state.py +238 -0
- kailash/workflow/visualization.py +588 -0
- kailash-0.1.0.dist-info/METADATA +710 -0
- kailash-0.1.0.dist-info/RECORD +69 -0
- kailash-0.1.0.dist-info/WHEEL +5 -0
- kailash-0.1.0.dist-info/entry_points.txt +2 -0
- kailash-0.1.0.dist-info/licenses/LICENSE +21 -0
- kailash-0.1.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,347 @@
|
|
1
|
+
"""Testing utilities for Kailash workflows and nodes."""
|
2
|
+
|
3
|
+
import json
|
4
|
+
from typing import Any, Dict, List, Optional, Tuple, Union
|
5
|
+
|
6
|
+
from kailash.nodes.base import Node, NodeParameter
|
7
|
+
from kailash.runtime.local import LocalRuntime
|
8
|
+
from kailash.sdk_exceptions import NodeValidationError, WorkflowExecutionError
|
9
|
+
from kailash.tracking import TaskManager
|
10
|
+
from kailash.workflow import Workflow
|
11
|
+
|
12
|
+
|
13
|
+
class MockNode(Node):
    """Configurable stand-in node used by the test helpers.

    Behavior is fixed at construction time via keyword config:
    ``return_value`` (dict returned by run), ``should_fail`` (raise instead
    of returning), and ``fail_message`` (text of the raised error).
    """

    def __init__(self, **kwargs):
        """Capture the canned behavior from keyword configuration."""
        super().__init__(**kwargs)
        self._return_value = kwargs.get("return_value", {"output": "test"})
        self._should_fail = kwargs.get("should_fail", False)
        self._fail_message = kwargs.get("fail_message", "Mock failure")
        # Incremented on every run(); exposed via execution_count.
        self._execution_count = 0

    def get_parameters(self) -> Dict[str, NodeParameter]:
        """Describe the parameters this mock accepts."""
        input_param = NodeParameter(
            name="input", type=Any, required=False, description="Mock input"
        )
        return_param = NodeParameter(
            name="return_value",
            type=dict,
            required=False,
            default={"output": "test"},
            description="Value to return",
        )
        fail_param = NodeParameter(
            name="should_fail",
            type=bool,
            required=False,
            default=False,
            description="Whether the node should fail",
        )
        return {
            "input": input_param,
            "return_value": return_param,
            "should_fail": fail_param,
        }

    def run(self, **kwargs) -> Dict[str, Any]:
        """Record the invocation, then fail or return the canned value."""
        self._execution_count += 1

        if not self._should_fail:
            return self._return_value
        raise RuntimeError(self._fail_message)

    @property
    def execution_count(self) -> int:
        """Number of times run() has been invoked on this instance."""
        return self._execution_count
62
|
+
class TestDataGenerator:
    """Generate deterministic mock data for workflow testing."""

    @staticmethod
    def generate_csv_data(
        rows: int = 10, columns: Optional[List[str]] = None
    ) -> List[Dict[str, Any]]:
        """Generate mock CSV-like row dictionaries.

        Args:
            rows: Number of rows to generate.
            columns: Subset of ``["id", "name", "value", "category"]`` to
                keep in each row; defaults to all four.

        Returns:
            A list of ``rows`` dictionaries restricted to *columns*.
        """
        # Fix: the parameter was annotated List[str] with a None default
        # (implicit Optional); make the Optional explicit.
        if columns is None:
            columns = ["id", "name", "value", "category"]

        data = []
        categories = ["A", "B", "C", "D"]

        for i in range(rows):
            row = {
                "id": i + 1,
                "name": f"Item_{i+1}",
                # Deterministic variation: base step of 10 plus i % 3 jitter.
                "value": (i + 1) * 10 + (i % 3),
                "category": categories[i % len(categories)],
            }

            # Only include requested columns.
            row = {k: v for k, v in row.items() if k in columns}
            data.append(row)

        return data

    @staticmethod
    def generate_json_data(structure: str = "simple") -> Union[Dict, List]:
        """Generate mock JSON data.

        Args:
            structure: One of ``"simple"``, ``"nested"``, ``"array"``; any
                other value yields ``{"type": structure}``.
        """
        if structure == "simple":
            return {
                "name": "Test Data",
                "version": "1.0",
                "data": [{"id": 1, "value": "test1"}, {"id": 2, "value": "test2"}],
            }
        elif structure == "nested":
            return {
                "metadata": {"created": "2024-01-01", "author": "test"},
                "records": [
                    {"id": 1, "nested": {"key": "value1"}},
                    {"id": 2, "nested": {"key": "value2"}},
                ],
            }
        elif structure == "array":
            return [
                {"id": 1, "data": "value1"},
                {"id": 2, "data": "value2"},
                {"id": 3, "data": "value3"},
            ]
        else:
            return {"type": structure}

    @staticmethod
    def generate_text_data(lines: int = 5) -> str:
        """Generate mock multi-line text.

        Every line gets a numbered sentence; even-indexed lines are followed
        by an extra "interesting data" sentence.
        """
        text_lines = []
        for i in range(lines):
            text_lines.append(f"This is line {i+1} of the test text.")
            if i % 2 == 0:
                text_lines.append(
                    f"It contains some interesting data about item {i+1}."
                )

        return "\n".join(text_lines)
+
|
130
|
+
class WorkflowTestHelper:
    """Utility wrapper around LocalRuntime for workflow-level tests."""

    def __init__(self):
        """Create a debug runtime; a TaskManager is attached per run."""
        self.runtime = LocalRuntime(debug=True)
        self.task_manager = None

    def create_test_workflow(self, name: str = "test_workflow") -> Workflow:
        """Build a three-node mock workflow: input -> process -> output."""
        wf = Workflow(name=name)

        # Three mock stages with canned outputs.
        wf.add_node("input", MockNode(), return_value={"data": [1, 2, 3]})
        wf.add_node("process", MockNode(), return_value={"processed": [2, 4, 6]})
        wf.add_node("output", MockNode(), return_value={"result": "success"})

        # Wire the stages together.
        wf.connect("input", "process", {"data": "input"})
        wf.connect("process", "output", {"processed": "data"})

        return wf

    def run_workflow(
        self,
        workflow: Workflow,
        with_tracking: bool = True,
        parameters: Optional[Dict] = None,
    ) -> Tuple[Dict[str, Any], Optional[str]]:
        """Execute *workflow*, optionally creating a fresh TaskManager first."""
        self.task_manager = TaskManager() if with_tracking else None
        return self.runtime.execute(workflow, self.task_manager, parameters)

    def assert_workflow_success(
        self, workflow: Workflow, expected_nodes: Optional[List[str]] = None
    ):
        """Run *workflow* and assert each expected node ran without error."""
        results, run_id = self.run_workflow(workflow)

        for node_id in expected_nodes or []:
            assert node_id in results, f"Node {node_id} was not executed"
            assert (
                "error" not in results[node_id]
            ), f"Node {node_id} failed: {results[node_id].get('error')}"

        return results, run_id

    def assert_node_output(
        self,
        results: Dict[str, Any],
        node_id: str,
        expected_keys: List[str],
        expected_values: Optional[Dict] = None,
    ):
        """Assert *node_id* appears in *results* with the given keys/values."""
        assert node_id in results, f"Node {node_id} not found in results"

        node_output = results[node_id]

        # Every expected key must be present ...
        for key in expected_keys:
            assert key in node_output, f"Key '{key}' not found in {node_id} output"

        # ... and, if given, carry the expected value.
        if expected_values:
            for key, expected_value in expected_values.items():
                assert (
                    node_output.get(key) == expected_value
                ), f"Node {node_id} key '{key}' expected {expected_value}, got {node_output.get(key)}"
|
207
|
+
class NodeTestHelper:
    """Helper class for testing individual nodes.

    Note: failures are reported via ``raise AssertionError(...)`` rather
    than ``assert False`` so the checks still fire when Python runs with
    the ``-O`` flag (which strips assert statements).
    """

    @staticmethod
    def test_node_parameters(node: "Node", expected_params: Dict[str, type]):
        """Check that *node* declares the expected parameters.

        Args:
            node: Node instance under test.
            expected_params: Mapping of parameter name to expected type.

        Raises:
            AssertionError: If a parameter is missing or has the wrong type.
        """
        params = node.get_parameters()

        for param_name, expected_type in expected_params.items():
            if param_name not in params:
                raise AssertionError(f"Parameter '{param_name}' not found")
            param = params[param_name]
            if param.type != expected_type:
                raise AssertionError(
                    f"Parameter '{param_name}' expected type {expected_type}, got {param.type}"
                )

    @staticmethod
    def test_node_execution(
        node: "Node",
        inputs: Dict[str, Any],
        expected_keys: List[str],
        should_fail: bool = False,
    ) -> Dict[str, Any]:
        """Execute *node* with *inputs* and verify the outcome.

        Returns:
            The node's output dict, or ``{}`` when a failure was expected
            and observed.

        Raises:
            AssertionError: If an expected failure did not happen, or an
                expected output key is missing.
        """
        if should_fail:
            try:
                node.execute(**inputs)
            except (NodeValidationError, WorkflowExecutionError):
                return {}
            # execute() returned normally although a failure was expected.
            raise AssertionError("Node execution should have failed but didn't")

        result = node.execute(**inputs)

        # Check expected output keys.
        for key in expected_keys:
            if key not in result:
                raise AssertionError(f"Expected key '{key}' not found in output")

        return result

    @staticmethod
    def test_node_validation(
        node: "Node", valid_inputs: Dict[str, Any], invalid_inputs: List[Dict[str, Any]]
    ):
        """Check validate_inputs accepts *valid_inputs* and rejects each invalid set.

        Raises:
            AssertionError: If valid inputs are rejected or any invalid
                input passes validation.
        """
        # Valid inputs must not raise.
        try:
            node.validate_inputs(**valid_inputs)
        except NodeValidationError:
            raise AssertionError("Valid inputs failed validation")

        # Each invalid input must be rejected with NodeValidationError.
        for invalid_input in invalid_inputs:
            try:
                node.validate_inputs(**invalid_input)
            except NodeValidationError:
                continue  # Expected rejection.
            raise AssertionError(f"Invalid input {invalid_input} passed validation")
+
|
265
|
+
class TestReporter:
    """Builds JSON-serializable reports from tracked workflow runs."""

    def __init__(self, task_manager: TaskManager):
        """Store the TaskManager holding the run and task records."""
        self.task_manager = task_manager

    def generate_run_report(self, run_id: str) -> Dict[str, Any]:
        """Assemble a run summary plus a per-task breakdown for *run_id*."""
        run = self.task_manager.get_run_summary(run_id)
        tasks = self.task_manager.list_tasks(run_id)

        # Per-task entries; errors are included only when present.
        task_entries = []
        for task in tasks:
            entry = {
                "node_id": task.node_id,
                "node_type": task.node_type,
                "status": task.status,
                "duration": task.duration,
            }
            if task.error:
                entry["error"] = task.error
            task_entries.append(entry)

        return {
            "run_id": run_id,
            "workflow": run.workflow_name,
            "status": run.status,
            "duration": run.duration,
            "started_at": run.started_at,
            "ended_at": run.ended_at,
            "summary": {
                "total_tasks": run.task_count,
                "completed": run.completed_tasks,
                "failed": run.failed_tasks,
            },
            "tasks": task_entries,
        }

    def save_report(self, report: Dict[str, Any], output_path: str):
        """Write *report* as indented JSON to *output_path*.

        Non-serializable values are stringified via ``default=str``.
        """
        with open(output_path, "w") as f:
            json.dump(report, f, indent=2, default=str)
+
|
314
|
+
# Convenience functions
|
315
|
+
def create_test_node(node_type: str = "MockNode", **config) -> Node:
    """Instantiate a node for tests.

    ``"MockNode"`` is built directly; any other type name is resolved
    through the NodeRegistry.
    """
    if node_type != "MockNode":
        # Resolve non-mock types through the registry.
        from kailash.nodes import NodeRegistry

        node_class = NodeRegistry.get(node_type)
        return node_class(**config)
    return MockNode(**config)
326
|
+
|
327
|
+
def create_test_workflow(
    name: str = "test_workflow", nodes: Optional[List[Dict]] = None
) -> Workflow:
    """Create a test workflow from a list of node-config dicts.

    Each entry needs an ``"id"`` and may carry ``"type"`` (default
    ``"MockNode"``), ``"config"``, and ``"connections"`` (list of dicts
    with ``"from"``, ``"to"``, and optional ``"mapping"``).
    """
    workflow = Workflow(name=name)

    for node_config in nodes or []:
        node_id = node_config["id"]
        config = node_config.get("config", {})

        # Build the node and register it under its id.
        node = create_test_node(node_config.get("type", "MockNode"), **config)
        workflow.add_node(node_id, node, **config)

        # Wire up any connections declared alongside this node.
        for conn in node_config.get("connections", []):
            workflow.connect(conn["from"], conn["to"], conn.get("mapping", {}))

    return workflow
@@ -0,0 +1,307 @@
|
|
1
|
+
"""Exception classes for the Kailash SDK.
|
2
|
+
|
3
|
+
This module defines all custom exceptions used throughout the Kailash SDK.
|
4
|
+
Each exception includes helpful error messages and context to guide users
|
5
|
+
toward correct usage.
|
6
|
+
"""
|
7
|
+
|
8
|
+
|
9
|
+
class KailashException(Exception):
    """Root of the Kailash SDK exception hierarchy.

    Catch this to handle any error raised by the SDK.
    """
+
|
15
|
+
# Node-related exceptions
|
16
|
+
class NodeException(KailashException):
    """Common base for every node-related error."""
21
|
+
|
22
|
+
class NodeValidationError(NodeException):
    """A node failed validation.

    Typical causes: required parameters missing, input/output types not
    matching expectations, or an invalid configuration.
    """
33
|
+
|
34
|
+
class NodeExecutionError(NodeException):
    """A node failed while executing.

    Typical causes: an error during node processing, unavailable external
    resources, or a failed data transformation.
    """
45
|
+
|
46
|
+
class NodeConfigurationError(NodeException):
    """A node's configuration is invalid.

    Typical causes: invalid parameter values, a violated configuration
    schema, or missing required environment variables.
    """
+
|
58
|
+
class SafetyViolationError(NodeException):
    """Raised when code safety validation fails.

    This typically occurs when:
    - Code contains dangerous operations (eval, exec, import)
    - Unsafe module imports are attempted
    - Malicious code patterns are detected
    """

    # NOTE(review): a second class with this exact name is defined later in
    # this module (under "Code execution exceptions"); the later definition
    # rebinds the module-level name, so this one is effectively shadowed.
    # The duplicate should be consolidated.
    pass
|
+
|
70
|
+
# Workflow-related exceptions
|
71
|
+
class WorkflowException(KailashException):
    """Common base for every workflow-related error."""
76
|
+
|
77
|
+
class WorkflowValidationError(WorkflowException):
    """A workflow failed validation.

    Typical causes: invalid node connections, missing required nodes, or
    an invalid workflow structure.
    """
88
|
+
|
89
|
+
class WorkflowExecutionError(WorkflowException):
    """A workflow failed while executing.

    Typical causes: a node failing during execution, data unable to flow
    between nodes, or exhausted runtime resources.
    """
100
|
+
|
101
|
+
class CyclicDependencyError(WorkflowException):
    """The workflow graph contains a cycle.

    Nodes form a circular dependency chain, so no valid execution order
    can be determined.
    """
|
+
|
111
|
+
class ConnectionError(WorkflowException):
    """Raised when node connections are invalid.

    This typically occurs when:
    - Trying to connect incompatible node outputs/inputs
    - Connection already exists
    - Node not found in workflow
    """

    # NOTE(review): this name shadows the builtin ConnectionError inside this
    # module (and at any import site), so handlers may accidentally mask
    # OS-level connection errors. Renaming would break the public API, so
    # only flagging it here.
    pass
+
|
123
|
+
# Runtime-related exceptions
|
124
|
+
class RuntimeException(KailashException):
    """Common base for every runtime-related error."""
129
|
+
|
130
|
+
class RuntimeExecutionError(RuntimeException):
    """Runtime execution failed.

    Typical causes: a misconfigured execution environment, unavailable
    resources, or an interrupted execution.
    """
141
|
+
|
142
|
+
# Task tracking exceptions
|
143
|
+
class TaskException(KailashException):
    """Common base for task-tracking errors."""
148
|
+
|
149
|
+
class TaskStateError(TaskException):
    """A task state operation failed.

    Typical causes: an invalid state transition, corrupted task state, or
    a concurrent-modification conflict.
    """
160
|
+
|
161
|
+
# Storage exceptions
|
162
|
+
class StorageException(KailashException):
    """Common base for storage-related errors."""
167
|
+
|
168
|
+
class KailashStorageError(StorageException):
    """A storage operation failed.

    Typical causes: failing file I/O, a failed database connection,
    insufficient storage permissions, or incorrectly formatted data.
    """
|
+
|
181
|
+
# Import/Export exceptions
|
182
|
+
class ExportException(KailashException):
    """An export operation failed.

    Typical causes: an unsupported export format, insufficient file
    permissions, or a serialization failure.
    """
193
|
+
|
194
|
+
class ImportException(KailashException):
    """An import operation failed.

    Typical causes: an unsupported import format, a corrupted or invalid
    file, or a deserialization failure.
    """
205
|
+
|
206
|
+
# Configuration exceptions
|
207
|
+
class ConfigurationException(KailashException):
    """Configuration is invalid.

    Typical causes: a missing configuration file, required configuration
    values not provided, or an invalid configuration schema.
    """
|
+
|
219
|
+
class KailashConfigError(ConfigurationException):
    """Raised when configuration is invalid (legacy name).

    This is an alias for ConfigurationException for backward compatibility.
    """

    # NOTE(review): despite the docstring, this is a subclass rather than a
    # true alias (KailashConfigError is not ConfigurationException), so
    # `except KailashConfigError` will NOT catch a plain
    # ConfigurationException raised by newer code. A real alias would be
    # `KailashConfigError = ConfigurationException`.
    pass
227
|
+
|
228
|
+
# Manifest exceptions
|
229
|
+
class ManifestError(KailashException):
    """A manifest operation failed.

    Typical causes: an invalid manifest file, missing required manifest
    fields, or a version incompatibility.
    """
|
+
|
241
|
+
# CLI exceptions
|
242
|
+
class CLIException(KailashException):
    """A CLI operation failed.

    Typical causes: invalid command arguments, missing required arguments,
    or a failed command execution.
    """
253
|
+
|
254
|
+
# Visualization exceptions
|
255
|
+
class VisualizationError(KailashException):
    """A visualization operation failed.

    Typical causes: a failed graph layout, an unavailable rendering
    engine, or an unsupported output format.
    """
|
+
|
267
|
+
# Template exceptions
|
268
|
+
class TemplateError(KailashException):
    """A template operation failed.

    Typical causes: a missing template file, invalid template syntax, or
    a failed variable substitution.
    """
279
|
+
|
280
|
+
# Code execution exceptions
|
281
|
+
class SafetyViolationError(NodeException):
    """Raised when code execution violates safety rules.

    This typically occurs when:
    - Potentially dangerous operations are attempted
    - Resource limits are exceeded
    - Security policies are violated
    """

    # NOTE(review): this REDEFINES SafetyViolationError, which is already
    # declared earlier in this module under "Node-related exceptions". Being
    # the later definition, this one wins the module-level name binding; the
    # two definitions should be merged into a single class.
    pass
|
+
|
293
|
+
class CodeExecutionError(NodeException):
    """User-supplied code failed to execute.

    Typical causes: syntax errors in the user code, runtime errors during
    its execution, or import/dependency issues.
    """
|
+
|
305
|
+
# Legacy exception name compatibility for tests and backwards compatibility
# These are true aliases (the same class objects), so except/isinstance
# checks against the old names behave identically to the new names.
KailashRuntimeError = RuntimeExecutionError
KailashValidationError = NodeValidationError
|
@@ -0,0 +1,7 @@
|
|
1
|
+
"""Workflow Tracking for Kailash SDK."""
|
2
|
+
|
3
|
+
from kailash.tracking.manager import TaskManager
|
4
|
+
from kailash.tracking.metrics_collector import MetricsCollector, PerformanceMetrics
|
5
|
+
from kailash.tracking.models import TaskStatus
|
6
|
+
|
7
|
+
__all__ = ["TaskManager", "TaskStatus", "MetricsCollector", "PerformanceMetrics"]
|