kailash 0.6.5__py3-none-any.whl → 0.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. kailash/__init__.py +35 -4
  2. kailash/adapters/__init__.py +5 -0
  3. kailash/adapters/mcp_platform_adapter.py +273 -0
  4. kailash/channels/__init__.py +21 -0
  5. kailash/channels/api_channel.py +409 -0
  6. kailash/channels/base.py +271 -0
  7. kailash/channels/cli_channel.py +661 -0
  8. kailash/channels/event_router.py +496 -0
  9. kailash/channels/mcp_channel.py +648 -0
  10. kailash/channels/session.py +423 -0
  11. kailash/mcp_server/discovery.py +1 -1
  12. kailash/middleware/core/agent_ui.py +5 -0
  13. kailash/middleware/mcp/enhanced_server.py +22 -16
  14. kailash/nexus/__init__.py +21 -0
  15. kailash/nexus/factory.py +413 -0
  16. kailash/nexus/gateway.py +545 -0
  17. kailash/nodes/__init__.py +2 -0
  18. kailash/nodes/ai/iterative_llm_agent.py +988 -17
  19. kailash/nodes/ai/llm_agent.py +29 -9
  20. kailash/nodes/api/__init__.py +2 -2
  21. kailash/nodes/api/monitoring.py +1 -1
  22. kailash/nodes/base_async.py +54 -14
  23. kailash/nodes/code/async_python.py +1 -1
  24. kailash/nodes/data/bulk_operations.py +939 -0
  25. kailash/nodes/data/query_builder.py +373 -0
  26. kailash/nodes/data/query_cache.py +512 -0
  27. kailash/nodes/monitoring/__init__.py +10 -0
  28. kailash/nodes/monitoring/deadlock_detector.py +964 -0
  29. kailash/nodes/monitoring/performance_anomaly.py +1078 -0
  30. kailash/nodes/monitoring/race_condition_detector.py +1151 -0
  31. kailash/nodes/monitoring/transaction_metrics.py +790 -0
  32. kailash/nodes/monitoring/transaction_monitor.py +931 -0
  33. kailash/nodes/system/__init__.py +17 -0
  34. kailash/nodes/system/command_parser.py +820 -0
  35. kailash/nodes/transaction/__init__.py +48 -0
  36. kailash/nodes/transaction/distributed_transaction_manager.py +983 -0
  37. kailash/nodes/transaction/saga_coordinator.py +652 -0
  38. kailash/nodes/transaction/saga_state_storage.py +411 -0
  39. kailash/nodes/transaction/saga_step.py +467 -0
  40. kailash/nodes/transaction/transaction_context.py +756 -0
  41. kailash/nodes/transaction/two_phase_commit.py +978 -0
  42. kailash/nodes/transform/processors.py +17 -1
  43. kailash/nodes/validation/__init__.py +21 -0
  44. kailash/nodes/validation/test_executor.py +532 -0
  45. kailash/nodes/validation/validation_nodes.py +447 -0
  46. kailash/resources/factory.py +1 -1
  47. kailash/runtime/async_local.py +84 -21
  48. kailash/runtime/local.py +21 -2
  49. kailash/runtime/parameter_injector.py +187 -31
  50. kailash/security.py +16 -1
  51. kailash/servers/__init__.py +32 -0
  52. kailash/servers/durable_workflow_server.py +430 -0
  53. kailash/servers/enterprise_workflow_server.py +466 -0
  54. kailash/servers/gateway.py +183 -0
  55. kailash/servers/workflow_server.py +290 -0
  56. kailash/utils/data_validation.py +192 -0
  57. kailash/workflow/builder.py +291 -12
  58. kailash/workflow/validation.py +144 -8
  59. {kailash-0.6.5.dist-info → kailash-0.7.0.dist-info}/METADATA +1 -1
  60. {kailash-0.6.5.dist-info → kailash-0.7.0.dist-info}/RECORD +64 -26
  61. {kailash-0.6.5.dist-info → kailash-0.7.0.dist-info}/WHEEL +0 -0
  62. {kailash-0.6.5.dist-info → kailash-0.7.0.dist-info}/entry_points.txt +0 -0
  63. {kailash-0.6.5.dist-info → kailash-0.7.0.dist-info}/licenses/LICENSE +0 -0
  64. {kailash-0.6.5.dist-info → kailash-0.7.0.dist-info}/top_level.txt +0 -0
kailash/nodes/validation/validation_nodes.py ADDED
@@ -0,0 +1,447 @@
+ """Validation nodes for test-driven development.
+
+ This module provides specialized nodes for validating code, workflows, and
+ running test suites as part of the IterativeLLMAgent's test-driven convergence.
+ """
+
+ from typing import Any
+
+ from kailash.nodes.base import Node, NodeParameter, register_node
+
+ from .test_executor import ValidationLevel, ValidationTestExecutor
+
+
+ @register_node()
+ class CodeValidationNode(Node):
+     """Validate generated code through multiple levels of testing.
+
+     This node performs comprehensive validation of Python code including:
+     - Syntax validation
+     - Import resolution
+     - Safe execution testing
+     - Output schema validation
+
+     Examples:
+         >>> validator = CodeValidationNode()
+         >>> result = validator.execute(
+         ...     code="def process(x): return {'result': x * 2}",
+         ...     validation_levels=["syntax", "semantic"],
+         ...     test_inputs={"x": 5}
+         ... )
+     """
+
+     def get_parameters(self) -> dict[str, NodeParameter]:
+         """Get node parameters."""
+         return {
+             "code": NodeParameter(
+                 name="code",
+                 type=str,
+                 required=True,
+                 description="Python code to validate",
+             ),
+             "validation_levels": NodeParameter(
+                 name="validation_levels",
+                 type=list,
+                 required=False,
+                 default=["syntax", "imports", "semantic"],
+                 description="Validation levels to run (syntax, imports, semantic, functional)",
+             ),
+             "test_inputs": NodeParameter(
+                 name="test_inputs",
+                 type=dict,
+                 required=False,
+                 default={},
+                 description="Input data for semantic validation",
+             ),
+             "expected_schema": NodeParameter(
+                 name="expected_schema",
+                 type=dict,
+                 required=False,
+                 description="Expected output schema for functional validation",
+             ),
+             "timeout": NodeParameter(
+                 name="timeout",
+                 type=int,
+                 required=False,
+                 default=30,
+                 description="Maximum execution time in seconds",
+             ),
+             "sandbox": NodeParameter(
+                 name="sandbox",
+                 type=bool,
+                 required=False,
+                 default=True,
+                 description="Use sandboxed execution for safety",
+             ),
+         }
+
+     def run(self, **kwargs) -> dict[str, Any]:
+         """Execute validation tests on provided code."""
+         code = kwargs["code"]
+         levels = kwargs.get("validation_levels", ["syntax", "imports", "semantic"])
+         test_inputs = kwargs.get("test_inputs", {})
+         expected_schema = kwargs.get("expected_schema")
+         timeout = kwargs.get("timeout", 30)
+         sandbox = kwargs.get("sandbox", True)
+
+         executor = ValidationTestExecutor(sandbox_enabled=sandbox, timeout=timeout)
+         validation_results = []
+
+         # Run requested validation levels
+         if "syntax" in levels:
+             result = executor.validate_python_syntax(code)
+             validation_results.append(result)
+
+             # Stop early if syntax fails
+             if not result.passed:
+                 return self._format_results(validation_results, code)
+
+         if "imports" in levels:
+             result = executor.validate_imports(code)
+             validation_results.append(result)
+
+             # Warn but continue if imports fail
+             if not result.passed:
+                 self.logger.warning(f"Import validation failed: {result.error}")
+
+         if "semantic" in levels:
+             result = executor.execute_code_safely(code, test_inputs)
+             validation_results.append(result)
+
+             # If semantic validation passed and we have a schema, validate output
+             if result.passed and expected_schema and "functional" in levels:
+                 # Extract output from execution
+                 namespace = {**test_inputs}
+                 try:
+                     exec(code, namespace)
+                     output = {
+                         k: v
+                         for k, v in namespace.items()
+                         if k not in test_inputs and not k.startswith("_")
+                     }
+
+                     schema_result = executor.validate_output_schema(
+                         output, expected_schema
+                     )
+                     validation_results.append(schema_result)
+                 except Exception as e:
+                     self.logger.error(f"Failed to validate output schema: {e}")
+
+         return self._format_results(validation_results, code)
+
+     def _format_results(self, results: list, code: str) -> dict[str, Any]:
+         """Format validation results for output."""
+         all_passed = all(r.passed for r in results)
+
+         return {
+             "validated": all_passed,
+             "validation_results": [
+                 {
+                     "level": r.level.value,
+                     "passed": r.passed,
+                     "test_name": r.test_name,
+                     "details": r.details,
+                     "error": r.error,
+                     "suggestions": r.suggestions,
+                     "execution_time": r.execution_time,
+                 }
+                 for r in results
+             ],
+             "summary": {
+                 "total_tests": len(results),
+                 "passed": sum(1 for r in results if r.passed),
+                 "failed": sum(1 for r in results if not r.passed),
+                 "code_lines": len(code.splitlines()),
+                 "total_execution_time": sum(r.execution_time for r in results),
+             },
+             "validation_status": "PASSED" if all_passed else "FAILED",
+         }
+
+
+ @register_node()
+ class WorkflowValidationNode(Node):
+     """Validate entire workflow definitions and execution.
+
+     This node validates workflow code by:
+     - Parsing the workflow definition
+     - Checking node configurations
+     - Validating connections
+     - Optionally executing with test data
+
+     Examples:
+         >>> validator = WorkflowValidationNode()
+         >>> result = validator.execute(
+         ...     workflow_code='''
+         ... workflow = WorkflowBuilder()
+         ... workflow.add_node("CSVReaderNode", "reader", {"file_path": "data.csv"})
+         ... workflow.add_node("PythonCodeNode", "processor", {"code": "..."})
+         ... workflow.connect("reader", "processor", {"data": "data"})
+         ... ''',
+         ...     validate_execution=True
+         ... )
+     """
+
+     def get_parameters(self) -> dict[str, NodeParameter]:
+         """Get node parameters."""
+         return {
+             "workflow_code": NodeParameter(
+                 name="workflow_code",
+                 type=str,
+                 required=True,
+                 description="Workflow definition code to validate",
+             ),
+             "validate_execution": NodeParameter(
+                 name="validate_execution",
+                 type=bool,
+                 required=False,
+                 default=False,
+                 description="Whether to execute workflow with test data",
+             ),
+             "test_parameters": NodeParameter(
+                 name="test_parameters",
+                 type=dict,
+                 required=False,
+                 default={},
+                 description="Parameters for test execution",
+             ),
+             "expected_nodes": NodeParameter(
+                 name="expected_nodes",
+                 type=list,
+                 required=False,
+                 description="List of node IDs that should be present",
+             ),
+             "required_connections": NodeParameter(
+                 name="required_connections",
+                 type=list,
+                 required=False,
+                 description="List of required connections between nodes",
+             ),
+         }
+
+     def run(self, **kwargs) -> dict[str, Any]:
+         """Validate workflow definition and optionally execute."""
+         workflow_code = kwargs["workflow_code"]
+         validate_execution = kwargs.get("validate_execution", False)
+         test_parameters = kwargs.get("test_parameters", {})
+         expected_nodes = kwargs.get("expected_nodes", [])
+         required_connections = kwargs.get("required_connections", [])
+
+         validation_results = {
+             "syntax_valid": False,
+             "structure_valid": False,
+             "errors": [],
+             "warnings": [],
+         }
+
+         # Only add execution_valid if we're going to validate execution
+         if validate_execution:
+             validation_results["execution_valid"] = False
+
+         # First validate syntax
+         executor = ValidationTestExecutor()
+         syntax_result = executor.validate_python_syntax(workflow_code)
+         validation_results["syntax_valid"] = syntax_result.passed
+
+         if not syntax_result.passed:
+             validation_results["errors"].append(f"Syntax error: {syntax_result.error}")
+             return self._format_workflow_results(validation_results)
+
+         # Try to extract workflow structure
+         try:
+             # Create namespace for execution
+             workflow_builder_class = self._get_workflow_builder_class()
+             namespace = {
+                 "WorkflowBuilder": workflow_builder_class,
+                 "__builtins__": __builtins__,
+             }
+
+             # Execute workflow code
+             exec(workflow_code, namespace)
+
+             # Find workflow instance
+             workflow = None
+             for var_name, var_value in namespace.items():
+                 if hasattr(var_value, "build") and hasattr(var_value, "add_node"):
+                     # Skip the WorkflowBuilder class itself, look for instances
+                     if var_name != "WorkflowBuilder":
+                         workflow = var_value
+                         break
+
+             if not workflow:
+                 validation_results["errors"].append("No WorkflowBuilder instance found")
+                 return self._format_workflow_results(validation_results)
+
+             # Validate structure
+             built_workflow = workflow.build()
+
+             # Handle both dict format (for testing) and Workflow object (real usage)
+             if hasattr(built_workflow, "nodes"):
+                 # Real Workflow object
+                 actual_nodes = list(built_workflow.nodes.keys())
+                 actual_connections = built_workflow.connections
+             else:
+                 # Dict format (for testing)
+                 actual_nodes = list(built_workflow["nodes"].keys())
+                 actual_connections = built_workflow.get("connections", [])
+
+             # Check expected nodes
+             for expected_node in expected_nodes:
+                 if expected_node not in actual_nodes:
+                     validation_results["errors"].append(
+                         f"Missing expected node: {expected_node}"
+                     )
+
+             # Check connections
+             for req_conn in required_connections:
+                 found = False
+                 for conn in actual_connections:
+                     # Handle both dict format and Connection object
+                     if hasattr(conn, "source_node"):
+                         # Real Connection object
+                         from_node = conn.source_node
+                         to_node = conn.target_node
+                     else:
+                         # Dict format
+                         from_node = conn.get("from_node")
+                         to_node = conn.get("to_node")
+
+                     if from_node == req_conn.get("from") and to_node == req_conn.get(
+                         "to"
+                     ):
+                         found = True
+                         break
+                 if not found:
+                     validation_results["errors"].append(
+                         f"Missing connection: {req_conn.get('from')} -> {req_conn.get('to')}"
+                     )
+
+             validation_results["structure_valid"] = (
+                 len(validation_results["errors"]) == 0
+             )
+             validation_results["node_count"] = len(actual_nodes)
+             validation_results["connection_count"] = len(actual_connections)
+             validation_results["nodes"] = actual_nodes
+
+             # Optionally execute workflow
+             if validate_execution and validation_results["structure_valid"]:
+                 try:
+                     from kailash.runtime.local import LocalRuntime
+
+                     runtime = LocalRuntime()
+
+                     # Execute with test parameters
+                     results, run_id = runtime.execute(
+                         built_workflow, parameters=test_parameters
+                     )
+
+                     # Check for errors
+                     execution_errors = []
+                     for node_id, node_result in results.items():
+                         if isinstance(node_result, dict) and "error" in node_result:
+                             execution_errors.append(
+                                 f"Node {node_id}: {node_result['error']}"
+                             )
+
+                     validation_results["execution_valid"] = len(execution_errors) == 0
+                     validation_results["execution_errors"] = execution_errors
+                     validation_results["run_id"] = run_id
+
+                 except Exception as e:
+                     validation_results["execution_valid"] = False
+                     validation_results["errors"].append(f"Execution failed: {str(e)}")
+
+         except Exception as e:
+             validation_results["errors"].append(f"Workflow parsing failed: {str(e)}")
+
+         return self._format_workflow_results(validation_results)
+
+     def _get_workflow_builder_class(self):
+         """Get WorkflowBuilder class. Can be overridden for testing."""
+         from kailash.workflow.builder import WorkflowBuilder
+
+         return WorkflowBuilder
+
+     def _format_workflow_results(self, results: dict[str, Any]) -> dict[str, Any]:
+         """Format workflow validation results."""
+         all_valid = (
+             results["syntax_valid"]
+             and results["structure_valid"]
+             and (results["execution_valid"] if "execution_valid" in results else True)
+         )
+
+         return {
+             "validated": all_valid,
+             "workflow_valid": all_valid,
+             "validation_details": results,
+             "validation_status": "PASSED" if all_valid else "FAILED",
+             "error_count": len(results.get("errors", [])),
+             "warning_count": len(results.get("warnings", [])),
+         }
+
+
+ @register_node()
+ class ValidationTestSuiteExecutorNode(Node):
+     """Execute a test suite against generated code.
+
+     This node runs multiple test cases against code to ensure
+     comprehensive validation.
+
+     Examples:
+         >>> executor = ValidationTestSuiteExecutorNode()
+         >>> result = executor.execute(
+         ...     code="def double(x): return {'result': x * 2}",
+         ...     test_suite=[
+         ...         {
+         ...             "name": "test_positive",
+         ...             "inputs": {"x": 5},
+         ...             "expected_output": {"result": 10}
+         ...         },
+         ...         {
+         ...             "name": "test_negative",
+         ...             "inputs": {"x": -3},
+         ...             "expected_output": {"result": -6}
+         ...         }
+         ...     ]
+         ... )
+     """
+
+     def get_parameters(self) -> dict[str, NodeParameter]:
+         """Get node parameters."""
+         return {
+             "code": NodeParameter(
+                 name="code", type=str, required=True, description="Code to test"
+             ),
+             "test_suite": NodeParameter(
+                 name="test_suite",
+                 type=list,
+                 required=True,
+                 description="List of test cases with inputs and expected outputs",
+             ),
+             "stop_on_failure": NodeParameter(
+                 name="stop_on_failure",
+                 type=bool,
+                 required=False,
+                 default=False,
+                 description="Stop execution after first test failure",
+             ),
+         }
+
+     def run(self, **kwargs) -> dict[str, Any]:
+         """Execute test suite against code."""
+         code = kwargs["code"]
+         test_suite = kwargs["test_suite"]
+         # Accepted as a parameter but not yet forwarded to the executor
+         stop_on_failure = kwargs.get("stop_on_failure", False)
+
+         executor = ValidationTestExecutor()
+         result = executor.run_test_suite(code, test_suite)
+
+         return {
+             "all_tests_passed": result.passed,
+             "test_results": result.details["results"],
+             "summary": {
+                 "total": result.details["total_tests"],
+                 "passed": result.details["passed"],
+                 "failed": result.details["failed"],
+             },
+             "validation_status": "PASSED" if result.passed else "FAILED",
+         }
kailash/resources/factory.py CHANGED
@@ -390,7 +390,7 @@ class CacheFactory(ResourceFactory):
      async def get(self, key: str) -> Any:
          return self._cache.get(key)

-     async def set(self, key: str, value: Any, expire: int = None) -> None:
+     async def set(self, key: str, value: Any, ttl: int = None) -> None:
          self._cache[key] = value
          # TODO: Implement expiration

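Note the keyword rename: callers that passed expire= in 0.6.5 must now pass ttl= (the value is still unused until the expiration TODO is implemented). A minimal usage sketch, where make_cache() is a hypothetical stand-in for however the factory hands out the cache object:

    cache = make_cache()                           # hypothetical accessor
    await cache.set("user:42", profile, ttl=300)   # 0.6.5 spelled this expire=300
    value = await cache.get("user:42")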
kailash/runtime/async_local.py CHANGED
@@ -579,13 +579,65 @@ class AsyncLocalRuntime(LocalRuntime):
          context_inputs: Dict[str, Any],
      ) -> Dict[str, Any]:
          """Prepare inputs for sync node execution."""
-         # Simplified input preparation
          inputs = context_inputs.copy()

-         # Add outputs from predecessor nodes
+         # Add outputs from predecessor nodes using proper connection mapping
          for predecessor in workflow.graph.predecessors(node_id):
              if predecessor in node_outputs:
-                 inputs[f"{predecessor}_output"] = node_outputs[predecessor]
+                 # Use the actual connection mapping if available
+                 edge_data = workflow.graph.get_edge_data(predecessor, node_id)
+                 if edge_data and "mapping" in edge_data:
+                     # Handle new graph format with mapping
+                     mapping = edge_data["mapping"]
+                     source_data = node_outputs[predecessor]
+
+                     for source_path, target_param in mapping.items():
+                         if source_path == "result":
+                             # Source path is 'result' - use the entire source data
+                             inputs[target_param] = source_data
+                         elif "." in source_path and isinstance(source_data, dict):
+                             # Navigate dotted path (e.g., "result.data" or "nested.field")
+                             path_parts = source_path.split(".")
+
+                             # Special case: if path starts with "result." and source_data doesn't have "result" key,
+                             # try stripping "result." since AsyncPythonCodeNode returns direct dict
+                             if (
+                                 path_parts[0] == "result"
+                                 and "result" not in source_data
+                                 and len(path_parts) > 1
+                             ):
+                                 # Try the remaining path without "result"
+                                 remaining_path = ".".join(path_parts[1:])
+                                 if remaining_path in source_data:
+                                     inputs[target_param] = source_data[remaining_path]
+                                     continue
+                                 else:
+                                     # Try navigating remaining path parts
+                                     path_parts = path_parts[1:]
+
+                             current_data = source_data
+                             # Navigate through each part of the path
+                             for part in path_parts:
+                                 if (
+                                     isinstance(current_data, dict)
+                                     and part in current_data
+                                 ):
+                                     current_data = current_data[part]
+                                 else:
+                                     current_data = None
+                                     break
+                             inputs[target_param] = current_data
+                         elif (
+                             isinstance(source_data, dict) and source_path in source_data
+                         ):
+                             # Direct key access
+                             inputs[target_param] = source_data[source_path]
+                         else:
+                             # Fallback - use source data directly
+                             inputs[target_param] = source_data
+                 else:
+                     # Fallback to legacy behavior if no mapping
+                     inputs[f"{predecessor}_output"] = node_outputs[predecessor]

          return inputs

@@ -713,17 +765,31 @@ class AsyncLocalRuntime(LocalRuntime):
                      source_data = tracker.node_outputs[predecessor]

                      for source_path, target_param in mapping.items():
-                         if source_path != "result" and isinstance(source_data, dict):
-                             # Navigate the path (e.g., "result.data")
+                         if source_path == "result":
+                             # Source path is 'result' - use the entire source data
+                             inputs[target_param] = source_data
+                         elif "." in source_path and isinstance(source_data, dict):
+                             # Navigate dotted path (e.g., "result.data" or "nested.field")
                              path_parts = source_path.split(".")
-                             current_data = source_data

-                             # CRITICAL FIX: Handle paths starting with "result"
-                             # When source_path is "result.field", the node output IS the result
-                             if path_parts[0] == "result" and len(path_parts) > 1:
-                                 # Skip the "result" prefix and navigate from the actual data
-                                 path_parts = path_parts[1:]
+                             # Special case: if path starts with "result." and source_data doesn't have "result" key,
+                             # try stripping "result." since AsyncPythonCodeNode returns direct dict
+                             if (
+                                 path_parts[0] == "result"
+                                 and "result" not in source_data
+                                 and len(path_parts) > 1
+                             ):
+                                 # Try the remaining path without "result"
+                                 remaining_path = ".".join(path_parts[1:])
+                                 if remaining_path in source_data:
+                                     inputs[target_param] = source_data[remaining_path]
+                                     continue
+                                 else:
+                                     # Try navigating remaining path parts
+                                     path_parts = path_parts[1:]

+                             current_data = source_data
+                             # Navigate through each part of the path
                              for part in path_parts:
                                  if (
                                      isinstance(current_data, dict)
@@ -734,17 +800,14 @@ class AsyncLocalRuntime(LocalRuntime):
                                      current_data = None
                                      break
                              inputs[target_param] = current_data
+                         elif (
+                             isinstance(source_data, dict) and source_path in source_data
+                         ):
+                             # Direct key access
+                             inputs[target_param] = source_data[source_path]
                          else:
-                             # Source path is 'result' or source_data is not a dict
-                             if source_path == "result":
-                                 inputs[target_param] = source_data
-                             elif (
-                                 isinstance(source_data, dict)
-                                 and source_path in source_data
-                             ):
-                                 inputs[target_param] = source_data[source_path]
-                             else:
-                                 inputs[target_param] = source_data
+                             # Fallback - use source data directly
+                             inputs[target_param] = source_data
                      elif edge_data and "connections" in edge_data:
                          # Handle legacy connection format
                          connections = edge_data["connections"]
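
Both hunks above install the same path-resolution rules for connection mappings, in the sync-input and tracker-based code paths. A standalone sketch of those rules follows; the helper function is for illustration only (the runtime inlines this logic rather than exposing a function like this):

    def resolve_source_path(source_data, source_path):
        """Sketch of the mapping rules added above (not the library's actual API)."""
        if source_path == "result":
            return source_data  # map the entire node output
        if "." in source_path and isinstance(source_data, dict):
            parts = source_path.split(".")
            # Nodes like AsyncPythonCodeNode return the result dict directly,
            # so "result.data" may need its "result" prefix stripped.
            if parts[0] == "result" and "result" not in source_data and len(parts) > 1:
                remaining = ".".join(parts[1:])
                if remaining in source_data:
                    return source_data[remaining]
                parts = parts[1:]
            current = source_data
            for part in parts:  # walk the dotted path
                if isinstance(current, dict) and part in current:
                    current = current[part]
                else:
                    return None
            return current
        if isinstance(source_data, dict) and source_path in source_data:
            return source_data[source_path]  # direct key access
        return source_data  # fallback: pass the output through unchanged

    assert resolve_source_path({"data": [1, 2]}, "result.data") == [1, 2]
    assert resolve_source_path({"result": {"data": [1, 2]}}, "result.data") == [1, 2]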
kailash/runtime/local.py CHANGED
@@ -567,12 +567,19 @@ class LocalRuntime:
          collector = MetricsCollector()
          with collector.collect(node_id=node_id) as metrics_context:
              # Unified async/sync execution
+             # Validate inputs before execution
+             from kailash.utils.data_validation import DataTypeValidator
+
+             validated_inputs = DataTypeValidator.validate_node_input(
+                 node_id, inputs
+             )
+
              if self.enable_async and hasattr(node_instance, "execute_async"):
                  # Use async execution method that includes validation
-                 outputs = await node_instance.execute_async(**inputs)
+                 outputs = await node_instance.execute_async(**validated_inputs)
              else:
                  # Standard synchronous execution
-                 outputs = node_instance.execute(**inputs)
+                 outputs = node_instance.execute(**validated_inputs)

              # Get performance metrics
              performance_metrics = metrics_context.result()
@@ -698,6 +705,18 @@ class LocalRuntime:
                          f"Cannot use outputs from failed node '{source_node_id}'"
                      )

+                 # Validate source outputs before mapping
+                 from kailash.utils.data_validation import DataTypeValidator
+
+                 try:
+                     source_outputs = DataTypeValidator.validate_node_output(
+                         source_node_id, source_outputs
+                     )
+                 except Exception as e:
+                     self.logger.warning(
+                         f"Data validation failed for node '{source_node_id}': {e}"
+                     )
+
                  for source_key, target_key in mapping.items():
                      # Handle nested output access (e.g., "result.files")
                      if "." in source_key: