kailash 0.6.3__py3-none-any.whl → 0.6.5__py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as they appear in their respective public registries. It is provided for informational purposes only.
- kailash/__init__.py +3 -3
- kailash/api/custom_nodes_secure.py +3 -3
- kailash/api/gateway.py +1 -1
- kailash/api/studio.py +1 -1
- kailash/api/workflow_api.py +2 -2
- kailash/core/resilience/bulkhead.py +475 -0
- kailash/core/resilience/circuit_breaker.py +92 -10
- kailash/core/resilience/health_monitor.py +578 -0
- kailash/edge/discovery.py +86 -0
- kailash/mcp_server/__init__.py +309 -33
- kailash/mcp_server/advanced_features.py +1022 -0
- kailash/mcp_server/ai_registry_server.py +27 -2
- kailash/mcp_server/auth.py +789 -0
- kailash/mcp_server/client.py +645 -378
- kailash/mcp_server/discovery.py +1593 -0
- kailash/mcp_server/errors.py +673 -0
- kailash/mcp_server/oauth.py +1727 -0
- kailash/mcp_server/protocol.py +1126 -0
- kailash/mcp_server/registry_integration.py +587 -0
- kailash/mcp_server/server.py +1228 -96
- kailash/mcp_server/transports.py +1169 -0
- kailash/mcp_server/utils/__init__.py +6 -1
- kailash/mcp_server/utils/cache.py +250 -7
- kailash/middleware/auth/auth_manager.py +3 -3
- kailash/middleware/communication/api_gateway.py +1 -1
- kailash/middleware/communication/realtime.py +1 -1
- kailash/middleware/mcp/enhanced_server.py +1 -1
- kailash/nodes/__init__.py +2 -0
- kailash/nodes/admin/audit_log.py +6 -6
- kailash/nodes/admin/permission_check.py +8 -8
- kailash/nodes/admin/role_management.py +32 -28
- kailash/nodes/admin/schema.sql +6 -1
- kailash/nodes/admin/schema_manager.py +13 -13
- kailash/nodes/admin/security_event.py +15 -15
- kailash/nodes/admin/tenant_isolation.py +3 -3
- kailash/nodes/admin/transaction_utils.py +3 -3
- kailash/nodes/admin/user_management.py +21 -21
- kailash/nodes/ai/a2a.py +11 -11
- kailash/nodes/ai/ai_providers.py +9 -12
- kailash/nodes/ai/embedding_generator.py +13 -14
- kailash/nodes/ai/intelligent_agent_orchestrator.py +19 -19
- kailash/nodes/ai/iterative_llm_agent.py +2 -2
- kailash/nodes/ai/llm_agent.py +210 -33
- kailash/nodes/ai/self_organizing.py +2 -2
- kailash/nodes/alerts/discord.py +4 -4
- kailash/nodes/api/graphql.py +6 -6
- kailash/nodes/api/http.py +10 -10
- kailash/nodes/api/rate_limiting.py +4 -4
- kailash/nodes/api/rest.py +15 -15
- kailash/nodes/auth/mfa.py +3 -3
- kailash/nodes/auth/risk_assessment.py +2 -2
- kailash/nodes/auth/session_management.py +5 -5
- kailash/nodes/auth/sso.py +143 -0
- kailash/nodes/base.py +8 -2
- kailash/nodes/base_async.py +16 -2
- kailash/nodes/base_with_acl.py +2 -2
- kailash/nodes/cache/__init__.py +9 -0
- kailash/nodes/cache/cache.py +1172 -0
- kailash/nodes/cache/cache_invalidation.py +874 -0
- kailash/nodes/cache/redis_pool_manager.py +595 -0
- kailash/nodes/code/async_python.py +2 -1
- kailash/nodes/code/python.py +194 -30
- kailash/nodes/compliance/data_retention.py +6 -6
- kailash/nodes/compliance/gdpr.py +5 -5
- kailash/nodes/data/__init__.py +10 -0
- kailash/nodes/data/async_sql.py +1956 -129
- kailash/nodes/data/optimistic_locking.py +906 -0
- kailash/nodes/data/readers.py +8 -8
- kailash/nodes/data/redis.py +378 -0
- kailash/nodes/data/sql.py +314 -3
- kailash/nodes/data/streaming.py +21 -0
- kailash/nodes/enterprise/__init__.py +8 -0
- kailash/nodes/enterprise/audit_logger.py +285 -0
- kailash/nodes/enterprise/batch_processor.py +22 -3
- kailash/nodes/enterprise/data_lineage.py +1 -1
- kailash/nodes/enterprise/mcp_executor.py +205 -0
- kailash/nodes/enterprise/service_discovery.py +150 -0
- kailash/nodes/enterprise/tenant_assignment.py +108 -0
- kailash/nodes/logic/async_operations.py +2 -2
- kailash/nodes/logic/convergence.py +1 -1
- kailash/nodes/logic/operations.py +1 -1
- kailash/nodes/monitoring/__init__.py +11 -1
- kailash/nodes/monitoring/health_check.py +456 -0
- kailash/nodes/monitoring/log_processor.py +817 -0
- kailash/nodes/monitoring/metrics_collector.py +627 -0
- kailash/nodes/monitoring/performance_benchmark.py +137 -11
- kailash/nodes/rag/advanced.py +7 -7
- kailash/nodes/rag/agentic.py +49 -2
- kailash/nodes/rag/conversational.py +3 -3
- kailash/nodes/rag/evaluation.py +3 -3
- kailash/nodes/rag/federated.py +3 -3
- kailash/nodes/rag/graph.py +3 -3
- kailash/nodes/rag/multimodal.py +3 -3
- kailash/nodes/rag/optimized.py +5 -5
- kailash/nodes/rag/privacy.py +3 -3
- kailash/nodes/rag/query_processing.py +6 -6
- kailash/nodes/rag/realtime.py +1 -1
- kailash/nodes/rag/registry.py +1 -1
- kailash/nodes/rag/router.py +1 -1
- kailash/nodes/rag/similarity.py +7 -7
- kailash/nodes/rag/strategies.py +4 -4
- kailash/nodes/security/abac_evaluator.py +6 -6
- kailash/nodes/security/behavior_analysis.py +5 -5
- kailash/nodes/security/credential_manager.py +1 -1
- kailash/nodes/security/rotating_credentials.py +11 -11
- kailash/nodes/security/threat_detection.py +8 -8
- kailash/nodes/testing/credential_testing.py +2 -2
- kailash/nodes/transform/processors.py +5 -5
- kailash/runtime/local.py +163 -9
- kailash/runtime/parameter_injection.py +425 -0
- kailash/runtime/parameter_injector.py +657 -0
- kailash/runtime/testing.py +2 -2
- kailash/testing/fixtures.py +2 -2
- kailash/workflow/builder.py +99 -14
- kailash/workflow/builder_improvements.py +207 -0
- kailash/workflow/input_handling.py +170 -0
- {kailash-0.6.3.dist-info → kailash-0.6.5.dist-info}/METADATA +22 -9
- {kailash-0.6.3.dist-info → kailash-0.6.5.dist-info}/RECORD +122 -95
- {kailash-0.6.3.dist-info → kailash-0.6.5.dist-info}/WHEEL +0 -0
- {kailash-0.6.3.dist-info → kailash-0.6.5.dist-info}/entry_points.txt +0 -0
- {kailash-0.6.3.dist-info → kailash-0.6.5.dist-info}/licenses/LICENSE +0 -0
- {kailash-0.6.3.dist-info → kailash-0.6.5.dist-info}/top_level.txt +0 -0
kailash/nodes/rag/strategies.py
CHANGED
@@ -412,7 +412,7 @@ class SemanticRAGNode(Node):
         self.workflow_node = create_semantic_rag_workflow(self.config)

         # Delegate to WorkflowNode
-        return self.workflow_node.
+        return self.workflow_node.execute(**kwargs)


 @register_node()
@@ -458,7 +458,7 @@ class StatisticalRAGNode(Node):
         if not self.workflow_node:
             self.workflow_node = create_statistical_rag_workflow(self.config)

-        return self.workflow_node.
+        return self.workflow_node.execute(**kwargs)


 @register_node()
@@ -517,7 +517,7 @@ class HybridRAGNode(Node):
         self.fusion_method = fusion_method
         self.workflow_node = create_hybrid_rag_workflow(self.config, fusion_method)

-        return self.workflow_node.
+        return self.workflow_node.execute(**kwargs)


 @register_node()
@@ -563,4 +563,4 @@ class HierarchicalRAGNode(Node):
         if not self.workflow_node:
             self.workflow_node = create_hierarchical_rag_workflow(self.config)

-        return self.workflow_node.
+        return self.workflow_node.execute(**kwargs)
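All four RAG strategy nodes now route through the standard execute(**kwargs) entry point of the wrapped WorkflowNode. A minimal usage sketch; the no-argument constructor and the input keys are assumptions for illustration, not confirmed by this diff:

    from kailash.nodes.rag.strategies import SemanticRAGNode

    node = SemanticRAGNode()
    # kwargs are forwarded verbatim to the underlying RAG workflow.
    result = node.execute(
        documents=[{"content": "Dense retrieval uses embeddings."}],
        query="How does dense retrieval work?",
    )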
kailash/nodes/security/abac_evaluator.py
CHANGED
@@ -194,7 +194,7 @@ class ABACPermissionEvaluatorNode(SecurityMixin, PerformanceMixin, LoggingMixin,
         ...     "network": "corporate"
         ... }
         >>>
-        >>> result = evaluator.
+        >>> result = evaluator.execute(
         ...     user_context=user_context,
         ...     resource_context=resource_context,
         ...     environment_context=env_context,
@@ -770,7 +770,7 @@ class ABACPermissionEvaluatorNode(SecurityMixin, PerformanceMixin, LoggingMixin,

     async def execute_async(self, **kwargs) -> Dict[str, Any]:
         """Async execution method for test compatibility."""
-        return self.
+        return self.execute(**kwargs)

     def _evaluate_permission(
         self, context: ABACContext, permission: str
@@ -1026,7 +1026,7 @@ class ABACPermissionEvaluatorNode(SecurityMixin, PerformanceMixin, LoggingMixin,
         )

         # Run AI analysis
-        ai_response = self.ai_agent.
+        ai_response = self.ai_agent.execute(
             provider="ollama",
             model=self.ai_model.replace("ollama:", ""),
             messages=[{"role": "user", "content": prompt}],
@@ -1275,7 +1275,7 @@ Return a JSON object with this structure:
         }

         try:
-            self.audit_log_node.
+            self.audit_log_node.execute(**audit_entry)
         except Exception as e:
             self.log_with_context(
                 "WARNING", f"Failed to audit permission decision: {e}"
@@ -1355,7 +1355,7 @@ RESPONSE FORMAT:
         }}
         """

-        ai_response = self.ai_agent.
+        ai_response = self.ai_agent.execute(
             provider="ollama",
             model=self.ai_model.replace("ollama:", ""),
             messages=[{"role": "user", "content": prompt}],
@@ -1408,4 +1408,4 @@ RESPONSE FORMAT:

     async def async_run(self, **kwargs) -> Dict[str, Any]:
         """Async execution method for enterprise integration."""
-        return self.
+        return self.execute(**kwargs)
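A recurring pattern in these hunks: the async entry points are thin shims over the synchronous execute(). Condensed to its shape (an illustrative class, not the actual node):

    from typing import Any, Dict

    class AsyncShimExample:
        def execute(self, **kwargs) -> Dict[str, Any]:
            return {"success": True, **kwargs}

        async def execute_async(self, **kwargs) -> Dict[str, Any]:
            """Async execution method for test compatibility."""
            return self.execute(**kwargs)

        async def async_run(self, **kwargs) -> Dict[str, Any]:
            """Async execution method for enterprise integration."""
            return self.execute(**kwargs)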
kailash/nodes/security/behavior_analysis.py
CHANGED
@@ -107,7 +107,7 @@ class BehaviorAnalysisNode(SecurityMixin, PerformanceMixin, LoggingMixin, Node):
         ...     "data_volume_mb": 15.5
         ... }
         >>>
-        >>> result = behavior_analyzer.
+        >>> result = behavior_analyzer.execute(
         ...     action="analyze",
         ...     user_id="user123",
         ...     recent_activity=[activity]
@@ -417,7 +417,7 @@ class BehaviorAnalysisNode(SecurityMixin, PerformanceMixin, LoggingMixin, Node):
         # Create audit log entry
         if result.get("success", False):
             try:
-                self.audit_log_node.
+                self.audit_log_node.execute(
                     action="behavior_analysis",
                     user_id=user_id or "unknown",
                     result="success",
@@ -1185,7 +1185,7 @@ RESPONSE FORMAT:
         """

         # Run AI analysis
-        ai_response = self.ai_agent.
+        ai_response = self.ai_agent.execute(
             provider="ollama",
             model=self.ai_model.replace("ollama:", ""),
             messages=[{"role": "user", "content": prompt}],
@@ -1370,7 +1370,7 @@ RESPONSE FORMAT:
         }

         try:
-            self.security_event_node.
+            self.security_event_node.execute(**security_event)
         except Exception as e:
             self.log_with_context("WARNING", f"Failed to log anomaly event: {e}")

@@ -1890,4 +1890,4 @@ RESPONSE FORMAT:

     async def async_run(self, **kwargs) -> Dict[str, Any]:
         """Async execution method for enterprise integration."""
-        return self.
+        return self.execute(**kwargs)
kailash/nodes/security/rotating_credentials.py
CHANGED
@@ -255,7 +255,7 @@ class RotatingCredentialNode(Node):
         """Check if a credential is approaching expiration."""
         try:
             # Get current credential
-            credential_result = self._credential_manager.
+            credential_result = self._credential_manager.execute(
                 operation="get_credential", credential_name=credential_name
             )

@@ -314,7 +314,7 @@ class RotatingCredentialNode(Node):
         # Try each refresh source in order
         for source in refresh_sources:
             try:
-                refresh_result = self._credential_manager.
+                refresh_result = self._credential_manager.execute(
                     operation="get_credential",
                     credential_name=credential_name,
                     credential_sources=[source],
@@ -363,7 +363,7 @@ class RotatingCredentialNode(Node):
         # Step 1: Get current credential (for rollback if needed)
         current_credential = None
         if rollback_on_failure:
-            current_result = self._credential_manager.
+            current_result = self._credential_manager.execute(
                 operation="get_credential", credential_name=credential_name
             )
             if current_result.get("success"):
@@ -390,7 +390,7 @@ class RotatingCredentialNode(Node):
             new_credential = refresh_result["credential"]

             # Step 3: Validate new credential
-            validation_result = self._credential_manager.
+            validation_result = self._credential_manager.execute(
                 operation="validate_credential",
                 credential_name=credential_name,
                 credential_data=new_credential,
@@ -422,7 +422,7 @@ class RotatingCredentialNode(Node):

                 temp_credential_name = f"{credential_name}_rotating_{int(time.time())}"

-                store_result = self._credential_manager.
+                store_result = self._credential_manager.execute(
                     operation="store_credential",
                     credential_name=temp_credential_name,
                     credential_data=new_credential,
@@ -445,7 +445,7 @@ class RotatingCredentialNode(Node):
                 # For this example, we'll assume it passes

                 # Atomic switch
-                final_store_result = self._credential_manager.
+                final_store_result = self._credential_manager.execute(
                     operation="store_credential",
                     credential_name=credential_name,
                     credential_data=new_credential,
@@ -454,7 +454,7 @@ class RotatingCredentialNode(Node):
                 if not final_store_result.get("success"):
                     # Rollback if requested
                     if rollback_on_failure and current_credential:
-                        self._credential_manager.
+                        self._credential_manager.execute(
                             operation="store_credential",
                             credential_name=credential_name,
                             credential_data=current_credential,
@@ -476,13 +476,13 @@ class RotatingCredentialNode(Node):
                 }

                 # Clean up temporary credential
-                self._credential_manager.
+                self._credential_manager.execute(
                     operation="delete_credential", credential_name=temp_credential_name
                 )

             else:
                 # Direct replacement
-                store_result = self._credential_manager.
+                store_result = self._credential_manager.execute(
                     operation="store_credential",
                     credential_name=credential_name,
                     credential_data=new_credential,
@@ -491,7 +491,7 @@ class RotatingCredentialNode(Node):
             if not store_result.get("success"):
                 # Rollback if requested
                 if rollback_on_failure and current_credential:
-                    self._credential_manager.
+                    self._credential_manager.execute(
                         operation="store_credential",
                         credential_name=credential_name,
                         credential_data=current_credential,
@@ -757,4 +757,4 @@ class RotatingCredentialNode(Node):

     async def async_run(self, **kwargs) -> Dict[str, Any]:
         """Async execution method for enterprise integration."""
-        return self.
+        return self.execute(**kwargs)
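The rotation flow visible in these hunks is: fetch current credential for rollback, refresh, validate, store, and restore on failure. A condensed sketch of that control flow; the execute() operations are the ones shown above, but the temp-credential staging and cleanup steps are omitted and the manager object is a stand-in:

    def rotate_with_rollback(manager, credential_name, new_credential, rollback_on_failure=True):
        # Keep the current credential so a failed store can be undone.
        current = None
        if rollback_on_failure:
            got = manager.execute(operation="get_credential", credential_name=credential_name)
            if got.get("success"):
                current = got.get("credential")

        # Validate before committing the new credential.
        valid = manager.execute(
            operation="validate_credential",
            credential_name=credential_name,
            credential_data=new_credential,
        )
        if not valid.get("success"):
            return {"success": False, "reason": "validation_failed"}

        stored = manager.execute(
            operation="store_credential",
            credential_name=credential_name,
            credential_data=new_credential,
        )
        if not stored.get("success") and rollback_on_failure and current:
            # Restore the previous credential on failure.
            manager.execute(
                operation="store_credential",
                credential_name=credential_name,
                credential_data=current,
            )
        return stored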
kailash/nodes/security/threat_detection.py
CHANGED
@@ -45,7 +45,7 @@ class ThreatDetectionNode(SecurityMixin, PerformanceMixin, LoggingMixin, Node):
         ...     {"type": "login", "user": "admin", "ip": "192.168.1.100", "failed": True}
         ... ]
         >>>
-        >>> threats = threat_detector.
+        >>> threats = threat_detector.execute(events=events)
         >>> print(f"Detected {len(threats['threats'])} threats")
         """

@@ -661,7 +661,7 @@ class ThreatDetectionNode(SecurityMixin, PerformanceMixin, LoggingMixin, Node):
         prompt = self._create_ai_analysis_prompt(event_summary, context)

         # Run AI analysis
-        ai_response = self.ai_agent.
+        ai_response = self.ai_agent.execute(
             provider="ollama",
             model=self.ai_model.replace("ollama:", ""),
             messages=[{"role": "user", "content": prompt}],
@@ -923,7 +923,7 @@ If no threats are detected, return an empty array: []
             "source_ip": threat.get("source_ip", "unknown"),
         }

-        self.security_event_node.
+        self.security_event_node.execute(**alert_event)

     def _block_ip(self, threat: Dict[str, Any]) -> None:
         """Block IP address associated with threat.
@@ -949,7 +949,7 @@ If no threats are detected, return an empty array: []
             "source_ip": ip,
         }

-        self.security_event_node.
+        self.security_event_node.execute(**block_event)

     def _lock_account(self, threat: Dict[str, Any]) -> None:
         """Lock user account associated with threat.
@@ -974,7 +974,7 @@ If no threats are detected, return an empty array: []
             "source_ip": threat.get("source_ip", "unknown"),
         }

-        self.security_event_node.
+        self.security_event_node.execute(**lock_event)

     def _quarantine_resource(self, threat: Dict[str, Any]) -> None:
         """Quarantine resource associated with threat.
@@ -996,7 +996,7 @@ If no threats are detected, return an empty array: []
             "source_ip": threat.get("source_ip", "unknown"),
         }

-        self.security_event_node.
+        self.security_event_node.execute(**quarantine_event)

     def _log_threat(self, threat: Dict[str, Any]) -> None:
         """Log threat to audit trail.
@@ -1014,7 +1014,7 @@ If no threats are detected, return an empty array: []
             "ip_address": threat.get("source_ip", "unknown"),
         }

-        self.audit_log_node.
+        self.audit_log_node.execute(**log_entry)

     def _update_stats(
         self, events_processed: int, threats_detected: int, processing_time_ms: float
@@ -1100,4 +1100,4 @@ If no threats are detected, return an empty array: []

     async def async_run(self, **kwargs) -> Dict[str, Any]:
         """Async execution method for enterprise integration."""
-        return self.
+        return self.execute(**kwargs)
kailash/nodes/testing/credential_testing.py
CHANGED
@@ -39,7 +39,7 @@ class CredentialTestingNode(Node):
     Example:
         >>> # Test OAuth2 token expiration
        >>> tester = CredentialTestingNode()
-        >>> result = tester.
+        >>> result = tester.execute(
         ...     credential_type='oauth2',
         ...     scenario='expired',
         ...     mock_data={'client_id': 'test_client'}
@@ -48,7 +48,7 @@ class CredentialTestingNode(Node):
         >>> assert 'expired_token' in result['error_details']
         >>>
         >>> # Test successful API key validation
-        >>> result = tester.
+        >>> result = tester.execute(
         ...     credential_type='api_key',
         ...     scenario='success',
         ...     validation_rules={'key_length': 32}
kailash/nodes/transform/processors.py
CHANGED
@@ -68,7 +68,7 @@ class FilterNode(Node):
     Examples:
         >>> # Filter list of numbers
         >>> filter_node = FilterNode()
-        >>> result = filter_node.
+        >>> result = filter_node.execute(
         ...     data=[1, 2, 3, 4, 5],
         ...     operator=">",
         ...     value=3
@@ -81,7 +81,7 @@ class FilterNode(Node):
         ...     {"name": "Bob", "age": 25},
         ...     {"name": "Charlie", "age": 35}
         ... ]
-        >>> result = filter_node.
+        >>> result = filter_node.execute(
         ...     data=users,
         ...     field="age",
         ...     operator=">=",
@@ -96,7 +96,7 @@ class FilterNode(Node):
         ...     {"title": "Java Development"},
         ...     {"title": "Python for Data Science"}
         ... ]
-        >>> result = filter_node.
+        >>> result = filter_node.execute(
         ...     data=items,
         ...     field="title",
         ...     operator="contains",
@@ -110,7 +110,7 @@ class FilterNode(Node):
         ...     {"value": None},
         ...     {"value": 20}
         ... ]
-        >>> result = filter_node.
+        >>> result = filter_node.execute(
         ...     data=data_with_nulls,
         ...     field="value",
         ...     operator="!=",
@@ -570,7 +570,7 @@ class ContextualCompressorNode(Node):
         ...     compression_ratio=0.6,
         ...     relevance_threshold=0.7
         ... )
-        >>> result = compressor.
+        >>> result = compressor.execute(
         ...     query="machine learning algorithms",
         ...     retrieved_docs=[{"content": "...", "metadata": {}}],
         ...     compression_target=1500
kailash/runtime/local.py
CHANGED
@@ -42,6 +42,7 @@ from typing import Any, Optional
 import networkx as nx

 from kailash.nodes import Node
+from kailash.runtime.parameter_injector import WorkflowParameterInjector
 from kailash.sdk_exceptions import (
     RuntimeExecutionError,
     WorkflowExecutionError,
@@ -135,7 +136,7 @@ class LocalRuntime:
         self,
         workflow: Workflow,
         task_manager: TaskManager | None = None,
-        parameters: dict[str, dict[str, Any]] | None = None,
+        parameters: dict[str, dict[str, Any]] | dict[str, Any] | None = None,
     ) -> tuple[dict[str, Any], str | None]:
         """Execute a workflow with unified enterprise capabilities.

@@ -172,7 +173,7 @@ class LocalRuntime:
         self,
         workflow: Workflow,
         task_manager: TaskManager | None = None,
-        parameters: dict[str, dict[str, Any]] | None = None,
+        parameters: dict[str, dict[str, Any]] | dict[str, Any] | None = None,
     ) -> tuple[dict[str, Any], str | None]:
         """Execute a workflow asynchronously (for AsyncLocalRuntime compatibility).

@@ -197,7 +198,7 @@ class LocalRuntime:
         self,
         workflow: Workflow,
         task_manager: TaskManager | None = None,
-        parameters: dict[str, dict[str, Any]] | None = None,
+        parameters: dict[str, dict[str, Any]] | dict[str, Any] | None = None,
     ) -> tuple[dict[str, Any], str | None]:
         """Execute workflow synchronously when already in an event loop.

@@ -257,7 +258,7 @@ class LocalRuntime:
         self,
         workflow: Workflow,
         task_manager: TaskManager | None = None,
-        parameters: dict[str, dict[str, Any]] | None = None,
+        parameters: dict[str, dict[str, Any]] | dict[str, Any] | None = None,
    ) -> tuple[dict[str, Any], str | None]:
         """Core async execution implementation with enterprise features.

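The widened parameters type on all four entry points means the runtime should now accept either calling convention. A sketch, assuming the first hunk's method is the public execute() entry point; the workflow object and node IDs are placeholders:

    from kailash.runtime.local import LocalRuntime

    runtime = LocalRuntime()

    # Node-specific format (unchanged): {node_id: {param: value}}
    results, run_id = runtime.execute(
        workflow,
        parameters={"csv_reader": {"file_path": "data.csv"}},
    )

    # Workflow-level format (new in 0.6.5): a flat {param: value} dict,
    # which _process_workflow_parameters maps onto matching node inputs.
    results, run_id = runtime.execute(
        workflow,
        parameters={"file_path": "data.csv"},
    )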
@@ -292,8 +293,13 @@ class LocalRuntime:
         if self.enable_security and self.user_context:
             self._check_workflow_access(workflow)

+        # Transform workflow-level parameters if needed
+        processed_parameters = self._process_workflow_parameters(
+            workflow, parameters
+        )
+
         # Validate workflow with runtime parameters (Session 061)
-        workflow.validate(runtime_parameters=
+        workflow.validate(runtime_parameters=processed_parameters)

         # Enterprise Audit: Log workflow execution start
         if self.enable_audit:
@@ -302,7 +308,7 @@ class LocalRuntime:
                 {
                     "workflow_id": workflow.workflow_id,
                     "user_context": self._serialize_user_context(),
-                    "parameters":
+                    "parameters": processed_parameters,
                 },
             )

@@ -315,7 +321,7 @@ class LocalRuntime:
             run_id = task_manager.create_run(
                 workflow_name=workflow.name,
                 metadata={
-                    "parameters":
+                    "parameters": processed_parameters,
                     "debug": self.debug,
                     "runtime": "unified_enterprise",
                     "enterprise_features": self._execution_context,
@@ -335,7 +341,7 @@ class LocalRuntime:
             try:
                 # Pass run_id to cyclic executor if available
                 cyclic_results, cyclic_run_id = self.cyclic_executor.execute(
-                    workflow,
+                    workflow, processed_parameters, task_manager, run_id
                 )
                 results = cyclic_results
                 # Update run_id if task manager is being used
@@ -354,7 +360,7 @@ class LocalRuntime:
                 workflow=workflow,
                 task_manager=task_manager,
                 run_id=run_id,
-                parameters=
+                parameters=processed_parameters or {},
             )

             # Enterprise Audit: Log successful completion
@@ -544,6 +550,16 @@ class LocalRuntime:
                     {**node_instance.config, **parameters.get(node_id, {})}
                     node_instance.config.update(parameters.get(node_id, {}))

+                # ENTERPRISE PARAMETER INJECTION FIX: Injected parameters should override connection inputs
+                # This ensures workflow parameters take precedence over connection inputs for the same parameter names
+                injected_params = parameters.get(node_id, {})
+                if injected_params:
+                    inputs.update(injected_params)
+                    if self.debug:
+                        self.logger.debug(
+                            f"Applied parameter injections for {node_id}: {list(injected_params.keys())}"
+                        )
+
                 if self.debug:
                     self.logger.debug(f"Node {node_id} inputs: {inputs}")
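A minimal sketch of the merge order this hunk introduces: values injected from workflow parameters overwrite values that arrived over connections for the same names (the input names below are illustrative):

    inputs = {"query": "from_connection", "top_k": 5}     # connection-supplied inputs
    injected_params = {"query": "from_workflow_param"}    # parameters.get(node_id, {})
    inputs.update(injected_params)                        # injected values win
    assert inputs == {"query": "from_workflow_param", "top_k": 5}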
@@ -961,3 +977,141 @@ class LocalRuntime:
         except Exception as e:
             self.logger.warning(f"Failed to serialize user context: {e}")
             return {"user_context": str(self.user_context)}
+
+    def _process_workflow_parameters(
+        self,
+        workflow: Workflow,
+        parameters: dict[str, dict[str, Any]] | dict[str, Any] | None = None,
+    ) -> dict[str, dict[str, Any]] | None:
+        """Process workflow parameters to handle both formats intelligently.
+
+        This method detects whether parameters are in workflow-level format
+        (flat dictionary) or node-specific format (nested dictionary) and
+        transforms them appropriately for execution.
+
+        ENTERPRISE ENHANCEMENT: Handles mixed format parameters where both
+        node-specific and workflow-level parameters are present in the same
+        parameter dictionary - critical for enterprise production workflows.
+
+        Args:
+            workflow: The workflow being executed
+            parameters: Either workflow-level, node-specific, or MIXED format parameters
+
+        Returns:
+            Node-specific parameters ready for execution with workflow-level
+            parameters properly injected
+        """
+        if not parameters:
+            return None
+
+        # ENTERPRISE FIX: Handle mixed format parameters
+        # Extract node-specific and workflow-level parameters separately
+        node_specific_params, workflow_level_params = self._separate_parameter_formats(
+            parameters, workflow
+        )
+
+        # Start with node-specific parameters
+        result = node_specific_params.copy() if node_specific_params else {}
+
+        # If we have workflow-level parameters, inject them
+        if workflow_level_params:
+            injector = WorkflowParameterInjector(workflow, debug=self.debug)
+
+            # Transform workflow parameters to node-specific format
+            injected_params = injector.transform_workflow_parameters(
+                workflow_level_params
+            )
+
+            # Merge injected parameters with existing node-specific parameters
+            # IMPORTANT: Node-specific parameters take precedence over workflow-level
+            for node_id, node_params in injected_params.items():
+                if node_id not in result:
+                    result[node_id] = {}
+                # First set workflow-level parameters, then override with node-specific
+                for param_name, param_value in node_params.items():
+                    if param_name not in result[node_id]:  # Only if not already set
+                        result[node_id][param_name] = param_value
+
+            # Validate the transformation
+            warnings = injector.validate_parameters(workflow_level_params)
+            if warnings and self.debug:
+                for warning in warnings:
+                    self.logger.warning(f"Parameter validation: {warning}")
+
+        return result if result else None
+
+    def _separate_parameter_formats(
+        self, parameters: dict[str, Any], workflow: Workflow
+    ) -> tuple[dict[str, dict[str, Any]], dict[str, Any]]:
+        """Separate mixed format parameters into node-specific and workflow-level.
+
+        ENTERPRISE CAPABILITY: Intelligently separates complex enterprise parameter
+        patterns where both node-specific and workflow-level parameters coexist.
+
+        Args:
+            parameters: Mixed format parameters
+            workflow: The workflow being executed
+
+        Returns:
+            Tuple of (node_specific_params, workflow_level_params)
+        """
+        node_specific_params = {}
+        workflow_level_params = {}
+
+        # Get node IDs for classification
+        node_ids = set(workflow.graph.nodes()) if workflow else set()
+
+        for key, value in parameters.items():
+            # Node-specific parameter: key is a node ID and value is a dict
+            if key in node_ids and isinstance(value, dict):
+                node_specific_params[key] = value
+            # Workflow-level parameter: key is not a node ID or value is not a dict
+            else:
+                workflow_level_params[key] = value

+        if self.debug:
+            self.logger.debug(
+                f"Separated parameters: "
+                f"node_specific={list(node_specific_params.keys())}, "
+                f"workflow_level={list(workflow_level_params.keys())}"
+            )
+
+        return node_specific_params, workflow_level_params
+
+    def _is_node_specific_format(
+        self, parameters: dict[str, Any], workflow: Workflow = None
+    ) -> bool:
+        """Detect if parameters are in node-specific format.
+
+        Node-specific format has structure: {node_id: {param: value}}
+        Workflow-level format has structure: {param: value}
+
+        Args:
+            parameters: Parameters to check
+            workflow: Optional workflow for node ID validation
+
+        Returns:
+            True if node-specific format, False if workflow-level
+        """
+        if not parameters:
+            return True
+
+        # Get node IDs if workflow provided
+        node_ids = set(workflow.graph.nodes()) if workflow else set()
+
+        # If any key is a node ID and its value is a dict, it's node-specific
+        for key, value in parameters.items():
+            if key in node_ids and isinstance(value, dict):
+                return True
+
+        # Additional heuristic: if all values are dicts and keys look like IDs
+        all_dict_values = all(isinstance(v, dict) for v in parameters.values())
+        keys_look_like_ids = any(
+            "_" in k or k.startswith("node") or k in node_ids for k in parameters.keys()
+        )
+
+        if all_dict_values and keys_look_like_ids:
+            return True
+
+        # Default to workflow-level format
+        return False
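Given the classification rule in _separate_parameter_formats (a key counts as node-specific when it names a graph node and maps to a dict), a mixed dictionary splits as in this standalone sketch; the node IDs and parameter names are illustrative:

    workflow_node_ids = {"csv_reader", "csv_writer"}  # stand-in for workflow.graph.nodes()
    parameters = {
        "csv_reader": {"file_path": "in.csv"},  # node ID + dict value -> node-specific
        "batch_size": 100,                      # anything else -> workflow-level
    }

    node_specific, workflow_level = {}, {}
    for key, value in parameters.items():
        if key in workflow_node_ids and isinstance(value, dict):
            node_specific[key] = value
        else:
            workflow_level[key] = value

    assert node_specific == {"csv_reader": {"file_path": "in.csv"}}
    assert workflow_level == {"batch_size": 100}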