kailash-0.6.3-py3-none-any.whl → kailash-0.6.5-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (122)
  1. kailash/__init__.py +3 -3
  2. kailash/api/custom_nodes_secure.py +3 -3
  3. kailash/api/gateway.py +1 -1
  4. kailash/api/studio.py +1 -1
  5. kailash/api/workflow_api.py +2 -2
  6. kailash/core/resilience/bulkhead.py +475 -0
  7. kailash/core/resilience/circuit_breaker.py +92 -10
  8. kailash/core/resilience/health_monitor.py +578 -0
  9. kailash/edge/discovery.py +86 -0
  10. kailash/mcp_server/__init__.py +309 -33
  11. kailash/mcp_server/advanced_features.py +1022 -0
  12. kailash/mcp_server/ai_registry_server.py +27 -2
  13. kailash/mcp_server/auth.py +789 -0
  14. kailash/mcp_server/client.py +645 -378
  15. kailash/mcp_server/discovery.py +1593 -0
  16. kailash/mcp_server/errors.py +673 -0
  17. kailash/mcp_server/oauth.py +1727 -0
  18. kailash/mcp_server/protocol.py +1126 -0
  19. kailash/mcp_server/registry_integration.py +587 -0
  20. kailash/mcp_server/server.py +1228 -96
  21. kailash/mcp_server/transports.py +1169 -0
  22. kailash/mcp_server/utils/__init__.py +6 -1
  23. kailash/mcp_server/utils/cache.py +250 -7
  24. kailash/middleware/auth/auth_manager.py +3 -3
  25. kailash/middleware/communication/api_gateway.py +1 -1
  26. kailash/middleware/communication/realtime.py +1 -1
  27. kailash/middleware/mcp/enhanced_server.py +1 -1
  28. kailash/nodes/__init__.py +2 -0
  29. kailash/nodes/admin/audit_log.py +6 -6
  30. kailash/nodes/admin/permission_check.py +8 -8
  31. kailash/nodes/admin/role_management.py +32 -28
  32. kailash/nodes/admin/schema.sql +6 -1
  33. kailash/nodes/admin/schema_manager.py +13 -13
  34. kailash/nodes/admin/security_event.py +15 -15
  35. kailash/nodes/admin/tenant_isolation.py +3 -3
  36. kailash/nodes/admin/transaction_utils.py +3 -3
  37. kailash/nodes/admin/user_management.py +21 -21
  38. kailash/nodes/ai/a2a.py +11 -11
  39. kailash/nodes/ai/ai_providers.py +9 -12
  40. kailash/nodes/ai/embedding_generator.py +13 -14
  41. kailash/nodes/ai/intelligent_agent_orchestrator.py +19 -19
  42. kailash/nodes/ai/iterative_llm_agent.py +2 -2
  43. kailash/nodes/ai/llm_agent.py +210 -33
  44. kailash/nodes/ai/self_organizing.py +2 -2
  45. kailash/nodes/alerts/discord.py +4 -4
  46. kailash/nodes/api/graphql.py +6 -6
  47. kailash/nodes/api/http.py +10 -10
  48. kailash/nodes/api/rate_limiting.py +4 -4
  49. kailash/nodes/api/rest.py +15 -15
  50. kailash/nodes/auth/mfa.py +3 -3
  51. kailash/nodes/auth/risk_assessment.py +2 -2
  52. kailash/nodes/auth/session_management.py +5 -5
  53. kailash/nodes/auth/sso.py +143 -0
  54. kailash/nodes/base.py +8 -2
  55. kailash/nodes/base_async.py +16 -2
  56. kailash/nodes/base_with_acl.py +2 -2
  57. kailash/nodes/cache/__init__.py +9 -0
  58. kailash/nodes/cache/cache.py +1172 -0
  59. kailash/nodes/cache/cache_invalidation.py +874 -0
  60. kailash/nodes/cache/redis_pool_manager.py +595 -0
  61. kailash/nodes/code/async_python.py +2 -1
  62. kailash/nodes/code/python.py +194 -30
  63. kailash/nodes/compliance/data_retention.py +6 -6
  64. kailash/nodes/compliance/gdpr.py +5 -5
  65. kailash/nodes/data/__init__.py +10 -0
  66. kailash/nodes/data/async_sql.py +1956 -129
  67. kailash/nodes/data/optimistic_locking.py +906 -0
  68. kailash/nodes/data/readers.py +8 -8
  69. kailash/nodes/data/redis.py +378 -0
  70. kailash/nodes/data/sql.py +314 -3
  71. kailash/nodes/data/streaming.py +21 -0
  72. kailash/nodes/enterprise/__init__.py +8 -0
  73. kailash/nodes/enterprise/audit_logger.py +285 -0
  74. kailash/nodes/enterprise/batch_processor.py +22 -3
  75. kailash/nodes/enterprise/data_lineage.py +1 -1
  76. kailash/nodes/enterprise/mcp_executor.py +205 -0
  77. kailash/nodes/enterprise/service_discovery.py +150 -0
  78. kailash/nodes/enterprise/tenant_assignment.py +108 -0
  79. kailash/nodes/logic/async_operations.py +2 -2
  80. kailash/nodes/logic/convergence.py +1 -1
  81. kailash/nodes/logic/operations.py +1 -1
  82. kailash/nodes/monitoring/__init__.py +11 -1
  83. kailash/nodes/monitoring/health_check.py +456 -0
  84. kailash/nodes/monitoring/log_processor.py +817 -0
  85. kailash/nodes/monitoring/metrics_collector.py +627 -0
  86. kailash/nodes/monitoring/performance_benchmark.py +137 -11
  87. kailash/nodes/rag/advanced.py +7 -7
  88. kailash/nodes/rag/agentic.py +49 -2
  89. kailash/nodes/rag/conversational.py +3 -3
  90. kailash/nodes/rag/evaluation.py +3 -3
  91. kailash/nodes/rag/federated.py +3 -3
  92. kailash/nodes/rag/graph.py +3 -3
  93. kailash/nodes/rag/multimodal.py +3 -3
  94. kailash/nodes/rag/optimized.py +5 -5
  95. kailash/nodes/rag/privacy.py +3 -3
  96. kailash/nodes/rag/query_processing.py +6 -6
  97. kailash/nodes/rag/realtime.py +1 -1
  98. kailash/nodes/rag/registry.py +1 -1
  99. kailash/nodes/rag/router.py +1 -1
  100. kailash/nodes/rag/similarity.py +7 -7
  101. kailash/nodes/rag/strategies.py +4 -4
  102. kailash/nodes/security/abac_evaluator.py +6 -6
  103. kailash/nodes/security/behavior_analysis.py +5 -5
  104. kailash/nodes/security/credential_manager.py +1 -1
  105. kailash/nodes/security/rotating_credentials.py +11 -11
  106. kailash/nodes/security/threat_detection.py +8 -8
  107. kailash/nodes/testing/credential_testing.py +2 -2
  108. kailash/nodes/transform/processors.py +5 -5
  109. kailash/runtime/local.py +163 -9
  110. kailash/runtime/parameter_injection.py +425 -0
  111. kailash/runtime/parameter_injector.py +657 -0
  112. kailash/runtime/testing.py +2 -2
  113. kailash/testing/fixtures.py +2 -2
  114. kailash/workflow/builder.py +99 -14
  115. kailash/workflow/builder_improvements.py +207 -0
  116. kailash/workflow/input_handling.py +170 -0
  117. {kailash-0.6.3.dist-info → kailash-0.6.5.dist-info}/METADATA +22 -9
  118. {kailash-0.6.3.dist-info → kailash-0.6.5.dist-info}/RECORD +122 -95
  119. {kailash-0.6.3.dist-info → kailash-0.6.5.dist-info}/WHEEL +0 -0
  120. {kailash-0.6.3.dist-info → kailash-0.6.5.dist-info}/entry_points.txt +0 -0
  121. {kailash-0.6.3.dist-info → kailash-0.6.5.dist-info}/licenses/LICENSE +0 -0
  122. {kailash-0.6.3.dist-info → kailash-0.6.5.dist-info}/top_level.txt +0 -0
kailash/nodes/enterprise/audit_logger.py (new file)
@@ -0,0 +1,285 @@
+"""Enterprise audit logging node for compliance and security."""
+
+import time
+from datetime import datetime
+from typing import Any, Dict, List
+
+from kailash.nodes.base import Node, NodeMetadata, NodeParameter, register_node
+from kailash.sdk_exceptions import NodeExecutionError
+
+
+@register_node()
+class EnterpriseAuditLoggerNode(Node):
+    """Creates comprehensive audit logs for enterprise compliance.
+
+    This node generates detailed audit trails for all enterprise operations,
+    ensuring compliance with regulations like SOX, HIPAA, and GDPR.
+    """
+
+    metadata = NodeMetadata(
+        name="EnterpriseAuditLoggerNode",
+        description="Generate comprehensive audit logs for enterprise compliance",
+        version="1.0.0",
+        tags={"enterprise", "audit", "compliance"},
+    )
+
+    def __init__(self, name: str = None, **kwargs):
+        self.name = name or self.__class__.__name__
+        super().__init__(name=self.name, **kwargs)
+
+    def get_parameters(self) -> Dict[str, NodeParameter]:
+        return {
+            "execution_results": NodeParameter(
+                name="execution_results",
+                type=dict,
+                description="Results from MCP tool execution",
+                required=True,
+            ),
+            "user_context": NodeParameter(
+                name="user_context",
+                type=dict,
+                description="User context for audit trail",
+                required=True,
+            ),
+            "audit_level": NodeParameter(
+                name="audit_level",
+                type=str,
+                description="Audit detail level (basic, detailed, full)",
+                required=False,
+                default="detailed",
+            ),
+        }
+
+    def run(
+        self,
+        execution_results: Dict,
+        user_context: Dict,
+        audit_level: str = "detailed",
+        **kwargs,
+    ) -> Dict[str, Any]:
+        """Generate comprehensive audit log entry."""
+        try:
+            # Extract audit information
+            actions_performed = execution_results.get("actions", [])
+            summary = execution_results.get("summary", {})
+
+            # Create base audit entry
+            audit_entry = {
+                "timestamp": datetime.utcnow().isoformat(),
+                "audit_id": f"audit-{int(time.time())}-{user_context.get('user_id', 'unknown')}",
+                "audit_level": audit_level,
+                # User identification
+                "user_id": user_context.get("user_id"),
+                "tenant_id": user_context.get("tenant_id"),
+                "session_id": user_context.get("session_id"),
+                # Operation details
+                "actions": actions_performed,
+                "results_summary": {
+                    "total_actions": summary.get(
+                        "total_actions", len(actions_performed)
+                    ),
+                    "successful": summary.get("successful_actions", 0),
+                    "failed": summary.get("failed_actions", 0),
+                    "execution_time_ms": summary.get("execution_time_ms", 0),
+                },
+                # Compliance information
+                "compliance": {
+                    "data_residency_compliant": self._check_data_residency_compliance(
+                        user_context, actions_performed
+                    ),
+                    "access_controls_enforced": self._check_access_controls(
+                        user_context, actions_performed
+                    ),
+                    "audit_trail_complete": True,
+                    "compliance_zones": user_context.get(
+                        "compliance_zones", ["public"]
+                    ),
+                },
+                # Security metadata
+                "security": {
+                    "authentication_method": "sso_mfa",
+                    "authorization_level": (
+                        "verified"
+                        if len(user_context.get("permissions", [])) > 1
+                        else "basic"
+                    ),
+                    "data_classification": self._determine_data_classification(
+                        actions_performed
+                    ),
+                    "encryption_status": "encrypted_in_transit_and_rest",
+                },
+            }
+
+            # Add detailed information based on audit level
+            if audit_level in ["detailed", "full"]:
+                audit_entry["detailed_actions"] = [
+                    {
+                        "action_id": f"action-{i}-{int(time.time())}",
+                        "action_type": action.get("action", "unknown"),
+                        "server_id": action.get("server_id"),
+                        "timestamp": action.get("timestamp"),
+                        "success": action.get("success", False),
+                        "data_size_bytes": action.get("data_size", 0),
+                        "error": (
+                            action.get("error")
+                            if not action.get("success", False)
+                            else None
+                        ),
+                    }
+                    for i, action in enumerate(actions_performed)
+                ]
+
+            if audit_level == "full":
+                audit_entry["system_context"] = {
+                    "workflow_execution_id": kwargs.get("workflow_execution_id"),
+                    "node_execution_order": kwargs.get("node_execution_order", []),
+                    "resource_usage": {
+                        "peak_memory_mb": kwargs.get("peak_memory_mb", 0),
+                        "cpu_time_ms": kwargs.get("cpu_time_ms", 0),
+                        "network_bytes": kwargs.get("network_bytes", 0),
+                    },
+                }
+
+            # Calculate risk score
+            risk_score = self._calculate_risk_score(actions_performed, user_context)
+            audit_entry["risk_assessment"] = {
+                "risk_score": risk_score,
+                "risk_level": (
+                    "high"
+                    if risk_score > 0.7
+                    else "medium" if risk_score > 0.3 else "low"
+                ),
+                "risk_factors": self._identify_risk_factors(
+                    actions_performed, user_context
+                ),
+            }
+
+            return {
+                "audit_entry": audit_entry,
+                "audit_id": audit_entry["audit_id"],
+                "compliance_status": "compliant",
+                "audit_timestamp": time.time(),
+            }
+
+        except Exception as e:
+            raise NodeExecutionError(f"Audit logging failed: {str(e)}")
+
+    def _check_data_residency_compliance(
+        self, user_context: Dict, actions: List[Dict]
+    ) -> bool:
+        """Check if data residency requirements are met."""
+        # In a real implementation, this would check actual data locations
+        data_residency = user_context.get("data_residency")
+        if not data_residency:
+            return True
+
+        # Check if all actions occurred in the required region
+        for action in actions:
+            server_id = action.get("server_id", "")
+            if data_residency == "us-east-1" and "us-east" not in server_id:
+                return False
+
+        return True
+
+    def _check_access_controls(self, user_context: Dict, actions: List[Dict]) -> bool:
+        """Verify access controls were properly enforced."""
+        permissions = user_context.get("permissions", [])
+
+        # Check if user had write permissions for write actions
+        for action in actions:
+            action_type = action.get("action", "")
+            if "write" in action_type or "execute" in action_type:
+                if "write" not in permissions:
+                    return False
+
+        return True
+
+    def _determine_data_classification(self, actions: List[Dict]) -> str:
+        """Determine the highest data classification level accessed."""
+        classifications = []
+
+        for action in actions:
+            action_type = action.get("action", "")
+            if "patient" in action_type:
+                classifications.append("confidential")
+            elif "transaction" in action_type or "financial" in action_type:
+                classifications.append("restricted")
+            elif "analytics" in action_type:
+                classifications.append("internal")
+            else:
+                classifications.append("public")
+
+        # Return highest classification
+        if "confidential" in classifications:
+            return "confidential"
+        elif "restricted" in classifications:
+            return "restricted"
+        elif "internal" in classifications:
+            return "internal"
+        else:
+            return "public"
+
+    def _calculate_risk_score(self, actions: List[Dict], user_context: Dict) -> float:
+        """Calculate risk score based on actions and context."""
+        base_risk = 0.1
+
+        # Increase risk for failed actions
+        failed_actions = sum(
+            1 for action in actions if not action.get("success", False)
+        )
+        failure_risk = failed_actions * 0.2
+
+        # Increase risk for sensitive data access
+        sensitive_actions = sum(
+            1
+            for action in actions
+            if any(
+                keyword in action.get("action", "")
+                for keyword in ["patient", "transaction", "financial"]
+            )
+        )
+        sensitivity_risk = sensitive_actions * 0.15
+
+        # Increase risk for cross-region access
+        data_residency = user_context.get("data_residency", "")
+        cross_region_actions = sum(
+            1
+            for action in actions
+            if data_residency and data_residency not in action.get("server_id", "")
+        )
+        region_risk = cross_region_actions * 0.1
+
+        total_risk = min(1.0, base_risk + failure_risk + sensitivity_risk + region_risk)
+        return round(total_risk, 2)
+
+    def _identify_risk_factors(
+        self, actions: List[Dict], user_context: Dict
+    ) -> List[str]:
+        """Identify specific risk factors for this audit entry."""
+        factors = []
+
+        # Check for failures
+        if any(not action.get("success", False) for action in actions):
+            factors.append("execution_failures")
+
+        # Check for sensitive data access
+        if any(
+            keyword in action.get("action", "")
+            for action in actions
+            for keyword in ["patient", "transaction", "financial"]
+        ):
+            factors.append("sensitive_data_access")
+
+        # Check for cross-region access
+        data_residency = user_context.get("data_residency", "")
+        if any(
+            data_residency and data_residency not in action.get("server_id", "")
+            for action in actions
+        ):
+            factors.append("cross_region_access")
+
+        # Check for elevated permissions
+        if "admin" in user_context.get("permissions", []):
+            factors.append("elevated_permissions")
+
+        return factors
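
A minimal usage sketch for the new audit node, based only on the run() signature and keys shown in the hunk above. The input values and the direct run() call are illustrative; inside a workflow the node would normally be invoked through the Kailash runtime (or its execute() wrapper) rather than called by hand.

    from kailash.nodes.enterprise.audit_logger import EnterpriseAuditLoggerNode

    node = EnterpriseAuditLoggerNode(name="audit_logger")

    # execution_results/user_context mirror the fields read by run();
    # the concrete values are made up for illustration.
    audit = node.run(
        execution_results={
            "actions": [
                {
                    "action": "execute_patient_analytics",
                    "success": True,
                    "server_id": "us-east-1-mcp",
                    "data_size": 2048,
                    "timestamp": 0,
                }
            ],
            "summary": {
                "total_actions": 1,
                "successful_actions": 1,
                "failed_actions": 0,
                "execution_time_ms": 120,
            },
        },
        user_context={
            "user_id": "u-123",
            "tenant_id": "healthcare-corp",
            "permissions": ["read", "write"],
            "data_residency": "us-east-1",
            "compliance_zones": ["hipaa"],
        },
        audit_level="full",
    )
    print(audit["audit_id"], audit["audit_entry"]["risk_assessment"]["risk_level"])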
kailash/nodes/enterprise/batch_processor.py
@@ -404,8 +404,27 @@ class BatchProcessorNode(Node):
         # Call progress callback if provided
         if progress_callback:
             try:
-                callback_func = eval(progress_callback)
-                callback_func(progress_info)
+                # Enterprise security: Use safe callback resolution instead of eval
+                if callable(progress_callback):
+                    # Direct callable
+                    progress_callback(progress_info)
+                elif isinstance(progress_callback, str):
+                    # String callback - validate against safe functions only
+                    import importlib
+
+                    parts = progress_callback.split(".")
+                    if len(parts) >= 2:
+                        module_name = ".".join(parts[:-1])
+                        func_name = parts[-1]
+                        try:
+                            # Only allow importlib for known safe modules
+                            if module_name in ["logging", "sys", "json"]:
+                                module = importlib.import_module(module_name)
+                                callback_func = getattr(module, func_name)
+                                if callable(callback_func):
+                                    callback_func(progress_info)
+                        except (ImportError, AttributeError):
+                            pass
             except:
                 pass  # Ignore callback errors
 
kailash/nodes/enterprise/batch_processor.py
@@ -738,4 +757,4 @@ result = results
 
     async def async_run(self, **kwargs) -> Dict[str, Any]:
         """Async execution method for enterprise integration."""
-        return self.run(**kwargs)
+        return self.execute(**kwargs)
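
The eval() call above allowed arbitrary code execution through the progress_callback string; the replacement only accepts direct callables or dotted names whose module is on a small whitelist. A standalone sketch of the same resolution rule (the helper name is hypothetical, the logic mirrors the added lines):

    import importlib
    from typing import Any, Callable, Optional

    SAFE_CALLBACK_MODULES = {"logging", "sys", "json"}  # whitelist used in the diff

    def resolve_progress_callback(spec: Any) -> Optional[Callable]:
        """Resolve a progress callback without eval()."""
        if callable(spec):
            return spec  # direct callable is used as-is
        if isinstance(spec, str) and "." in spec:
            module_name, _, func_name = spec.rpartition(".")
            if module_name in SAFE_CALLBACK_MODULES:
                try:
                    func = getattr(importlib.import_module(module_name), func_name)
                    return func if callable(func) else None
                except (ImportError, AttributeError):
                    return None
        return None

    # resolve_progress_callback("logging.info") -> logging.info,
    # while "os.system" returns None because "os" is not whitelisted.

The second hunk changes async_run() to delegate to execute() instead of run(), presumably so async callers go through the same validated execution path as synchronous ones; the identical change appears in DataLineageNode below.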
kailash/nodes/enterprise/data_lineage.py
@@ -494,4 +494,4 @@ class DataLineageNode(Node):
 
     async def async_run(self, **kwargs) -> Dict[str, Any]:
         """Async execution method for enterprise integration."""
-        return self.run(**kwargs)
+        return self.execute(**kwargs)
kailash/nodes/enterprise/mcp_executor.py (new file)
@@ -0,0 +1,205 @@
+"""Enterprise MCP tool execution node with circuit breaker protection."""
+
+import random
+import time
+from typing import Any, Dict
+
+from kailash.nodes.base import Node, NodeMetadata, NodeParameter, register_node
+from kailash.sdk_exceptions import NodeExecutionError
+
+
+@register_node()
+class EnterpriseMLCPExecutorNode(Node):
+    """Executes MCP tools with enterprise-grade resilience patterns.
+
+    This node provides circuit breaker protection, audit logging,
+    and compliance-aware execution for MCP tools.
+    """
+
+    metadata = NodeMetadata(
+        name="EnterpriseMLCPExecutorNode",
+        description="Execute MCP tools with enterprise resilience patterns",
+        version="1.0.0",
+        tags={"enterprise", "mcp", "resilience"},
+    )
+
+    def __init__(self, name: str = None, **kwargs):
+        self.name = name or self.__class__.__name__
+        super().__init__(name=self.name, **kwargs)
+
+    def get_parameters(self) -> Dict[str, NodeParameter]:
+        return {
+            "tool_request": NodeParameter(
+                name="tool_request",
+                type=dict,
+                description="Tool execution request from AI agent",
+                required=True,
+            ),
+            "circuit_breaker_enabled": NodeParameter(
+                name="circuit_breaker_enabled",
+                type=bool,
+                description="Enable circuit breaker protection",
+                required=False,
+                default=True,
+            ),
+            "success_rate_threshold": NodeParameter(
+                name="success_rate_threshold",
+                type=float,
+                description="Success rate threshold for circuit breaker",
+                required=False,
+                default=0.8,
+            ),
+        }
+
+    def run(
+        self,
+        tool_request: Dict,
+        circuit_breaker_enabled: bool = True,
+        success_rate_threshold: float = 0.8,
+        **kwargs,
+    ) -> Dict[str, Any]:
+        """Execute MCP tool with resilience patterns."""
+        try:
+            # Extract tool information
+            tool_name = tool_request.get("tool", "unknown")
+            params = tool_request.get("parameters", {})
+            server_id = tool_request.get("server_id", "default-mcp")
+
+            # Simulate circuit breaker state (in production, this would be persistent)
+            # For demo purposes, randomly determine circuit state
+            circuit_state = "CLOSED"  # Could be OPEN, HALF_OPEN, CLOSED
+
+            execution_start = time.time()
+
+            if circuit_breaker_enabled and circuit_state == "OPEN":
+                return {
+                    "success": False,
+                    "error": f"Circuit breaker OPEN for {server_id}",
+                    "fallback_used": True,
+                    "execution_time_ms": 1,
+                    "circuit_state": circuit_state,
+                }
+
+            # Simulate MCP tool execution with realistic results
+            success = random.random() < success_rate_threshold
+
+            if success:
+                # Generate realistic mock data based on tool type
+                if tool_name == "patient_analytics":
+                    data = {
+                        "patient_count": random.randint(1000, 2000),
+                        "avg_satisfaction": round(random.uniform(3.5, 4.8), 1),
+                        "trend": random.choice(["improving", "stable", "declining"]),
+                        "compliance_score": round(random.uniform(0.85, 0.98), 2),
+                    }
+                elif tool_name == "transaction_analysis":
+                    data = {
+                        "transaction_volume": random.randint(10000000, 20000000),
+                        "fraud_rate": round(random.uniform(0.01, 0.05), 3),
+                        "avg_transaction": round(random.uniform(100.0, 200.0), 2),
+                        "risk_score": round(random.uniform(0.1, 0.3), 2),
+                    }
+                elif tool_name == "risk_scoring":
+                    data = {
+                        "overall_risk": round(random.uniform(0.1, 0.4), 2),
+                        "categories": {
+                            "credit_risk": round(random.uniform(0.05, 0.25), 2),
+                            "operational_risk": round(random.uniform(0.02, 0.15), 2),
+                            "market_risk": round(random.uniform(0.03, 0.20), 2),
+                        },
+                        "recommendations": ["Increase reserves", "Monitor exposure"],
+                    }
+                else:
+                    data = {
+                        "status": "completed",
+                        "records_processed": random.randint(50, 500),
+                        "timestamp": time.time(),
+                    }
+
+                execution_time = (time.time() - execution_start) * 1000
+
+                result = {
+                    "success": True,
+                    "data": data,
+                    "execution_time_ms": round(
+                        execution_time + random.randint(50, 200), 2
+                    ),
+                    "server_id": server_id,
+                    "tool_name": tool_name,
+                    "circuit_state": circuit_state,
+                    "compliance_validated": True,
+                }
+            else:
+                # Simulate failure
+                error_messages = [
+                    "Service temporarily unavailable",
+                    "Rate limit exceeded",
+                    "Authentication failed",
+                    "Invalid parameters",
+                    "Network timeout",
+                ]
+
+                result = {
+                    "success": False,
+                    "error": random.choice(error_messages),
+                    "execution_time_ms": round(
+                        (time.time() - execution_start) * 1000
+                        + random.randint(100, 500),
+                        2,
+                    ),
+                    "server_id": server_id,
+                    "tool_name": tool_name,
+                    "circuit_state": circuit_state,
+                    "retry_recommended": True,
+                }
+
+            # Add audit trail information
+            result["audit_info"] = {
+                "execution_id": f"exec-{int(time.time())}-{random.randint(1000, 9999)}",
+                "timestamp": time.time(),
+                "user_context": kwargs.get("user_context", {}),
+                "compliance_checked": True,
+            }
+
+            # For successful executions, add actions for audit logging
+            if result["success"]:
+                result["execution_results"] = {
+                    "actions": [
+                        {
+                            "action": f"execute_{tool_name}",
+                            "success": True,
+                            "server_id": server_id,
+                            "data_size": len(str(data)),
+                            "timestamp": time.time(),
+                        }
+                    ],
+                    "summary": {
+                        "total_actions": 1,
+                        "successful_actions": 1,
+                        "failed_actions": 0,
+                        "execution_time_ms": result["execution_time_ms"],
+                    },
+                }
+            else:
+                result["execution_results"] = {
+                    "actions": [
+                        {
+                            "action": f"execute_{tool_name}",
+                            "success": False,
+                            "error": result["error"],
+                            "server_id": server_id,
+                            "timestamp": time.time(),
+                        }
+                    ],
+                    "summary": {
+                        "total_actions": 1,
+                        "successful_actions": 0,
+                        "failed_actions": 1,
+                        "execution_time_ms": result["execution_time_ms"],
+                    },
+                }
+
+            return result
+
+        except Exception as e:
+            raise NodeExecutionError(f"MCP execution failed: {str(e)}")
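
A minimal invocation sketch based on the run() signature above. Note that this node currently simulates tool execution (random success, mock data) rather than contacting a real MCP server; the tool name and request values here are illustrative.

    from kailash.nodes.enterprise.mcp_executor import EnterpriseMLCPExecutorNode

    executor = EnterpriseMLCPExecutorNode(name="mcp_executor")
    result = executor.run(
        tool_request={
            "tool": "risk_scoring",                  # one of the mocked branches above
            "parameters": {"portfolio_id": "p-42"},  # illustrative; params is currently unused
            "server_id": "risk-assessment-mcp",
        },
        circuit_breaker_enabled=True,
        success_rate_threshold=0.9,  # ~90% of simulated calls succeed
    )
    if result["success"]:
        print(result["data"]["overall_risk"], result["execution_results"]["summary"])
    else:
        print("failed:", result["error"], "retry:", result.get("retry_recommended"))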
kailash/nodes/enterprise/service_discovery.py (new file)
@@ -0,0 +1,150 @@
+"""Enterprise service discovery node for MCP service management."""
+
+from typing import Any, Dict, List
+
+from kailash.nodes.base import Node, NodeMetadata, NodeParameter, register_node
+from kailash.sdk_exceptions import NodeExecutionError
+
+
+@register_node()
+class MCPServiceDiscoveryNode(Node):
+    """Discovers available MCP services based on tenant context and requirements.
+
+    This node manages service discovery for multi-tenant MCP environments,
+    ensuring tenants only access services they're authorized to use.
+    """
+
+    metadata = NodeMetadata(
+        name="MCPServiceDiscoveryNode",
+        description="Discovers MCP services for tenant-specific requirements",
+        version="1.0.0",
+        tags={"enterprise", "mcp", "service-discovery"},
+    )
+
+    def __init__(self, name: str = None, **kwargs):
+        self.name = name or self.__class__.__name__
+        super().__init__(name=self.name, **kwargs)
+
+    def get_parameters(self) -> Dict[str, NodeParameter]:
+        return {
+            "tenant": NodeParameter(
+                name="tenant",
+                type=dict,
+                description="Tenant information",
+                required=True,
+            ),
+            "user_context": NodeParameter(
+                name="user_context",
+                type=dict,
+                description="User context with permissions",
+                required=True,
+            ),
+            "service_requirements": NodeParameter(
+                name="service_requirements",
+                type=list,
+                description="Required service types",
+                required=False,
+                default=[],
+            ),
+        }
+
+    def run(
+        self,
+        tenant: Dict,
+        user_context: Dict,
+        service_requirements: List[str] = None,
+        **kwargs,
+    ) -> Dict[str, Any]:
+        """Discover available MCP services for the tenant."""
+        try:
+            if service_requirements is None:
+                service_requirements = []
+
+            tenant_id = tenant.get("id", "default")
+            compliance_zones = tenant.get("compliance_zones", ["public"])
+            data_residency = tenant.get("data_residency", "us-west-1")
+
+            # Define available services per tenant
+            available_services = {
+                "healthcare-corp": [
+                    {
+                        "id": "health-analytics-mcp",
+                        "type": "analytics",
+                        "region": "us-east-1",
+                        "compliance": ["hipaa", "gdpr"],
+                        "tools": ["patient_analytics", "health_metrics"],
+                        "endpoint": "https://health-analytics.mcp.healthcare.com",
+                    },
+                    {
+                        "id": "patient-data-mcp",
+                        "type": "data",
+                        "region": "us-east-1",
+                        "compliance": ["hipaa"],
+                        "tools": ["patient_lookup", "medical_records"],
+                        "endpoint": "https://patient-data.mcp.healthcare.com",
+                    },
+                ],
+                "finance-inc": [
+                    {
+                        "id": "transaction-analytics-mcp",
+                        "type": "analytics",
+                        "region": "us-east-1",
+                        "compliance": ["sox", "pci_dss"],
+                        "tools": ["transaction_analysis", "fraud_detection"],
+                        "endpoint": "https://transaction-analytics.mcp.finance.com",
+                    },
+                    {
+                        "id": "risk-assessment-mcp",
+                        "type": "risk",
+                        "region": "us-east-1",
+                        "compliance": ["sox"],
+                        "tools": ["risk_scoring", "compliance_check"],
+                        "endpoint": "https://risk-assessment.mcp.finance.com",
+                    },
+                ],
+                "default": [
+                    {
+                        "id": "general-analytics-mcp",
+                        "type": "analytics",
+                        "region": "us-west-1",
+                        "compliance": ["public"],
+                        "tools": ["basic_analytics", "reporting"],
+                        "endpoint": "https://general-analytics.mcp.kailash.ai",
+                    },
+                ],
+            }
+
+            # Get services for this tenant
+            services = available_services.get(tenant_id, available_services["default"])
+
+            # Filter by service requirements
+            if service_requirements:
+                services = [s for s in services if s["type"] in service_requirements]
+
+            # Filter by compliance requirements
+            user_compliance = set(compliance_zones)
+            filtered_services = []
+            for service in services:
+                service_compliance = set(service.get("compliance", ["public"]))
+                if (
+                    user_compliance.intersection(service_compliance)
+                    or "public" in service_compliance
+                ):
+                    filtered_services.append(service)
+
+            # Filter by data residency if required
+            if data_residency:
+                filtered_services = [
+                    s for s in filtered_services if s.get("region") == data_residency
+                ]
+
+            return {
+                "discovered_services": filtered_services,
+                "service_count": len(filtered_services),
+                "tenant_id": tenant_id,
+                "compliance_filters": list(user_compliance),
+                "discovery_timestamp": kwargs.get("timestamp", 0),
+            }
+
+        except Exception as e:
+            raise NodeExecutionError(f"Service discovery failed: {str(e)}")
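
A usage sketch for the discovery node, using only values hard-coded in the hunk above (tenant IDs other than "healthcare-corp" and "finance-inc" fall back to the "default" catalog; all inputs are illustrative):

    from kailash.nodes.enterprise.service_discovery import MCPServiceDiscoveryNode

    discovery = MCPServiceDiscoveryNode(name="mcp_discovery")
    found = discovery.run(
        tenant={
            "id": "healthcare-corp",
            "compliance_zones": ["hipaa"],
            "data_residency": "us-east-1",
        },
        user_context={"user_id": "u-123", "permissions": ["read"]},
        service_requirements=["analytics"],
    )
    # With these inputs only health-analytics-mcp survives all three filters:
    # type == "analytics", "hipaa" zone overlap, region == "us-east-1".
    print(found["service_count"], [s["id"] for s in found["discovered_services"]])

Together with EnterpriseMLCPExecutorNode and EnterpriseAuditLoggerNode above, this suggests a discover → execute → audit pipeline, although how the three nodes are wired into a workflow is not shown in this diff.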