kailash 0.6.2__py3-none-any.whl → 0.6.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (131)
  1. kailash/__init__.py +3 -3
  2. kailash/api/custom_nodes_secure.py +3 -3
  3. kailash/api/gateway.py +1 -1
  4. kailash/api/studio.py +2 -3
  5. kailash/api/workflow_api.py +3 -4
  6. kailash/core/resilience/bulkhead.py +460 -0
  7. kailash/core/resilience/circuit_breaker.py +92 -10
  8. kailash/edge/discovery.py +86 -0
  9. kailash/mcp_server/__init__.py +334 -0
  10. kailash/mcp_server/advanced_features.py +1022 -0
  11. kailash/{mcp → mcp_server}/ai_registry_server.py +29 -4
  12. kailash/mcp_server/auth.py +789 -0
  13. kailash/mcp_server/client.py +712 -0
  14. kailash/mcp_server/discovery.py +1593 -0
  15. kailash/mcp_server/errors.py +673 -0
  16. kailash/mcp_server/oauth.py +1727 -0
  17. kailash/mcp_server/protocol.py +1126 -0
  18. kailash/mcp_server/registry_integration.py +587 -0
  19. kailash/mcp_server/server.py +1747 -0
  20. kailash/{mcp → mcp_server}/servers/ai_registry.py +2 -2
  21. kailash/mcp_server/transports.py +1169 -0
  22. kailash/mcp_server/utils/cache.py +510 -0
  23. kailash/middleware/auth/auth_manager.py +3 -3
  24. kailash/middleware/communication/api_gateway.py +2 -9
  25. kailash/middleware/communication/realtime.py +1 -1
  26. kailash/middleware/mcp/client_integration.py +1 -1
  27. kailash/middleware/mcp/enhanced_server.py +2 -2
  28. kailash/nodes/__init__.py +2 -0
  29. kailash/nodes/admin/audit_log.py +6 -6
  30. kailash/nodes/admin/permission_check.py +8 -8
  31. kailash/nodes/admin/role_management.py +32 -28
  32. kailash/nodes/admin/schema.sql +6 -1
  33. kailash/nodes/admin/schema_manager.py +13 -13
  34. kailash/nodes/admin/security_event.py +16 -20
  35. kailash/nodes/admin/tenant_isolation.py +3 -3
  36. kailash/nodes/admin/transaction_utils.py +3 -3
  37. kailash/nodes/admin/user_management.py +21 -22
  38. kailash/nodes/ai/a2a.py +11 -11
  39. kailash/nodes/ai/ai_providers.py +9 -12
  40. kailash/nodes/ai/embedding_generator.py +13 -14
  41. kailash/nodes/ai/intelligent_agent_orchestrator.py +19 -19
  42. kailash/nodes/ai/iterative_llm_agent.py +3 -3
  43. kailash/nodes/ai/llm_agent.py +213 -36
  44. kailash/nodes/ai/self_organizing.py +2 -2
  45. kailash/nodes/alerts/discord.py +4 -4
  46. kailash/nodes/api/graphql.py +6 -6
  47. kailash/nodes/api/http.py +12 -17
  48. kailash/nodes/api/rate_limiting.py +4 -4
  49. kailash/nodes/api/rest.py +15 -15
  50. kailash/nodes/auth/mfa.py +3 -4
  51. kailash/nodes/auth/risk_assessment.py +2 -2
  52. kailash/nodes/auth/session_management.py +5 -5
  53. kailash/nodes/auth/sso.py +143 -0
  54. kailash/nodes/base.py +6 -2
  55. kailash/nodes/base_async.py +16 -2
  56. kailash/nodes/base_with_acl.py +2 -2
  57. kailash/nodes/cache/__init__.py +9 -0
  58. kailash/nodes/cache/cache.py +1172 -0
  59. kailash/nodes/cache/cache_invalidation.py +870 -0
  60. kailash/nodes/cache/redis_pool_manager.py +595 -0
  61. kailash/nodes/code/async_python.py +2 -1
  62. kailash/nodes/code/python.py +196 -35
  63. kailash/nodes/compliance/data_retention.py +6 -6
  64. kailash/nodes/compliance/gdpr.py +5 -5
  65. kailash/nodes/data/__init__.py +10 -0
  66. kailash/nodes/data/optimistic_locking.py +906 -0
  67. kailash/nodes/data/readers.py +8 -8
  68. kailash/nodes/data/redis.py +349 -0
  69. kailash/nodes/data/sql.py +314 -3
  70. kailash/nodes/data/streaming.py +21 -0
  71. kailash/nodes/enterprise/__init__.py +8 -0
  72. kailash/nodes/enterprise/audit_logger.py +285 -0
  73. kailash/nodes/enterprise/batch_processor.py +22 -3
  74. kailash/nodes/enterprise/data_lineage.py +1 -1
  75. kailash/nodes/enterprise/mcp_executor.py +205 -0
  76. kailash/nodes/enterprise/service_discovery.py +150 -0
  77. kailash/nodes/enterprise/tenant_assignment.py +108 -0
  78. kailash/nodes/logic/async_operations.py +2 -2
  79. kailash/nodes/logic/convergence.py +1 -1
  80. kailash/nodes/logic/operations.py +1 -1
  81. kailash/nodes/monitoring/__init__.py +11 -1
  82. kailash/nodes/monitoring/health_check.py +456 -0
  83. kailash/nodes/monitoring/log_processor.py +817 -0
  84. kailash/nodes/monitoring/metrics_collector.py +627 -0
  85. kailash/nodes/monitoring/performance_benchmark.py +137 -11
  86. kailash/nodes/rag/advanced.py +7 -7
  87. kailash/nodes/rag/agentic.py +49 -2
  88. kailash/nodes/rag/conversational.py +3 -3
  89. kailash/nodes/rag/evaluation.py +3 -3
  90. kailash/nodes/rag/federated.py +3 -3
  91. kailash/nodes/rag/graph.py +3 -3
  92. kailash/nodes/rag/multimodal.py +3 -3
  93. kailash/nodes/rag/optimized.py +5 -5
  94. kailash/nodes/rag/privacy.py +3 -3
  95. kailash/nodes/rag/query_processing.py +6 -6
  96. kailash/nodes/rag/realtime.py +1 -1
  97. kailash/nodes/rag/registry.py +2 -6
  98. kailash/nodes/rag/router.py +1 -1
  99. kailash/nodes/rag/similarity.py +7 -7
  100. kailash/nodes/rag/strategies.py +4 -4
  101. kailash/nodes/security/abac_evaluator.py +6 -6
  102. kailash/nodes/security/behavior_analysis.py +5 -6
  103. kailash/nodes/security/credential_manager.py +1 -1
  104. kailash/nodes/security/rotating_credentials.py +11 -11
  105. kailash/nodes/security/threat_detection.py +8 -8
  106. kailash/nodes/testing/credential_testing.py +2 -2
  107. kailash/nodes/transform/processors.py +5 -5
  108. kailash/runtime/local.py +162 -14
  109. kailash/runtime/parameter_injection.py +425 -0
  110. kailash/runtime/parameter_injector.py +657 -0
  111. kailash/runtime/testing.py +2 -2
  112. kailash/testing/fixtures.py +2 -2
  113. kailash/workflow/builder.py +99 -18
  114. kailash/workflow/builder_improvements.py +207 -0
  115. kailash/workflow/input_handling.py +170 -0
  116. {kailash-0.6.2.dist-info → kailash-0.6.4.dist-info}/METADATA +21 -8
  117. {kailash-0.6.2.dist-info → kailash-0.6.4.dist-info}/RECORD +126 -101
  118. kailash/mcp/__init__.py +0 -53
  119. kailash/mcp/client.py +0 -445
  120. kailash/mcp/server.py +0 -292
  121. kailash/mcp/server_enhanced.py +0 -449
  122. kailash/mcp/utils/cache.py +0 -267
  123. /kailash/{mcp → mcp_server}/client_new.py +0 -0
  124. /kailash/{mcp → mcp_server}/utils/__init__.py +0 -0
  125. /kailash/{mcp → mcp_server}/utils/config.py +0 -0
  126. /kailash/{mcp → mcp_server}/utils/formatters.py +0 -0
  127. /kailash/{mcp → mcp_server}/utils/metrics.py +0 -0
  128. {kailash-0.6.2.dist-info → kailash-0.6.4.dist-info}/WHEEL +0 -0
  129. {kailash-0.6.2.dist-info → kailash-0.6.4.dist-info}/entry_points.txt +0 -0
  130. {kailash-0.6.2.dist-info → kailash-0.6.4.dist-info}/licenses/LICENSE +0 -0
  131. {kailash-0.6.2.dist-info → kailash-0.6.4.dist-info}/top_level.txt +0 -0
kailash/nodes/data/sql.py CHANGED
@@ -10,6 +10,8 @@ Design Philosophy:
10
10
  3. Safe parameterized queries
11
11
  4. Flexible result formats
12
12
  5. Transaction support
13
+ 6. Enterprise-grade concurrency control with optimistic locking
14
+ 7. Advanced retry mechanisms and conflict resolution
13
15
  """
14
16
 
15
17
  import base64
@@ -29,6 +31,17 @@ from sqlalchemy.pool import QueuePool
29
31
  from kailash.nodes.base import Node, NodeParameter, register_node
30
32
  from kailash.sdk_exceptions import NodeExecutionError
31
33
 
34
+ # Import optimistic locking for enterprise concurrency control
35
+ try:
36
+ from kailash.nodes.data.optimistic_locking import (
37
+ ConflictResolution,
38
+ OptimisticLockingNode,
39
+ )
40
+
41
+ OPTIMISTIC_LOCKING_AVAILABLE = True
42
+ except ImportError:
43
+ OPTIMISTIC_LOCKING_AVAILABLE = False
44
+
32
45
 
33
46
  @register_node()
34
47
  class SQLDatabaseNode(Node):
@@ -174,11 +187,11 @@ class SQLDatabaseNode(Node):
174
187
  >>> sql_node = SQLDatabaseNode(connection='customer_db')
175
188
  >>>
176
189
  >>> # Execute multiple queries with the same node
177
- >>> result1 = sql_node.run(
190
+ >>> result1 = sql_node.execute(
178
191
  ... query='SELECT * FROM customers WHERE active = ?',
179
192
  ... parameters=[True]
180
193
  ... )
181
- >>> result2 = sql_node.run(
194
+ >>> result2 = sql_node.execute(
182
195
  ... query='SELECT COUNT(*) as total FROM orders'
183
196
  ... )
184
197
  >>> # result1['data'] = [
@@ -306,6 +319,41 @@ class SQLDatabaseNode(Node):
306
319
  required=False,
307
320
  description="User context for access control",
308
321
  ),
322
+ # Optimistic Locking Parameters
323
+ "optimistic_locking": NodeParameter(
324
+ name="optimistic_locking",
325
+ type=bool,
326
+ required=False,
327
+ default=False,
328
+ description="Enable optimistic locking for updates",
329
+ ),
330
+ "version_field": NodeParameter(
331
+ name="version_field",
332
+ type=str,
333
+ required=False,
334
+ default="version",
335
+ description="Name of the version field for optimistic locking",
336
+ ),
337
+ "expected_version": NodeParameter(
338
+ name="expected_version",
339
+ type=int,
340
+ required=False,
341
+ description="Expected version for optimistic locking (required for updates with locking)",
342
+ ),
343
+ "conflict_resolution": NodeParameter(
344
+ name="conflict_resolution",
345
+ type=str,
346
+ required=False,
347
+ default="retry",
348
+ description="Conflict resolution strategy (fail_fast, retry, merge, last_writer_wins)",
349
+ ),
350
+ "max_retries": NodeParameter(
351
+ name="max_retries",
352
+ type=int,
353
+ required=False,
354
+ default=3,
355
+ description="Maximum retry attempts for optimistic locking conflicts",
356
+ ),
309
357
  }
310
358
 
311
359
  @staticmethod
@@ -388,6 +436,10 @@ class SQLDatabaseNode(Node):
388
436
  # Validate query safety
389
437
  self._validate_query_safety(query)
390
438
 
439
+ # Check if optimistic locking should be used
440
+ if self._should_use_optimistic_locking(kwargs):
441
+ return self._execute_with_optimistic_locking(kwargs)
442
+
391
443
  # Mask password in connection string for logging
392
444
  masked_connection = SQLDatabaseNode._mask_connection_password(
393
445
  self.connection_string
@@ -516,7 +568,7 @@ class SQLDatabaseNode(Node):
516
568
 
517
569
  # Run the synchronous method in a thread pool to avoid blocking
518
570
  loop = asyncio.get_event_loop()
519
- return await loop.run_in_executor(None, lambda: self.run(**kwargs))
571
+ return await loop.run_in_executor(None, lambda: self.execute(**kwargs))
520
572
 
521
573
  @classmethod
522
574
  def get_pool_status(cls) -> dict[str, Any]:
@@ -943,3 +995,262 @@ class SQLDatabaseNode(Node):
943
995
  }
944
996
  result.append(serialized_dict)
945
997
  return result
998
+
999
+ def _should_use_optimistic_locking(self, kwargs: dict) -> bool:
1000
+ """Check if optimistic locking should be used for this operation."""
1001
+ return (
1002
+ OPTIMISTIC_LOCKING_AVAILABLE
1003
+ and kwargs.get("optimistic_locking", False)
1004
+ and self._is_update_query(kwargs.get("query", ""))
1005
+ )
1006
+
1007
+ def _is_update_query(self, query: str) -> bool:
1008
+ """Check if the query is an UPDATE statement."""
1009
+ query_upper = query.strip().upper()
1010
+ return query_upper.startswith("UPDATE")
1011
+
1012
    # NOTE(review): this definition is DEAD CODE — a second method with the
    # same name (but signature `(self, kwargs: dict)`) is defined later in
    # this class and shadows this one. The dispatch site also calls
    # `self._execute_with_optimistic_locking(kwargs)` with a positional dict,
    # which matches the later signature, not this `**kwargs` one. Confirm and
    # remove one of the two definitions.
    def _execute_with_optimistic_locking(self, **kwargs) -> dict[str, Any]:
        """Execute query with optimistic locking support."""
        if not OPTIMISTIC_LOCKING_AVAILABLE:
            raise NodeExecutionError(
                "Optimistic locking requested but OptimisticLockingNode not available"
            )

        query = kwargs.get("query", "")
        expected_version = kwargs.get("expected_version")

        # Unlike the later duplicate, this variant requires the caller to
        # supply the expected version explicitly.
        if expected_version is None:
            raise NodeExecutionError(
                "expected_version parameter is required when optimistic_locking=True"
            )

        # Extract table name and record ID from UPDATE query
        table_info = self._extract_update_info(query, kwargs.get("parameters"))

        if not table_info:
            raise NodeExecutionError(
                "Could not extract table and record information from UPDATE query for optimistic locking"
            )

        # Create optimistic locking node
        locking_node = OptimisticLockingNode(
            version_field=kwargs.get("version_field", "version"),
            max_retries=kwargs.get("max_retries", 3),
            default_conflict_resolution=kwargs.get("conflict_resolution", "retry"),
        )

        # Get database connection for the locking node
        engine = self._get_shared_engine()

        try:
            with engine.connect() as conn:
                with conn.begin() as trans:
                    # Use optimistic locking node to handle the update
                    locking_result = locking_node.run(
                        action="update_with_version",
                        connection=conn,
                        table_name=table_info["table_name"],
                        record_id=table_info["record_id"],
                        update_data=table_info["update_data"],
                        expected_version=expected_version,
                        conflict_resolution=kwargs.get("conflict_resolution", "retry"),
                        version_field=kwargs.get("version_field", "version"),
                        id_field=table_info.get("id_field", "id"),
                    )

                    if not locking_result.get("success", False):
                        # Handle optimistic locking conflicts
                        status = locking_result.get("status", "unknown_error")
                        if status == "version_conflict":
                            raise NodeExecutionError(
                                f"Version conflict: expected version {expected_version}, "
                                f"current version {locking_result.get('current_version', 'unknown')}"
                            )
                        elif status == "retry_exhausted":
                            raise NodeExecutionError(
                                f"Maximum retries exhausted for optimistic locking. "
                                f"Conflict resolution: {kwargs.get('conflict_resolution', 'retry')}"
                            )
                        else:
                            raise NodeExecutionError(
                                f"Optimistic locking failed: {locking_result.get('error', 'Unknown error')}"
                            )

                    # Return enhanced result with locking information
                    return {
                        "data": [],  # UPDATE queries typically don't return data
                        "row_count": locking_result.get("rows_affected", 0),
                        "columns": [],
                        "execution_time": locking_result.get("execution_time", 0),
                        "optimistic_locking": {
                            "used": True,
                            "old_version": expected_version,
                            "new_version": locking_result.get("new_version"),
                            "retry_count": locking_result.get("retry_count", 0),
                            "conflict_resolution": kwargs.get(
                                "conflict_resolution", "retry"
                            ),
                            "status": locking_result.get("status", "success"),
                        },
                    }

        except Exception as e:
            # NOTE(review): the substring "retry exhausted" never matches the
            # message raised above ("Maximum retries exhausted ..."), so
            # retry-exhaustion errors fall through to the wrapping branch —
            # confirm whether that is intended.
            if "Version conflict" in str(e) or "retry exhausted" in str(e):
                # Re-raise optimistic locking specific errors
                raise
            else:
                # Wrap other database errors
                raise NodeExecutionError(
                    f"Database error during optimistic locking: {str(e)}"
                )
1106
+
1107
+ def _extract_update_info(self, query: str, parameters: Any) -> Optional[dict]:
1108
+ """Extract table name, record ID, and update data from UPDATE query.
1109
+
1110
+ This is a simplified parser for common UPDATE patterns.
1111
+ For production use, consider using a proper SQL parser.
1112
+ """
1113
+ import re
1114
+
1115
+ # Simple regex to extract UPDATE table_name SET ... WHERE id = ?
1116
+ # This is a basic implementation - for production, use a proper SQL parser
1117
+ update_pattern = r"UPDATE\s+(\w+)\s+SET\s+(.*?)\s+WHERE\s+(\w+)\s*=\s*[?$:]"
1118
+
1119
+ match = re.search(update_pattern, query.upper(), re.IGNORECASE | re.DOTALL)
1120
+ if not match:
1121
+ return None
1122
+
1123
+ table_name = match.group(1).lower()
1124
+ set_clause = match.group(2)
1125
+ id_field = match.group(3).lower()
1126
+
1127
+ # Extract update data from SET clause
1128
+ # This is simplified - assumes basic "field = ?" patterns
1129
+ update_data = {}
1130
+ set_parts = [part.strip() for part in set_clause.split(",")]
1131
+
1132
+ param_index = 0
1133
+ for part in set_parts:
1134
+ if "=" in part:
1135
+ field_name = part.split("=")[0].strip()
1136
+ # Skip version field as it's handled by optimistic locking
1137
+ if field_name.lower() != "version":
1138
+ if isinstance(parameters, list) and param_index < len(parameters):
1139
+ update_data[field_name] = parameters[param_index]
1140
+ param_index += 1
1141
+ elif isinstance(parameters, dict):
1142
+ # For named parameters, this gets more complex
1143
+ # For now, we'll skip this case
1144
+ pass
1145
+
1146
+ # Extract record ID from parameters
1147
+ # Assume the WHERE clause parameter is the last one for positional params
1148
+ record_id = None
1149
+ if isinstance(parameters, list) and parameters:
1150
+ record_id = parameters[-1] # Assume last parameter is the ID
1151
+ elif isinstance(parameters, dict):
1152
+ # Look for common ID field names in parameters
1153
+ for id_candidate in ["id", id_field, "record_id"]:
1154
+ if id_candidate in parameters:
1155
+ record_id = parameters[id_candidate]
1156
+ break
1157
+
1158
+ if record_id is None:
1159
+ return None
1160
+
1161
+ return {
1162
+ "table_name": table_name,
1163
+ "record_id": record_id,
1164
+ "update_data": update_data,
1165
+ "id_field": id_field,
1166
+ }
1167
+
1168
+ def _execute_with_optimistic_locking(self, kwargs: dict) -> dict:
1169
+ """Execute query using optimistic locking for enhanced concurrency control."""
1170
+ if not OPTIMISTIC_LOCKING_AVAILABLE:
1171
+ raise NodeExecutionError(
1172
+ "OptimisticLockingNode not available. Cannot use optimistic locking."
1173
+ )
1174
+
1175
+ query = kwargs.get("query")
1176
+ parameters = kwargs.get("parameters")
1177
+
1178
+ # Extract update information from the query
1179
+ update_info = self._extract_update_info(query, parameters)
1180
+ if not update_info:
1181
+ raise NodeExecutionError(
1182
+ "Could not extract update information for optimistic locking. "
1183
+ "Query might be too complex or not an UPDATE statement."
1184
+ )
1185
+
1186
+ # Get database connection
1187
+ engine = self._get_shared_engine()
1188
+
1189
+ try:
1190
+ with engine.connect() as conn:
1191
+ # Create optimistic locking node instance
1192
+ locking_node = OptimisticLockingNode(
1193
+ version_field=kwargs.get("version_field", "version"),
1194
+ max_retries=kwargs.get("max_retries", 3),
1195
+ default_conflict_resolution=ConflictResolution(
1196
+ kwargs.get("conflict_resolution", "retry")
1197
+ ),
1198
+ )
1199
+
1200
+ # First, read the current record with version
1201
+ read_kwargs = {
1202
+ "action": "read_with_version",
1203
+ "connection": conn,
1204
+ "table_name": update_info["table_name"],
1205
+ "record_id": update_info["record_id"],
1206
+ "version_field": kwargs.get("version_field", "version"),
1207
+ "id_field": update_info["id_field"],
1208
+ }
1209
+
1210
+ # Execute synchronously by calling async_run directly
1211
+ import asyncio
1212
+
1213
+ read_result = asyncio.run(locking_node.async_run(**read_kwargs))
1214
+
1215
+ if not read_result.get("success"):
1216
+ raise NodeExecutionError(
1217
+ f"Failed to read record for optimistic locking: {read_result.get('error')}"
1218
+ )
1219
+
1220
+ current_version = read_result["version"]
1221
+
1222
+ # Now perform the update with version check
1223
+ update_kwargs = {
1224
+ "action": "update_with_version",
1225
+ "connection": conn,
1226
+ "table_name": update_info["table_name"],
1227
+ "record_id": update_info["record_id"],
1228
+ "update_data": update_info["update_data"],
1229
+ "expected_version": current_version,
1230
+ "conflict_resolution": kwargs.get("conflict_resolution", "retry"),
1231
+ "version_field": kwargs.get("version_field", "version"),
1232
+ "id_field": update_info["id_field"],
1233
+ }
1234
+
1235
+ # Execute the update with optimistic locking
1236
+ update_result = asyncio.run(locking_node.async_run(**update_kwargs))
1237
+
1238
+ if not update_result.get("success"):
1239
+ raise NodeExecutionError(
1240
+ f"Optimistic locking update failed: {update_result.get('error')}"
1241
+ )
1242
+
1243
+ # Return result in SQLDatabaseNode format
1244
+ return {
1245
+ "data": [], # UPDATE queries don't return data
1246
+ "row_count": update_result.get("rows_affected", 1),
1247
+ "columns": [],
1248
+ "execution_time": update_result.get("execution_time", 0.0),
1249
+ "optimistic_locking_used": True,
1250
+ "version_before": current_version,
1251
+ "version_after": update_result.get("new_version"),
1252
+ "retry_count": update_result.get("retry_count", 0),
1253
+ }
1254
+
1255
+ except Exception as e:
1256
+ raise NodeExecutionError(f"Optimistic locking execution failed: {str(e)}")
@@ -493,6 +493,20 @@ class StreamPublisherNode(Node):
493
493
  required=False,
494
494
  default=3,
495
495
  ),
496
+ "messages": NodeParameter(
497
+ name="messages",
498
+ type=list,
499
+ description="Messages to publish",
500
+ required=False,
501
+ default=[],
502
+ ),
503
+ "headers": NodeParameter(
504
+ name="headers",
505
+ type=dict,
506
+ description="Optional message headers",
507
+ required=False,
508
+ default={},
509
+ ),
496
510
  }
497
511
 
498
512
  def configure(self, config: dict[str, Any]) -> None:
@@ -793,6 +807,13 @@ class WebSocketNode(Node):
793
807
  required=False,
794
808
  default=1.0,
795
809
  ),
810
+ "connection_config": NodeParameter(
811
+ name="connection_config",
812
+ type=dict,
813
+ description="Connection configuration from load balancer",
814
+ required=False,
815
+ default={},
816
+ ),
796
817
  }
797
818
 
798
819
  def configure(self, config: dict[str, Any]) -> None:
@@ -4,10 +4,18 @@ This module provides enterprise-grade nodes for complex business integration
4
4
  patterns, data lineage tracking, and batch processing optimizations.
5
5
  """
6
6
 
7
+ from kailash.nodes.enterprise.audit_logger import EnterpriseAuditLoggerNode
7
8
  from kailash.nodes.enterprise.batch_processor import BatchProcessorNode
8
9
  from kailash.nodes.enterprise.data_lineage import DataLineageNode
10
+ from kailash.nodes.enterprise.mcp_executor import EnterpriseMLCPExecutorNode
11
+ from kailash.nodes.enterprise.service_discovery import MCPServiceDiscoveryNode
12
+ from kailash.nodes.enterprise.tenant_assignment import TenantAssignmentNode
9
13
 
10
14
  __all__ = [
11
15
  "DataLineageNode",
12
16
  "BatchProcessorNode",
17
+ "TenantAssignmentNode",
18
+ "MCPServiceDiscoveryNode",
19
+ "EnterpriseMLCPExecutorNode",
20
+ "EnterpriseAuditLoggerNode",
13
21
  ]