kailash 0.8.4__py3-none-any.whl → 0.8.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (99)
  1. kailash/__init__.py +5 -11
  2. kailash/channels/__init__.py +2 -1
  3. kailash/channels/mcp_channel.py +23 -4
  4. kailash/cli/__init__.py +11 -1
  5. kailash/cli/validate_imports.py +202 -0
  6. kailash/cli/validation_audit.py +570 -0
  7. kailash/core/actors/supervisor.py +1 -1
  8. kailash/core/resilience/bulkhead.py +15 -5
  9. kailash/core/resilience/circuit_breaker.py +74 -1
  10. kailash/core/resilience/health_monitor.py +433 -33
  11. kailash/edge/compliance.py +33 -0
  12. kailash/edge/consistency.py +609 -0
  13. kailash/edge/coordination/__init__.py +30 -0
  14. kailash/edge/coordination/global_ordering.py +355 -0
  15. kailash/edge/coordination/leader_election.py +217 -0
  16. kailash/edge/coordination/partition_detector.py +296 -0
  17. kailash/edge/coordination/raft.py +485 -0
  18. kailash/edge/discovery.py +63 -1
  19. kailash/edge/migration/__init__.py +19 -0
  20. kailash/edge/migration/edge_migration_service.py +384 -0
  21. kailash/edge/migration/edge_migrator.py +832 -0
  22. kailash/edge/monitoring/__init__.py +21 -0
  23. kailash/edge/monitoring/edge_monitor.py +736 -0
  24. kailash/edge/prediction/__init__.py +10 -0
  25. kailash/edge/prediction/predictive_warmer.py +591 -0
  26. kailash/edge/resource/__init__.py +102 -0
  27. kailash/edge/resource/cloud_integration.py +796 -0
  28. kailash/edge/resource/cost_optimizer.py +949 -0
  29. kailash/edge/resource/docker_integration.py +919 -0
  30. kailash/edge/resource/kubernetes_integration.py +893 -0
  31. kailash/edge/resource/platform_integration.py +913 -0
  32. kailash/edge/resource/predictive_scaler.py +959 -0
  33. kailash/edge/resource/resource_analyzer.py +824 -0
  34. kailash/edge/resource/resource_pools.py +610 -0
  35. kailash/integrations/dataflow_edge.py +261 -0
  36. kailash/mcp_server/registry_integration.py +1 -1
  37. kailash/mcp_server/server.py +351 -8
  38. kailash/mcp_server/transports.py +305 -0
  39. kailash/middleware/gateway/event_store.py +1 -0
  40. kailash/monitoring/__init__.py +18 -0
  41. kailash/monitoring/alerts.py +646 -0
  42. kailash/monitoring/metrics.py +677 -0
  43. kailash/nodes/__init__.py +2 -0
  44. kailash/nodes/ai/semantic_memory.py +2 -2
  45. kailash/nodes/base.py +622 -1
  46. kailash/nodes/code/python.py +44 -3
  47. kailash/nodes/data/async_sql.py +42 -20
  48. kailash/nodes/edge/__init__.py +36 -0
  49. kailash/nodes/edge/base.py +240 -0
  50. kailash/nodes/edge/cloud_node.py +710 -0
  51. kailash/nodes/edge/coordination.py +239 -0
  52. kailash/nodes/edge/docker_node.py +825 -0
  53. kailash/nodes/edge/edge_data.py +582 -0
  54. kailash/nodes/edge/edge_migration_node.py +396 -0
  55. kailash/nodes/edge/edge_monitoring_node.py +421 -0
  56. kailash/nodes/edge/edge_state.py +673 -0
  57. kailash/nodes/edge/edge_warming_node.py +393 -0
  58. kailash/nodes/edge/kubernetes_node.py +652 -0
  59. kailash/nodes/edge/platform_node.py +766 -0
  60. kailash/nodes/edge/resource_analyzer_node.py +378 -0
  61. kailash/nodes/edge/resource_optimizer_node.py +501 -0
  62. kailash/nodes/edge/resource_scaler_node.py +397 -0
  63. kailash/nodes/governance.py +410 -0
  64. kailash/nodes/ports.py +676 -0
  65. kailash/nodes/rag/registry.py +1 -1
  66. kailash/nodes/transaction/distributed_transaction_manager.py +48 -1
  67. kailash/nodes/transaction/saga_state_storage.py +2 -1
  68. kailash/nodes/validation.py +8 -8
  69. kailash/runtime/local.py +374 -1
  70. kailash/runtime/validation/__init__.py +12 -0
  71. kailash/runtime/validation/connection_context.py +119 -0
  72. kailash/runtime/validation/enhanced_error_formatter.py +202 -0
  73. kailash/runtime/validation/error_categorizer.py +164 -0
  74. kailash/runtime/validation/import_validator.py +446 -0
  75. kailash/runtime/validation/metrics.py +380 -0
  76. kailash/runtime/validation/performance.py +615 -0
  77. kailash/runtime/validation/suggestion_engine.py +212 -0
  78. kailash/testing/fixtures.py +2 -2
  79. kailash/utils/data_paths.py +74 -0
  80. kailash/workflow/builder.py +413 -8
  81. kailash/workflow/contracts.py +418 -0
  82. kailash/workflow/edge_infrastructure.py +369 -0
  83. kailash/workflow/mermaid_visualizer.py +3 -1
  84. kailash/workflow/migration.py +3 -3
  85. kailash/workflow/templates.py +6 -6
  86. kailash/workflow/type_inference.py +669 -0
  87. kailash/workflow/validation.py +134 -3
  88. {kailash-0.8.4.dist-info → kailash-0.8.6.dist-info}/METADATA +52 -34
  89. {kailash-0.8.4.dist-info → kailash-0.8.6.dist-info}/RECORD +93 -42
  90. kailash/nexus/__init__.py +0 -21
  91. kailash/nexus/cli/__init__.py +0 -5
  92. kailash/nexus/cli/__main__.py +0 -6
  93. kailash/nexus/cli/main.py +0 -176
  94. kailash/nexus/factory.py +0 -413
  95. kailash/nexus/gateway.py +0 -545
  96. {kailash-0.8.4.dist-info → kailash-0.8.6.dist-info}/WHEEL +0 -0
  97. {kailash-0.8.4.dist-info → kailash-0.8.6.dist-info}/entry_points.txt +0 -0
  98. {kailash-0.8.4.dist-info → kailash-0.8.6.dist-info}/licenses/LICENSE +0 -0
  99. {kailash-0.8.4.dist-info → kailash-0.8.6.dist-info}/top_level.txt +0 -0
kailash/nodes/code/python.py
@@ -389,7 +389,9 @@ class CodeExecutor:
                 f"Error position: {' ' * (e.offset - 1) if e.offset else ''}^"
             )
 
-    def execute_code(self, code: str, inputs: dict[str, Any]) -> dict[str, Any]:
+    def execute_code(
+        self, code: str, inputs: dict[str, Any], node_instance=None
+    ) -> dict[str, Any]:
         """Execute Python code with given inputs.
 
         Args:
@@ -476,6 +478,43 @@ class CodeExecutor:
             except ImportError:
                 logger.warning(f"Module {module_name} not available")
 
+        # Add global utility functions to namespace
+        try:
+            from kailash.utils.data_paths import (
+                get_data_path,
+                get_input_data_path,
+                get_output_data_path,
+            )
+
+            namespace["get_input_data_path"] = get_input_data_path
+            namespace["get_output_data_path"] = get_output_data_path
+            namespace["get_data_path"] = get_data_path
+        except ImportError:
+            logger.warning(
+                "Could not import data path utilities - functions will not be available in PythonCodeNode execution"
+            )
+
+        # Add workflow context functions if node instance is available
+        if node_instance and hasattr(node_instance, "get_workflow_context"):
+            # Bind the actual node methods
+            namespace["get_workflow_context"] = node_instance.get_workflow_context
+            namespace["set_workflow_context"] = node_instance.set_workflow_context
+        else:
+            # Add placeholder functions that warn about unavailability
+            def _get_workflow_context(key: str, default=None):
+                logger.warning(
+                    "get_workflow_context() is not available in PythonCodeNode execution context. Node instance not provided."
+                )
+                return default
+
+            def _set_workflow_context(key: str, value):
+                logger.warning(
+                    "set_workflow_context() is not available in PythonCodeNode execution context. Node instance not provided."
+                )
+
+            namespace["get_workflow_context"] = _get_workflow_context
+            namespace["set_workflow_context"] = _set_workflow_context
+
         # Add sanitized inputs
         namespace.update(sanitized_inputs)
 
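With these additions, code strings executed by PythonCodeNode can call the injected helpers directly. A minimal sketch of such user code follows; the file names and the "run_id" key are hypothetical, and only the helper functions themselves come from the change above:

    # Hypothetical code string passed to PythonCodeNode(code=...).
    # get_input_data_path / get_output_data_path / get_data_path are injected
    # into the execution namespace by CodeExecutor.execute_code().
    input_file = get_input_data_path("customers.csv")    # hypothetical file name
    output_file = get_output_data_path("scored.csv")     # hypothetical file name

    # The workflow context helpers are bound to the real node methods only when
    # the node instance is passed via node_instance=...; otherwise they log a
    # warning and return the default.
    run_id = get_workflow_context("run_id", default="local-run")
    set_workflow_context("last_output", str(output_file))

    result = {"input": str(input_file), "output": str(output_file), "run_id": run_id}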
@@ -1222,7 +1261,9 @@ class PythonCodeNode(Node):
         try:
             if self.code:
                 # Execute code string
-                outputs = self.executor.execute_code(self.code, kwargs)
+                outputs = self.executor.execute_code(
+                    self.code, kwargs, node_instance=self
+                )
                 # Return 'result' variable if it exists, otherwise all outputs
                 if "result" in outputs:
                     return {"result": outputs["result"]}
@@ -1454,7 +1495,7 @@ class PythonCodeNode(Node):
         """
         # Execute directly based on execution type
         if self.code:
-            outputs = self.executor.execute_code(self.code, inputs)
+            outputs = self.executor.execute_code(self.code, inputs, node_instance=self)
             return outputs.get("result", outputs)
         elif self.function:
             wrapper = FunctionWrapper(self.function, self.executor)
kailash/nodes/data/async_sql.py
@@ -489,6 +489,7 @@ class PostgreSQLAdapter(DatabaseAdapter):
                 or "DELETE" in query_upper
                 or "INSERT" in query_upper
             )
+            and "SELECT" not in query_upper
             and "RETURNING" not in query_upper
             and fetch_mode == FetchMode.ALL
         ):
@@ -527,6 +528,7 @@ class PostgreSQLAdapter(DatabaseAdapter):
                 or "DELETE" in query_upper
                 or "INSERT" in query_upper
             )
+            and "SELECT" not in query_upper
             and "RETURNING" not in query_upper
             and fetch_mode == FetchMode.ALL
         ):
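The extra clause narrows the write-only branch: a statement containing UPDATE/DELETE/INSERT keywords no longer takes it if it also contains SELECT (for example an INSERT ... SELECT or a data-modifying CTE). A small standalone sketch of the predicate follows; the leading UPDATE term ahead of the visible context lines is an assumption, the surrounding variables are stubbed out, and what the guarded branch does with the result is not shown in this hunk:

    from enum import Enum

    class FetchMode(Enum):
        # Stub for the fetch mode used in the diff.
        ALL = "all"

    def takes_write_only_branch(query: str, fetch_mode: FetchMode) -> bool:
        # Mirrors the condition from the diff, simplified to a free function.
        query_upper = query.upper()
        return (
            ("UPDATE" in query_upper or "DELETE" in query_upper or "INSERT" in query_upper)
            and "SELECT" not in query_upper
            and "RETURNING" not in query_upper
            and fetch_mode == FetchMode.ALL
        )

    print(takes_write_only_branch("UPDATE t SET x = 1", FetchMode.ALL))             # True
    print(takes_write_only_branch("INSERT INTO t SELECT * FROM s", FetchMode.ALL))  # False (new in 0.8.6)
    print(takes_write_only_branch("DELETE FROM t RETURNING id", FetchMode.ALL))     # False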
@@ -2617,33 +2619,53 @@ class AsyncSQLDatabaseNode(AsyncNode):
 
     async def cleanup(self):
         """Clean up database connections."""
+        try:
+            # Check if we have a running event loop
+            loop = asyncio.get_running_loop()
+            if loop.is_closed():
+                # Event loop is closing, skip cleanup
+                return
+        except RuntimeError:
+            # No event loop, skip cleanup
+            return
+
         # Rollback any active transaction
         if self._active_transaction and self._adapter:
             try:
-                await self._adapter.rollback_transaction(self._active_transaction)
-            except Exception:
+                await asyncio.wait_for(
+                    self._adapter.rollback_transaction(self._active_transaction),
+                    timeout=1.0,
+                )
+            except (Exception, asyncio.TimeoutError):
                 pass  # Best effort cleanup
             self._active_transaction = None
 
         if self._adapter and self._connected:
-            if self._share_pool and self._pool_key:
-                # Decrement reference count for shared pool
-                async with self._get_pool_lock():
-                    if self._pool_key in self._shared_pools:
-                        adapter, ref_count = self._shared_pools[self._pool_key]
-                        if ref_count > 1:
-                            # Others still using the pool
-                            self._shared_pools[self._pool_key] = (
-                                adapter,
-                                ref_count - 1,
-                            )
-                        else:
-                            # Last reference, close the pool
-                            del self._shared_pools[self._pool_key]
-                            await adapter.disconnect()
-            else:
-                # Dedicated pool, close directly
-                await self._adapter.disconnect()
+            try:
+                if self._share_pool and self._pool_key:
+                    # Decrement reference count for shared pool with timeout
+                    async with await asyncio.wait_for(
+                        self._get_pool_lock(), timeout=1.0
+                    ):
+                        if self._pool_key in self._shared_pools:
+                            adapter, ref_count = self._shared_pools[self._pool_key]
+                            if ref_count > 1:
+                                # Others still using the pool
+                                self._shared_pools[self._pool_key] = (
+                                    adapter,
+                                    ref_count - 1,
+                                )
+                            else:
+                                # Last reference, close the pool
+                                del self._shared_pools[self._pool_key]
+                                await asyncio.wait_for(
+                                    adapter.disconnect(), timeout=1.0
+                                )
+                else:
+                    # Dedicated pool, close directly
+                    await asyncio.wait_for(self._adapter.disconnect(), timeout=1.0)
+            except (Exception, asyncio.TimeoutError):
+                pass  # Best effort cleanup
 
         self._connected = False
         self._adapter = None
kailash/nodes/edge/__init__.py (new file)
@@ -0,0 +1,36 @@
+"""Edge computing nodes for distributed processing and data management."""
+
+from .base import EdgeNode
+from .cloud_node import CloudNode
+from .coordination import EdgeCoordinationNode
+from .docker_node import DockerNode
+from .edge_data import EdgeDataNode
+from .edge_migration_node import EdgeMigrationNode
+from .edge_monitoring_node import EdgeMonitoringNode
+from .edge_state import EdgeStateMachine
+from .edge_warming_node import EdgeWarmingNode
+
+# Phase 4.4 Integration & Testing nodes
+from .kubernetes_node import KubernetesNode
+from .platform_node import PlatformNode
+from .resource_analyzer_node import ResourceAnalyzerNode
+from .resource_optimizer_node import ResourceOptimizerNode
+from .resource_scaler_node import ResourceScalerNode
+
+__all__ = [
+    "EdgeNode",
+    "EdgeDataNode",
+    "EdgeStateMachine",
+    "EdgeCoordinationNode",
+    "EdgeWarmingNode",
+    "EdgeMonitoringNode",
+    "EdgeMigrationNode",
+    "ResourceAnalyzerNode",
+    "ResourceScalerNode",
+    "ResourceOptimizerNode",
+    # Phase 4.4 nodes
+    "KubernetesNode",
+    "DockerNode",
+    "CloudNode",
+    "PlatformNode",
+]
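With this package in place, edge nodes are importable from a single namespace; for example (assuming kailash 0.8.6 is installed):

    # Import the base class and a few concrete edge nodes from the new subpackage.
    from kailash.nodes.edge import EdgeNode, EdgeDataNode, KubernetesNode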
kailash/nodes/edge/base.py (new file)
@@ -0,0 +1,240 @@
+"""Base edge-aware node with location awareness and compliance routing."""
+
+import asyncio
+from typing import Any, Dict, List, Optional
+
+from kailash.edge.compliance import ComplianceRouter, DataClassification
+from kailash.edge.discovery import EdgeDiscovery, EdgeSelectionStrategy
+from kailash.edge.location import EdgeLocation
+from kailash.nodes.base_async import AsyncNode
+
+
+class EdgeNode(AsyncNode):
+    """Base node with edge computing awareness.
+
+    Extends AsyncNode with:
+    - Edge location awareness
+    - Automatic edge selection
+    - Compliance-aware routing
+    - Migration capabilities
+    """
+
+    def __init__(self, **config):
+        """Initialize edge-aware node.
+
+        Args:
+            edge_strategy: Edge selection strategy (latency|cost|balanced|compliance)
+            preferred_locations: List of preferred edge location names
+            compliance_zones: List of required compliance zones (gdpr, ccpa, etc.)
+            enable_migration: Whether to enable edge migration capabilities
+            **config: Additional node configuration
+        """
+        self.edge_strategy = EdgeSelectionStrategy(
+            config.pop("edge_strategy", "balanced")
+        )
+        self.preferred_locations = config.pop("preferred_locations", [])
+        self.compliance_zones = config.pop("compliance_zones", [])
+        self.enable_migration = config.pop("enable_migration", True)
+
+        # Check for injected infrastructure (from WorkflowBuilder)
+        edge_infrastructure = config.pop("_edge_infrastructure", None)
+
+        if edge_infrastructure:
+            # Use shared infrastructure from WorkflowBuilder
+            self.edge_discovery = edge_infrastructure.get_discovery()
+            self.compliance_router = edge_infrastructure.get_compliance_router()
+            self._shared_infrastructure = edge_infrastructure
+        else:
+            # Standalone mode - create own infrastructure (backward compatibility)
+            self.edge_discovery = EdgeDiscovery()
+            self.compliance_router = ComplianceRouter()
+            self._shared_infrastructure = None
+
+        self.current_edge: Optional[EdgeLocation] = None
+
+        super().__init__(**config)
+
+    async def initialize(self):
+        """Initialize edge infrastructure."""
+        # No need to call super().initialize() as AsyncNode doesn't have it
+
+        # Start edge discovery only if not using shared infrastructure
+        if not self._shared_infrastructure:
+            await self.edge_discovery.start_discovery()
+
+        # Select initial edge
+        self.current_edge = await self._select_edge()
+
+        if not self.current_edge:
+            raise RuntimeError("No suitable edge location found")
+
+    async def _select_edge(
+        self, data: Optional[Dict[str, Any]] = None
+    ) -> Optional[EdgeLocation]:
+        """Select optimal edge location based on strategy and constraints.
+
+        Args:
+            data: Optional data for compliance classification
+
+        Returns:
+            Selected edge location or None
+        """
+        # Get all available edges
+        edges = self.edge_discovery.get_all_edges()
+
+        # Filter by preferred locations if specified
+        if self.preferred_locations:
+            edges = [e for e in edges if e.name in self.preferred_locations]
+
+        # Filter by compliance if data provided
+        if data and self.compliance_zones:
+            data_class = self.compliance_router.classify_data(data)
+            edges = [
+                e
+                for e in edges
+                if self.compliance_router.is_compliant_location(
+                    e, data_class, self.compliance_zones
+                )
+            ]
+
+        # Apply edge selection strategy
+        if not edges:
+            return None
+
+        # If we've already filtered edges, select from the filtered list
+        # based on the strategy
+        if self.edge_strategy == EdgeSelectionStrategy.LATENCY_OPTIMAL:
+            # Select edge with lowest latency
+            return min(edges, key=lambda e: e.metrics.latency_p50_ms)
+        elif self.edge_strategy == EdgeSelectionStrategy.COST_OPTIMAL:
+            # Select edge with lowest cost
+            return min(edges, key=lambda e: e.metrics.compute_cost_per_hour)
+        elif self.edge_strategy == EdgeSelectionStrategy.COMPLIANCE_FIRST:
+            # Already filtered for compliance, pick first
+            return edges[0]
+        elif self.edge_strategy == EdgeSelectionStrategy.CAPACITY_OPTIMAL:
+            # Select edge with most capacity
+            return max(
+                edges,
+                key=lambda e: e.capabilities.cpu_cores
+                * (1 - e.metrics.cpu_utilization),
+            )
+        else:  # BALANCED or others
+            # Simple balanced selection - pick edge with best combined score
+            return min(
+                edges,
+                key=lambda e: e.metrics.latency_p50_ms
+                * e.metrics.compute_cost_per_hour,
+            )
+
+    async def migrate_to_edge(
+        self, target_edge: EdgeLocation, state_data: Optional[Dict[str, Any]] = None
+    ) -> bool:
+        """Migrate this node to a different edge location.
+
+        Args:
+            target_edge: Target edge location
+            state_data: Optional state to migrate
+
+        Returns:
+            Success status
+        """
+        if not self.enable_migration:
+            return False
+
+        try:
+            # Prepare for migration
+            await self._prepare_migration(target_edge, state_data)
+
+            # Perform migration
+            old_edge = self.current_edge
+            self.current_edge = target_edge
+
+            # Cleanup old edge
+            if old_edge:
+                await self._cleanup_edge(old_edge)
+
+            return True
+
+        except Exception as e:
+            self.logger.error(f"Migration failed: {e}")
+            return False
+
+    async def _prepare_migration(
+        self, target_edge: EdgeLocation, state_data: Optional[Dict[str, Any]]
+    ):
+        """Prepare for edge migration."""
+        # Override in subclasses for specific preparation
+        pass
+
+    async def _cleanup_edge(self, edge: EdgeLocation):
+        """Cleanup after migrating away from an edge."""
+        # Override in subclasses for specific cleanup
+        pass
+
+    async def get_edge_metrics(self) -> Dict[str, Any]:
+        """Get current edge performance metrics."""
+        if not self.current_edge:
+            return {}
+
+        return {
+            "edge_name": self.current_edge.name,
+            "edge_region": self.current_edge.region,
+            "latency_ms": self.current_edge.metrics.latency_p50_ms,
+            "cpu_usage": self.current_edge.metrics.cpu_utilization,
+            "memory_usage": self.current_edge.metrics.memory_utilization,
+            "request_count": self.current_edge.metrics.throughput_rps,
+            "error_rate": self.current_edge.metrics.error_rate,
+        }
+
+    def is_compliant_for_data(
+        self, data: Dict[str, Any], required_zones: Optional[List[str]] = None
+    ) -> bool:
+        """Check if current edge is compliant for given data.
+
+        Args:
+            data: Data to check compliance for
+            required_zones: Override compliance zones
+
+        Returns:
+            Compliance status
+        """
+        if not self.current_edge:
+            return False
+
+        zones = required_zones or self.compliance_zones
+        if not zones:
+            return True
+
+        data_class = self.compliance_router.classify_data(data)
+        return self.compliance_router.is_compliant_location(
+            self.current_edge, data_class, zones
+        )
+
+    async def ensure_compliance(
+        self, data: Dict[str, Any], required_zones: Optional[List[str]] = None
+    ) -> bool:
+        """Ensure node is at compliant edge for data.
+
+        Migrates to compliant edge if necessary.
+
+        Args:
+            data: Data requiring compliance
+            required_zones: Override compliance zones
+
+        Returns:
+            Success status
+        """
+        zones = required_zones or self.compliance_zones
+
+        # Check current compliance
+        if self.is_compliant_for_data(data, zones):
+            return True
+
+        # Find compliant edge
+        compliant_edge = await self._select_edge(data)
+        if not compliant_edge:
+            return False
+
+        # Migrate to compliant edge
+        return await self.migrate_to_edge(compliant_edge)
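To illustrate how this base class is meant to be used, here is a hypothetical subclass and driver. The node name, its async_run() method, and the edge/region names are illustrative assumptions; only the constructor options and the initialize()/ensure_compliance()/get_edge_metrics() methods come from the code above, and the sketch assumes some edge locations have already been discovered:

    import asyncio
    from typing import Any, Dict

    from kailash.nodes.edge import EdgeNode


    class RegionalScoringNode(EdgeNode):
        """Hypothetical edge-aware node; async_run is an assumed AsyncNode hook,
        not taken from this diff."""

        async def async_run(self, **inputs) -> Dict[str, Any]:
            # Ensure the current edge may legally process this payload,
            # migrating to a compliant edge if necessary.
            if not await self.ensure_compliance(inputs):
                raise RuntimeError("No compliant edge available")
            metrics = await self.get_edge_metrics()
            return {"score": 0.0, "edge": metrics.get("edge_name")}


    async def main():
        node = RegionalScoringNode(
            edge_strategy="balanced",            # default strategy from the diff
            compliance_zones=["gdpr"],
            preferred_locations=["eu-west-1"],   # hypothetical edge name
        )
        await node.initialize()                   # selects an edge or raises RuntimeError
        print(await node.get_edge_metrics())


    asyncio.run(main())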