kailash 0.8.5__py3-none-any.whl → 0.8.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. kailash/__init__.py +5 -5
  2. kailash/channels/__init__.py +2 -1
  3. kailash/channels/mcp_channel.py +23 -4
  4. kailash/cli/validate_imports.py +202 -0
  5. kailash/core/resilience/bulkhead.py +15 -5
  6. kailash/core/resilience/circuit_breaker.py +4 -1
  7. kailash/core/resilience/health_monitor.py +312 -84
  8. kailash/edge/migration/edge_migration_service.py +384 -0
  9. kailash/mcp_server/server.py +351 -8
  10. kailash/mcp_server/transports.py +305 -0
  11. kailash/middleware/gateway/event_store.py +1 -0
  12. kailash/nodes/base.py +77 -1
  13. kailash/nodes/code/python.py +44 -3
  14. kailash/nodes/data/async_sql.py +42 -20
  15. kailash/nodes/edge/edge_migration_node.py +16 -12
  16. kailash/nodes/governance.py +410 -0
  17. kailash/nodes/rag/registry.py +1 -1
  18. kailash/nodes/transaction/distributed_transaction_manager.py +48 -1
  19. kailash/nodes/transaction/saga_state_storage.py +2 -1
  20. kailash/nodes/validation.py +8 -8
  21. kailash/runtime/local.py +30 -0
  22. kailash/runtime/validation/__init__.py +7 -15
  23. kailash/runtime/validation/import_validator.py +446 -0
  24. kailash/runtime/validation/suggestion_engine.py +5 -5
  25. kailash/utils/data_paths.py +74 -0
  26. kailash/workflow/builder.py +183 -4
  27. kailash/workflow/mermaid_visualizer.py +3 -1
  28. kailash/workflow/templates.py +6 -6
  29. kailash/workflow/validation.py +134 -3
  30. {kailash-0.8.5.dist-info → kailash-0.8.6.dist-info}/METADATA +19 -17
  31. {kailash-0.8.5.dist-info → kailash-0.8.6.dist-info}/RECORD +35 -30
  32. {kailash-0.8.5.dist-info → kailash-0.8.6.dist-info}/WHEEL +0 -0
  33. {kailash-0.8.5.dist-info → kailash-0.8.6.dist-info}/entry_points.txt +0 -0
  34. {kailash-0.8.5.dist-info → kailash-0.8.6.dist-info}/licenses/LICENSE +0 -0
  35. {kailash-0.8.5.dist-info → kailash-0.8.6.dist-info}/top_level.txt +0 -0
kailash/__init__.py CHANGED
@@ -3,10 +3,10 @@
  The Kailash SDK provides a comprehensive framework for creating nodes and workflows
  that align with container-node architecture while allowing rapid prototyping.
 
- New in v0.7.0: Complete DataFlow and Nexus application frameworks, infrastructure hardening
- with 100% E2E test pass rate, enhanced AsyncNode event loop handling, 8 new monitoring operations,
- distributed transactions, QueryBuilder/QueryCache with Redis, and real MCP execution by default.
- Previous v0.6.6: AgentUIMiddleware shared workflow fix, execute() method standardization.
+ New in v0.8.6: Enhanced parameter validation system with 4 modes (off/warn/strict/debug),
+ ParameterDebugger for comprehensive flow tracing (10x faster debugging), production-ready
+ performance (<1ms overhead), and complete troubleshooting documentation.
+ Previous v0.8.5: Test infrastructure enhancement, application framework improvements.
  """
 
  from kailash.nodes.base import Node, NodeMetadata, NodeParameter
@@ -49,7 +49,7 @@ except ImportError:
  # For backward compatibility
  WorkflowGraph = Workflow
 
- __version__ = "0.8.5"
+ __version__ = "0.8.6"
 
  __all__ = [
      # Core workflow components
kailash/channels/__init__.py CHANGED
@@ -5,7 +5,7 @@ enabling unified management of API, CLI, and MCP interfaces through a common cha
  """
 
  from .api_channel import APIChannel
- from .base import Channel, ChannelConfig
+ from .base import Channel, ChannelConfig, ChannelType
  from .cli_channel import CLIChannel
  from .mcp_channel import MCPChannel
  from .session import CrossChannelSession, SessionManager
@@ -13,6 +13,7 @@ from .session import CrossChannelSession, SessionManager
  __all__ = [
      "Channel",
      "ChannelConfig",
+     "ChannelType",
      "APIChannel",
      "CLIChannel",
      "MCPChannel",
kailash/channels/mcp_channel.py CHANGED
@@ -84,6 +84,7 @@ class MCPChannel(Channel):
          # MCP-specific state
          self._clients: Dict[str, Dict[str, Any]] = {}
          self._server_task: Optional[asyncio.Task] = None
+         self._mcp_server_task: Optional[asyncio.Task] = None
 
          logger.info(f"Initialized MCP channel {self.name}")
 
@@ -202,8 +203,15 @@ class MCPChannel(Channel):
          self.status = ChannelStatus.STARTING
          self._setup_event_queue()
 
-         # Start MCP server
-         await self.mcp_server.start()
+         # Start MCP server (Core SDK uses run() method, not start())
+         # For async operation, we need to run it in a separate task
+         if hasattr(self.mcp_server, "run"):
+             # Core SDK MCPServer uses run() method
+             loop = asyncio.get_event_loop()
+             self._mcp_server_task = loop.run_in_executor(None, self.mcp_server.run)
+         else:
+             # Fallback to start() if available
+             await self.mcp_server.start()
 
          # Start server task for handling connections
          self._server_task = asyncio.create_task(self._server_loop())
@@ -261,9 +269,20 @@ class MCPChannel(Channel):
          except asyncio.CancelledError:
              pass
 
+         # Stop MCP server task if running
+         if hasattr(self, "_mcp_server_task") and self._mcp_server_task:
+             self._mcp_server_task.cancel()
+             try:
+                 await self._mcp_server_task
+             except asyncio.CancelledError:
+                 pass
+
          # Stop MCP server
-         if self.mcp_server:
-             await self.mcp_server.stop()
+         if self.mcp_server and hasattr(self.mcp_server, "stop"):
+             try:
+                 await self.mcp_server.stop()
+             except Exception as e:
+                 logger.warning(f"Error stopping MCP server: {e}")
 
          await self._cleanup()
          self.status = ChannelStatus.STOPPED
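The start() path above prefers a blocking run() pushed onto the default thread-pool executor so the event loop stays free, and only falls back to an awaitable start(). A small self-contained sketch of that pattern; BlockingServer is illustrative, not a kailash class:

    import asyncio
    import time


    class BlockingServer:
        def run(self):
            # Stands in for a server loop that blocks its thread until shutdown.
            time.sleep(0.1)


    async def start_in_background(server):
        if hasattr(server, "run"):
            # Blocking run(): hand it to the default executor, as the channel does.
            loop = asyncio.get_event_loop()
            return loop.run_in_executor(None, server.run)
        # Otherwise assume the object exposes an awaitable start().
        return await server.start()


    async def main():
        server_future = await start_in_background(BlockingServer())
        await server_future  # resolves once the blocking run() returns


    asyncio.run(main())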
kailash/cli/validate_imports.py ADDED
@@ -0,0 +1,202 @@
+ #!/usr/bin/env python3
+ """
+ Command-line tool for validating import paths for production deployment.
+
+ Usage:
+     python -m kailash.cli.validate_imports [path] [options]
+
+ Examples:
+     # Validate current directory
+     python -m kailash.cli.validate_imports
+
+     # Validate specific directory
+     python -m kailash.cli.validate_imports src/myapp
+
+     # Fix imports (dry run)
+     python -m kailash.cli.validate_imports src/myapp --fix
+
+     # Fix imports (apply changes)
+     python -m kailash.cli.validate_imports src/myapp --fix --apply
+ """
+
+ import argparse
+ import sys
+ from pathlib import Path
+ from typing import List
+
+ from kailash.runtime.validation import ImportIssue, ImportPathValidator
+
+
+ def main():
+     """Main entry point for import validation CLI."""
+     parser = argparse.ArgumentParser(
+         description="Validate Python imports for production deployment compatibility",
+         formatter_class=argparse.RawDescriptionHelpFormatter,
+         epilog="""
+ Examples:
+   %(prog)s                    # Validate current directory
+   %(prog)s src/myapp          # Validate specific directory
+   %(prog)s src/myapp --fix    # Show import fixes (dry run)
+   %(prog)s --file module.py   # Validate single file
+
+ For more info, see: sdk-users/7-gold-standards/absolute-imports-gold-standard.md
+         """,
+     )
+
+     parser.add_argument(
+         "path",
+         nargs="?",
+         default=".",
+         help="Path to validate (directory or file, default: current directory)",
+     )
+
+     parser.add_argument(
+         "--file",
+         "-f",
+         action="store_true",
+         help="Treat path as a single file instead of directory",
+     )
+
+     parser.add_argument(
+         "--fix", action="store_true", help="Show suggested fixes for import issues"
+     )
+
+     parser.add_argument(
+         "--apply",
+         action="store_true",
+         help="Apply fixes (use with --fix, CAUTION: modifies files!)",
+     )
+
+     parser.add_argument(
+         "--no-recursive", "-n", action="store_true", help="Do not scan subdirectories"
+     )
+
+     parser.add_argument(
+         "--include-tests", action="store_true", help="Include test files in validation"
+     )
+
+     parser.add_argument("--json", action="store_true", help="Output results as JSON")
+
+     parser.add_argument(
+         "--quiet",
+         "-q",
+         action="store_true",
+         help="Only show errors, no informational output",
+     )
+
+     parser.add_argument(
+         "--verbose", "-v", action="store_true", help="Show detailed output"
+     )
+
+     args = parser.parse_args()
+
+     # Create validator
+     validator = ImportPathValidator()
+
+     # Validate path
+     path = Path(args.path)
+     if not path.exists():
+         print(f"Error: Path '{path}' does not exist", file=sys.stderr)
+         sys.exit(1)
+
+     # Collect issues
+     issues: List[ImportIssue] = []
+
+     if args.file or path.is_file():
+         # Validate single file
+         if args.verbose:
+             print(f"Validating file: {path}")
+         issues = validator.validate_file(str(path))
+     else:
+         # Validate directory
+         if args.verbose:
+             print(f"Validating directory: {path}")
+             print(f"Recursive: {not args.no_recursive}")
+             print(f"Include tests: {args.include_tests}")
+             print()
+
+         # TODO: Add support for include_tests flag in validator
+         issues = validator.validate_directory(
+             str(path), recursive=not args.no_recursive
+         )
+
+     # Handle results
+     if args.json:
+         import json
+
+         # Convert issues to JSON-serializable format
+         issues_data = [
+             {
+                 "file": issue.file_path,
+                 "line": issue.line_number,
+                 "import": issue.import_statement,
+                 "type": issue.issue_type.value,
+                 "severity": issue.severity,
+                 "message": issue.message,
+                 "suggestion": issue.suggestion,
+             }
+             for issue in issues
+         ]
+         print(
+             json.dumps(
+                 {
+                     "issues": issues_data,
+                     "total": len(issues),
+                     "critical": len([i for i in issues if i.severity == "critical"]),
+                     "warnings": len([i for i in issues if i.severity == "warning"]),
+                 },
+                 indent=2,
+             )
+         )
+
+     elif args.fix:
+         # Show fixes
+         if not issues:
+             if not args.quiet:
+                 print("✅ No import issues found!")
+             sys.exit(0)
+
+         print(f"Found {len(issues)} import issues\n")
+
+         # Group by file
+         files_with_issues = {}
+         for issue in issues:
+             if issue.file_path not in files_with_issues:
+                 files_with_issues[issue.file_path] = []
+             files_with_issues[issue.file_path].append(issue)
+
+         for file_path, file_issues in files_with_issues.items():
+             print(f"\n📄 {file_path}")
+             print("-" * 60)
+
+             if args.apply:
+                 # Apply fixes
+                 fixes = validator.fix_imports_in_file(file_path, dry_run=False)
+                 for original, fixed in fixes:
+                     print(f" ❌ {original}")
+                     print(f" ✅ {fixed}")
+                 print(f"\n Applied {len(fixes)} fixes to {file_path}")
+             else:
+                 # Show proposed fixes
+                 for issue in file_issues:
+                     print(f" Line {issue.line_number}: {issue.import_statement}")
+                     print(f" Issue: {issue.message}")
+                     print(f" Fix: {issue.suggestion}")
+                     print()
+
+         if not args.apply:
+             print("\n💡 To apply these fixes, run with --fix --apply")
+             print("⚠️ CAUTION: This will modify your files!")
+
+     else:
+         # Standard report
+         report = validator.generate_report(issues)
+         print(report)
+
+     # Exit code based on critical issues
+     critical_count = len([i for i in issues if i.severity == "critical"])
+     sys.exit(1 if critical_count > 0 else 0)
+
+
+ if __name__ == "__main__":
+     main()
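The validator driven by this CLI can also be used programmatically. A sketch based only on the calls visible in this file; the exact signatures and issue fields should be treated as assumptions:

    from kailash.runtime.validation import ImportPathValidator

    validator = ImportPathValidator()
    # Scan a package directory recursively, as the CLI does by default.
    issues = validator.validate_directory("src/myapp", recursive=True)
    for issue in issues:
        print(issue.file_path, issue.line_number, issue.severity, issue.suggestion)
    # Or render the same summary report the CLI prints.
    print(validator.generate_report(issues))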
kailash/core/resilience/bulkhead.py CHANGED
@@ -204,11 +204,21 @@ class BulkheadPartition:
              await self._record_failure(execution_time)
              raise
          finally:
-             # Clean up
-             async with self._lock:
-                 if operation_id in self._active_operations:
-                     self._active_operations.remove(operation_id)
-                 self.metrics.active_operations = len(self._active_operations)
+             # Clean up - with proper exception handling for event loop issues
+             try:
+                 async with self._lock:
+                     if operation_id in self._active_operations:
+                         self._active_operations.remove(operation_id)
+                     self.metrics.active_operations = len(self._active_operations)
+             except (RuntimeError, asyncio.CancelledError):
+                 # Handle event loop issues during cleanup - force cleanup without lock
+                 try:
+                     if operation_id in self._active_operations:
+                         self._active_operations.remove(operation_id)
+                     self.metrics.active_operations = len(self._active_operations)
+                 except:
+                     # Final fallback - ignore cleanup errors during shutdown
+                     pass
 
      async def _execute_isolated(
          self, operation_id: str, func: Callable, args: tuple, kwargs: dict, timeout: int
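The new finally block follows a lock-then-fallback cleanup shape: take the partition lock on the happy path, and if the event loop is already tearing down (RuntimeError or CancelledError), fall back to best-effort cleanup without the lock. A generic, self-contained sketch of that shape, not SDK code:

    import asyncio


    async def cleanup_operation(lock: asyncio.Lock, active: set, op_id: str) -> None:
        try:
            async with lock:
                active.discard(op_id)
        except (RuntimeError, asyncio.CancelledError):
            # Event loop closing or task cancelled: clean up without the lock.
            active.discard(op_id)


    async def main():
        lock = asyncio.Lock()
        active = {"op-1"}
        await cleanup_operation(lock, active, "op-1")
        print(active)  # empty set once the operation id is removed


    asyncio.run(main())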
kailash/core/resilience/circuit_breaker.py CHANGED
@@ -467,7 +467,10 @@ class CircuitBreakerManager:
          return self._breakers[name]
 
      def create_circuit_breaker(
-         self, name: str, config: Optional[CircuitBreakerConfig] = None, pattern: Optional[str] = None
+         self,
+         name: str,
+         config: Optional[CircuitBreakerConfig] = None,
+         pattern: Optional[str] = None,
      ) -> ConnectionCircuitBreaker:
          """Create a new circuit breaker with optional pattern-based configuration."""
          if pattern and pattern in self._patterns:
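For context, a hedged call shape for the reformatted signature, assuming a default CircuitBreakerManager constructor; "primary-db" and "database" are illustrative values, not identifiers shipped with the SDK:

    from kailash.core.resilience.circuit_breaker import CircuitBreakerManager

    manager = CircuitBreakerManager()
    breaker = manager.create_circuit_breaker(
        name="primary-db",
        config=None,         # or a CircuitBreakerConfig instance
        pattern="database",  # consulted only if registered in the manager's patterns
    )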