kailash 0.9.15__py3-none-any.whl → 0.9.17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kailash/__init__.py +4 -3
- kailash/middleware/database/base_models.py +7 -1
- kailash/migration/__init__.py +30 -0
- kailash/migration/cli.py +340 -0
- kailash/migration/compatibility_checker.py +662 -0
- kailash/migration/configuration_validator.py +837 -0
- kailash/migration/documentation_generator.py +1828 -0
- kailash/migration/examples/__init__.py +5 -0
- kailash/migration/examples/complete_migration_example.py +692 -0
- kailash/migration/migration_assistant.py +715 -0
- kailash/migration/performance_comparator.py +760 -0
- kailash/migration/regression_detector.py +1141 -0
- kailash/migration/tests/__init__.py +6 -0
- kailash/migration/tests/test_compatibility_checker.py +403 -0
- kailash/migration/tests/test_integration.py +463 -0
- kailash/migration/tests/test_migration_assistant.py +397 -0
- kailash/migration/tests/test_performance_comparator.py +433 -0
- kailash/monitoring/__init__.py +29 -2
- kailash/monitoring/asyncsql_metrics.py +275 -0
- kailash/nodes/data/async_sql.py +1828 -33
- kailash/runtime/local.py +1255 -8
- kailash/runtime/monitoring/__init__.py +1 -0
- kailash/runtime/monitoring/runtime_monitor.py +780 -0
- kailash/runtime/resource_manager.py +3033 -0
- kailash/sdk_exceptions.py +21 -0
- kailash/workflow/cyclic_runner.py +18 -2
- {kailash-0.9.15.dist-info → kailash-0.9.17.dist-info}/METADATA +1 -1
- {kailash-0.9.15.dist-info → kailash-0.9.17.dist-info}/RECORD +33 -14
- {kailash-0.9.15.dist-info → kailash-0.9.17.dist-info}/WHEEL +0 -0
- {kailash-0.9.15.dist-info → kailash-0.9.17.dist-info}/entry_points.txt +0 -0
- {kailash-0.9.15.dist-info → kailash-0.9.17.dist-info}/licenses/LICENSE +0 -0
- {kailash-0.9.15.dist-info → kailash-0.9.17.dist-info}/licenses/NOTICE +0 -0
- {kailash-0.9.15.dist-info → kailash-0.9.17.dist-info}/top_level.txt +0 -0
kailash/__init__.py
CHANGED
@@ -3,8 +3,9 @@
|
|
3
3
|
The Kailash SDK provides a comprehensive framework for creating nodes and workflows
|
4
4
|
that align with container-node architecture while allowing rapid prototyping.
|
5
5
|
|
6
|
-
New in v0.9.
|
7
|
-
|
6
|
+
New in v0.9.17: AsyncSQL per-pool locking eliminates lock contention bottleneck.
|
7
|
+
Achieves 100% success at 300+ concurrent operations (was 50% failure). 85% performance improvement with per-pool locks.
|
8
|
+
Previous v0.9.14: Code quality improvements and updated dependencies for DataFlow v0.4.6 compatibility.
|
8
9
|
Previous v0.9.13: Fixed WorkflowBuilder parameter validation false positives (Bug 010).
|
9
10
|
Enhanced validation.py to recognize auto_map_from parameters, eliminating spurious warnings.
|
10
11
|
Previous v0.9.12: SQLite Compatibility & Code Quality improvements.
|
@@ -52,7 +53,7 @@ except ImportError:
|
|
52
53
|
# For backward compatibility
|
53
54
|
WorkflowGraph = Workflow
|
54
55
|
|
55
|
-
__version__ = "0.9.
|
56
|
+
__version__ = "0.9.17"
|
56
57
|
|
57
58
|
__all__ = [
|
58
59
|
# Core workflow components
|
@@ -8,7 +8,13 @@ import uuid
|
|
8
8
|
from datetime import datetime, timezone
|
9
9
|
from typing import Any, Dict, List, Optional
|
10
10
|
|
11
|
-
from sqlalchemy import
|
11
|
+
from sqlalchemy import (
|
12
|
+
JSON,
|
13
|
+
Boolean,
|
14
|
+
CheckConstraint,
|
15
|
+
Column,
|
16
|
+
DateTime,
|
17
|
+
)
|
12
18
|
from sqlalchemy import Enum as SQLEnum
|
13
19
|
from sqlalchemy import (
|
14
20
|
Float,
|
@@ -0,0 +1,30 @@
|
|
1
|
+
"""Comprehensive migration tools for upgrading to enhanced LocalRuntime.
|
2
|
+
|
3
|
+
This module provides a complete suite of migration utilities for upgrading
|
4
|
+
existing codebases to use the enhanced LocalRuntime with zero downtime and
|
5
|
+
comprehensive validation.
|
6
|
+
|
7
|
+
Components:
|
8
|
+
- CompatibilityChecker: Analyze existing code for compatibility issues
|
9
|
+
- MigrationAssistant: Automated configuration conversion and optimization
|
10
|
+
- PerformanceComparator: Before/after performance analysis
|
11
|
+
- ConfigurationValidator: Runtime configuration validation
|
12
|
+
- MigrationDocGenerator: Automated migration guide generation
|
13
|
+
- RegressionDetector: Post-migration validation and regression detection
|
14
|
+
"""
|
15
|
+
|
16
|
+
from .compatibility_checker import CompatibilityChecker
|
17
|
+
from .configuration_validator import ConfigurationValidator
|
18
|
+
from .documentation_generator import MigrationDocGenerator
|
19
|
+
from .migration_assistant import MigrationAssistant
|
20
|
+
from .performance_comparator import PerformanceComparator
|
21
|
+
from .regression_detector import RegressionDetector
|
22
|
+
|
23
|
+
__all__ = [
|
24
|
+
"CompatibilityChecker",
|
25
|
+
"MigrationAssistant",
|
26
|
+
"PerformanceComparator",
|
27
|
+
"ConfigurationValidator",
|
28
|
+
"MigrationDocGenerator",
|
29
|
+
"RegressionDetector",
|
30
|
+
]
|
kailash/migration/cli.py
ADDED
@@ -0,0 +1,340 @@
|
|
1
|
+
#!/usr/bin/env python3
|
2
|
+
"""Command-line interface for LocalRuntime migration tools.
|
3
|
+
|
4
|
+
This module provides a simple CLI for accessing all migration tools
|
5
|
+
and utilities from the command line.
|
6
|
+
"""
|
7
|
+
|
8
|
+
import argparse
|
9
|
+
import json
|
10
|
+
import sys
|
11
|
+
from pathlib import Path
|
12
|
+
from typing import Any, Dict
|
13
|
+
|
14
|
+
from .compatibility_checker import CompatibilityChecker
|
15
|
+
from .configuration_validator import ConfigurationValidator
|
16
|
+
from .documentation_generator import MigrationDocGenerator
|
17
|
+
from .migration_assistant import MigrationAssistant
|
18
|
+
from .performance_comparator import PerformanceComparator
|
19
|
+
from .regression_detector import RegressionDetector
|
20
|
+
|
21
|
+
|
22
|
+
def cmd_analyze(args):
    """Analyze a project tree for LocalRuntime compatibility issues.

    Writes the report to ``args.output`` when given; otherwise prints a
    plain-text report to stdout.
    """
    print("🔍 Running compatibility analysis...")

    checker = CompatibilityChecker()
    analysis = checker.analyze_codebase(
        args.path,
        include_patterns=args.include,
        exclude_patterns=args.exclude,
    )

    if not args.output:
        # No destination file: dump a human-readable report to the console.
        print(checker.generate_report(analysis, "text"))
        return

    Path(args.output).write_text(checker.generate_report(analysis, args.format))
    print(f"📄 Analysis report saved to: {args.output}")
|
37
|
+
|
38
|
+
|
39
|
+
def cmd_validate(args):
    """Validate a runtime configuration given as a JSON file or CLI flags."""
    print("⚙️ Running configuration validation...")

    if args.config_file:
        # Entire configuration supplied as one JSON document.
        with open(args.config_file) as fh:
            config = json.load(fh)
    else:
        # Assemble the configuration from individual command-line switches;
        # only flags the user actually passed end up in the dict.
        config = {}
        if args.debug is not None:
            config["debug"] = args.debug
        if args.max_concurrency:  # falsy (None/0) means "not set"
            config["max_concurrency"] = args.max_concurrency
        if args.enable_monitoring is not None:
            config["enable_monitoring"] = args.enable_monitoring
        if args.enable_security is not None:
            config["enable_security"] = args.enable_security

    validator = ConfigurationValidator()
    outcome = validator.validate_configuration(config)

    if not args.output:
        print(validator.generate_validation_report(outcome, "text"))
        return

    Path(args.output).write_text(
        validator.generate_validation_report(outcome, args.format)
    )
    print(f"📄 Validation report saved to: {args.output}")
|
68
|
+
|
69
|
+
|
70
|
+
def cmd_migrate(args):
    """Plan a LocalRuntime migration and, unless --plan-only, execute it."""
    print("🚀 Running migration planning...")

    assistant = MigrationAssistant(dry_run=args.dry_run, create_backups=args.backup)

    # Always build the step-by-step plan first; execution is a second phase.
    plan = assistant.create_migration_plan(
        args.path,
        include_patterns=args.include,
        exclude_patterns=args.exclude,
    )

    print(f"Created migration plan with {len(plan.steps)} steps")
    print(f"Estimated duration: {plan.estimated_duration_minutes} minutes")
    print(f"Risk level: {plan.risk_level}")

    execution = None
    if not args.plan_only:
        print("\n🔄 Executing migration...")
        execution = assistant.execute_migration(plan)

        print(f"Migration {'successful' if execution.success else 'failed'}")
        print(f"Steps completed: {execution.steps_completed}")
        if execution.steps_failed > 0:
            print(f"Steps failed: {execution.steps_failed}")
            for error in execution.errors:
                print(f"❌ Error: {error}")

    if args.output:
        # ``execution`` stays None in plan-only mode, matching the
        # report generator's optional-result contract.
        report = assistant.generate_migration_report(plan, execution)
        Path(args.output).write_text(report)
        print(f"📄 Migration report saved to: {args.output}")
|
102
|
+
|
103
|
+
|
104
|
+
def cmd_compare(args):
    """Benchmark a 'before' configuration against an 'after' configuration."""
    print("📊 Running performance comparison...")

    # Both configurations are plain JSON documents on disk.
    with open(args.before_config) as fh:
        baseline_cfg = json.load(fh)
    with open(args.after_config) as fh:
        candidate_cfg = json.load(fh)

    comparator = PerformanceComparator(
        sample_size=args.samples, warmup_runs=args.warmup
    )

    try:
        report = comparator.compare_configurations(baseline_cfg, candidate_cfg)

        print(f"Performance change: {report.overall_change_percentage:+.1f}%")
        print(
            f"Status: {'Improvement' if report.overall_improvement else 'Regression'}"
        )
        print(f"Risk: {report.risk_assessment}")

        if args.output:
            perf_report = comparator.generate_performance_report(report, args.format)
            Path(args.output).write_text(perf_report)
            print(f"📄 Performance report saved to: {args.output}")
    except Exception as e:
        # Best-effort CLI behavior: report the failure instead of a traceback.
        print(f"❌ Performance comparison failed: {e}")
        print("   Ensure LocalRuntime is properly installed and configured")
|
136
|
+
|
137
|
+
|
138
|
+
def cmd_baseline(args):
    """Capture a baseline snapshot set for later regression detection."""
    print("📊 Creating regression baseline...")

    with open(args.config) as fh:
        config = json.load(fh)

    detector = RegressionDetector(baseline_path=args.baseline_file)

    try:
        snapshots = detector.create_baseline(config)
    except Exception as e:
        # Surface the failure without dumping a traceback at the user.
        print(f"❌ Baseline creation failed: {e}")
    else:
        print(f"Created {len(snapshots)} baseline snapshots")
        print(f"Baseline saved to: {args.baseline_file}")
|
153
|
+
|
154
|
+
|
155
|
+
def cmd_regress(args):
    """Run the regression suite against a stored baseline and summarize it."""
    print("🔍 Running regression detection...")

    with open(args.config) as fh:
        config = json.load(fh)

    detector = RegressionDetector(baseline_path=args.baseline_file)

    try:
        findings = detector.detect_regressions(config)

        # One-line summary per headline metric.
        print(f"Tests run: {findings.total_tests}")
        print(f"Passed: {findings.passed_tests}")
        print(f"Failed: {findings.failed_tests}")
        print(f"Status: {findings.overall_status}")

        if args.output:
            rendered = detector.generate_regression_report(findings, args.format)
            Path(args.output).write_text(rendered)
            print(f"📄 Regression report saved to: {args.output}")
    except Exception as e:
        print(f"❌ Regression detection failed: {e}")
|
179
|
+
|
180
|
+
|
181
|
+
def cmd_docs(args):
    """Generate a migration guide for the chosen scenario and audience."""
    print("📚 Generating migration documentation...")

    generator = MigrationDocGenerator()

    # NOTE(review): --analysis-file is accepted but its contents are not
    # actually loaded yet; the guide is generated without prior analysis.
    analysis_result = None
    if args.analysis_file:
        print("Loading previous analysis results...")

    guide = generator.generate_migration_guide(
        analysis_result=analysis_result,
        scenario=args.scenario,
        audience=args.audience,
    )

    generator.export_guide(guide, args.output, args.format)
    print(f"📄 Migration guide saved to: {args.output}")
|
199
|
+
|
200
|
+
|
201
|
+
def main():
|
202
|
+
"""Main CLI entry point."""
|
203
|
+
parser = argparse.ArgumentParser(
|
204
|
+
description="LocalRuntime Migration Tools CLI", prog="kailash-migrate"
|
205
|
+
)
|
206
|
+
|
207
|
+
subparsers = parser.add_subparsers(dest="command", help="Available commands")
|
208
|
+
|
209
|
+
# Analyze command
|
210
|
+
analyze_parser = subparsers.add_parser("analyze", help="Run compatibility analysis")
|
211
|
+
analyze_parser.add_argument("path", type=Path, help="Project path to analyze")
|
212
|
+
analyze_parser.add_argument(
|
213
|
+
"--include", nargs="*", default=["*.py"], help="Include patterns"
|
214
|
+
)
|
215
|
+
analyze_parser.add_argument("--exclude", nargs="*", help="Exclude patterns")
|
216
|
+
analyze_parser.add_argument("--output", "-o", help="Output file path")
|
217
|
+
analyze_parser.add_argument(
|
218
|
+
"--format", choices=["text", "json", "markdown"], default="text"
|
219
|
+
)
|
220
|
+
analyze_parser.set_defaults(func=cmd_analyze)
|
221
|
+
|
222
|
+
# Validate command
|
223
|
+
validate_parser = subparsers.add_parser("validate", help="Validate configuration")
|
224
|
+
validate_parser.add_argument("--config-file", help="Configuration file (JSON)")
|
225
|
+
validate_parser.add_argument("--debug", type=bool, help="Debug mode")
|
226
|
+
validate_parser.add_argument("--max-concurrency", type=int, help="Max concurrency")
|
227
|
+
validate_parser.add_argument(
|
228
|
+
"--enable-monitoring", type=bool, help="Enable monitoring"
|
229
|
+
)
|
230
|
+
validate_parser.add_argument("--enable-security", type=bool, help="Enable security")
|
231
|
+
validate_parser.add_argument("--output", "-o", help="Output file path")
|
232
|
+
validate_parser.add_argument(
|
233
|
+
"--format", choices=["text", "json", "markdown"], default="text"
|
234
|
+
)
|
235
|
+
validate_parser.set_defaults(func=cmd_validate)
|
236
|
+
|
237
|
+
# Migrate command
|
238
|
+
migrate_parser = subparsers.add_parser("migrate", help="Run migration")
|
239
|
+
migrate_parser.add_argument("path", type=Path, help="Project path to migrate")
|
240
|
+
migrate_parser.add_argument(
|
241
|
+
"--include", nargs="*", default=["*.py"], help="Include patterns"
|
242
|
+
)
|
243
|
+
migrate_parser.add_argument("--exclude", nargs="*", help="Exclude patterns")
|
244
|
+
migrate_parser.add_argument(
|
245
|
+
"--dry-run", action="store_true", default=True, help="Dry run mode"
|
246
|
+
)
|
247
|
+
migrate_parser.add_argument(
|
248
|
+
"--no-dry-run",
|
249
|
+
dest="dry_run",
|
250
|
+
action="store_false",
|
251
|
+
help="Execute actual migration",
|
252
|
+
)
|
253
|
+
migrate_parser.add_argument(
|
254
|
+
"--backup", action="store_true", default=True, help="Create backups"
|
255
|
+
)
|
256
|
+
migrate_parser.add_argument(
|
257
|
+
"--no-backup", dest="backup", action="store_false", help="Skip backups"
|
258
|
+
)
|
259
|
+
migrate_parser.add_argument(
|
260
|
+
"--plan-only", action="store_true", help="Only create plan, don't execute"
|
261
|
+
)
|
262
|
+
migrate_parser.add_argument("--output", "-o", help="Output file path")
|
263
|
+
migrate_parser.set_defaults(func=cmd_migrate)
|
264
|
+
|
265
|
+
# Compare command
|
266
|
+
compare_parser = subparsers.add_parser("compare", help="Compare performance")
|
267
|
+
compare_parser.add_argument(
|
268
|
+
"before_config", help="Before configuration file (JSON)"
|
269
|
+
)
|
270
|
+
compare_parser.add_argument("after_config", help="After configuration file (JSON)")
|
271
|
+
compare_parser.add_argument(
|
272
|
+
"--samples", type=int, default=3, help="Number of samples"
|
273
|
+
)
|
274
|
+
compare_parser.add_argument("--warmup", type=int, default=1, help="Warmup runs")
|
275
|
+
compare_parser.add_argument("--output", "-o", help="Output file path")
|
276
|
+
compare_parser.add_argument(
|
277
|
+
"--format", choices=["text", "json", "markdown"], default="text"
|
278
|
+
)
|
279
|
+
compare_parser.set_defaults(func=cmd_compare)
|
280
|
+
|
281
|
+
# Baseline command
|
282
|
+
baseline_parser = subparsers.add_parser(
|
283
|
+
"baseline", help="Create regression baseline"
|
284
|
+
)
|
285
|
+
baseline_parser.add_argument("config", help="Configuration file (JSON)")
|
286
|
+
baseline_parser.add_argument(
|
287
|
+
"--baseline-file", default="baseline.json", help="Baseline file path"
|
288
|
+
)
|
289
|
+
baseline_parser.set_defaults(func=cmd_baseline)
|
290
|
+
|
291
|
+
# Regression command
|
292
|
+
regress_parser = subparsers.add_parser("regress", help="Run regression detection")
|
293
|
+
regress_parser.add_argument("config", help="Configuration file (JSON)")
|
294
|
+
regress_parser.add_argument(
|
295
|
+
"--baseline-file", default="baseline.json", help="Baseline file path"
|
296
|
+
)
|
297
|
+
regress_parser.add_argument("--output", "-o", help="Output file path")
|
298
|
+
regress_parser.add_argument(
|
299
|
+
"--format", choices=["text", "json", "markdown"], default="text"
|
300
|
+
)
|
301
|
+
regress_parser.set_defaults(func=cmd_regress)
|
302
|
+
|
303
|
+
# Documentation command
|
304
|
+
docs_parser = subparsers.add_parser("docs", help="Generate documentation")
|
305
|
+
docs_parser.add_argument("output", help="Output file path")
|
306
|
+
docs_parser.add_argument(
|
307
|
+
"--scenario",
|
308
|
+
choices=["simple", "standard", "enterprise", "performance_critical"],
|
309
|
+
default="standard",
|
310
|
+
help="Documentation scenario",
|
311
|
+
)
|
312
|
+
docs_parser.add_argument(
|
313
|
+
"--audience",
|
314
|
+
choices=["developer", "admin", "architect", "all"],
|
315
|
+
default="developer",
|
316
|
+
help="Target audience",
|
317
|
+
)
|
318
|
+
docs_parser.add_argument("--analysis-file", help="Previous analysis results file")
|
319
|
+
docs_parser.add_argument(
|
320
|
+
"--format", choices=["markdown", "html"], default="markdown"
|
321
|
+
)
|
322
|
+
docs_parser.set_defaults(func=cmd_docs)
|
323
|
+
|
324
|
+
# Parse arguments and execute
|
325
|
+
args = parser.parse_args()
|
326
|
+
|
327
|
+
if not args.command:
|
328
|
+
parser.print_help()
|
329
|
+
return 1
|
330
|
+
|
331
|
+
try:
|
332
|
+
args.func(args)
|
333
|
+
return 0
|
334
|
+
except Exception as e:
|
335
|
+
print(f"❌ Error: {e}")
|
336
|
+
return 1
|
337
|
+
|
338
|
+
|
339
|
+
if __name__ == "__main__":
|
340
|
+
sys.exit(main())
|