kailash 0.9.15__py3-none-any.whl → 0.9.17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kailash/__init__.py +4 -3
- kailash/middleware/database/base_models.py +7 -1
- kailash/migration/__init__.py +30 -0
- kailash/migration/cli.py +340 -0
- kailash/migration/compatibility_checker.py +662 -0
- kailash/migration/configuration_validator.py +837 -0
- kailash/migration/documentation_generator.py +1828 -0
- kailash/migration/examples/__init__.py +5 -0
- kailash/migration/examples/complete_migration_example.py +692 -0
- kailash/migration/migration_assistant.py +715 -0
- kailash/migration/performance_comparator.py +760 -0
- kailash/migration/regression_detector.py +1141 -0
- kailash/migration/tests/__init__.py +6 -0
- kailash/migration/tests/test_compatibility_checker.py +403 -0
- kailash/migration/tests/test_integration.py +463 -0
- kailash/migration/tests/test_migration_assistant.py +397 -0
- kailash/migration/tests/test_performance_comparator.py +433 -0
- kailash/monitoring/__init__.py +29 -2
- kailash/monitoring/asyncsql_metrics.py +275 -0
- kailash/nodes/data/async_sql.py +1828 -33
- kailash/runtime/local.py +1255 -8
- kailash/runtime/monitoring/__init__.py +1 -0
- kailash/runtime/monitoring/runtime_monitor.py +780 -0
- kailash/runtime/resource_manager.py +3033 -0
- kailash/sdk_exceptions.py +21 -0
- kailash/workflow/cyclic_runner.py +18 -2
- {kailash-0.9.15.dist-info → kailash-0.9.17.dist-info}/METADATA +1 -1
- {kailash-0.9.15.dist-info → kailash-0.9.17.dist-info}/RECORD +33 -14
- {kailash-0.9.15.dist-info → kailash-0.9.17.dist-info}/WHEEL +0 -0
- {kailash-0.9.15.dist-info → kailash-0.9.17.dist-info}/entry_points.txt +0 -0
- {kailash-0.9.15.dist-info → kailash-0.9.17.dist-info}/licenses/LICENSE +0 -0
- {kailash-0.9.15.dist-info → kailash-0.9.17.dist-info}/licenses/NOTICE +0 -0
- {kailash-0.9.15.dist-info → kailash-0.9.17.dist-info}/top_level.txt +0 -0
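The headline change in this release is the new `kailash.migration` package, a toolchain for moving legacy `LocalRuntime` configurations to the modern API. As a rough orientation, the tools compose as follows — a condensed sketch based only on the API exercised by the integration tests shown below (not on the package's documentation); `project_path` is a placeholder for your own codebase path:

```python
from pathlib import Path

from kailash.migration import (
    CompatibilityChecker,
    ConfigurationValidator,
    MigrationAssistant,
    MigrationDocGenerator,
)

project_path = Path("path/to/your/project")  # hypothetical location

# Scan a project for legacy LocalRuntime patterns.
checker = CompatibilityChecker()
analysis = checker.analyze_codebase(project_path)

# Flag deprecated runtime parameters; a corrected config comes back on the result.
validator = ConfigurationValidator()
validation = validator.validate_configuration(
    {"enable_parallel": True, "thread_pool_size": 16}
)

# Plan the mechanical rewrite and rehearse it without touching files.
assistant = MigrationAssistant(dry_run=True, create_backups=False)
plan = assistant.create_migration_plan(project_path)
result = assistant.execute_migration(plan)

# Emit a human-readable guide covering what was found and changed.
guide = MigrationDocGenerator().generate_migration_guide(
    analysis_result=analysis,
    migration_plan=plan,
    migration_result=result,
    validation_result=validation,
    scenario="standard",
)
```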
kailash/migration/tests/test_integration.py
@@ -0,0 +1,463 @@
```python
"""Integration tests for the complete migration toolchain.

These tests verify that all migration tools work together correctly
for end-to-end migration scenarios.
"""

import tempfile
import textwrap
from pathlib import Path

import pytest

from kailash.migration import (
    CompatibilityChecker,
    ConfigurationValidator,
    MigrationAssistant,
    MigrationDocGenerator,
    PerformanceComparator,
    RegressionDetector,
)
from kailash.workflow.builder import WorkflowBuilder


@pytest.fixture
def complete_project():
    """Create a complete project with various migration scenarios."""
    with tempfile.TemporaryDirectory() as temp_dir:
        temp_path = Path(temp_dir)

        # Main application file with legacy patterns
        (temp_path / "app.py").write_text(
            textwrap.dedent(
                """
                from kailash.runtime.local import LocalRuntime
                from kailash.workflow.builder import WorkflowBuilder

                def create_runtime():
                    # Legacy configuration
                    return LocalRuntime(
                        enable_parallel=True,
                        thread_pool_size=16,
                        debug_mode=False,
                        memory_limit=4096,
                        timeout=600
                    )

                def run_workflow(data):
                    runtime = create_runtime()

                    workflow = WorkflowBuilder()
                    workflow.add_node("PythonCodeNode", "process", {
                        "code": f"result = len('{data}') * 2",
                        "output_key": "length_doubled"
                    })

                    # Legacy execution pattern
                    runtime.execute_sync(workflow.build())
                    return runtime.get_results()

                if __name__ == "__main__":
                    result = run_workflow("test data")
                    print(f"Result: {result}")
                """
            ).strip()
        )

        # Configuration file
        (temp_path / "config.py").write_text(
            textwrap.dedent(
                """
                # Legacy configuration patterns
                PRODUCTION_CONFIG = {
                    'enable_parallel': True,
                    'thread_pool_size': 32,
                    'memory_limit': 8192,
                    'timeout': 1200,
                    'log_level': 'INFO',
                    'retry_count': 5
                }

                DEVELOPMENT_CONFIG = {
                    'debug_mode': True,
                    'enable_parallel': False,
                    'memory_limit': 1024
                }
                """
            ).strip()
        )

        # Already modern file (should not be changed)
        (temp_path / "modern.py").write_text(
            textwrap.dedent(
                """
                from kailash.runtime.local import LocalRuntime
                from kailash.access_control import UserContext

                def create_modern_runtime():
                    user_context = UserContext(user_id="modern_user")
                    return LocalRuntime(
                        debug=True,
                        max_concurrency=20,
                        enable_monitoring=True,
                        enable_security=True,
                        user_context=user_context,
                        resource_limits={
                            'memory_mb': 2048,
                            'timeout_seconds': 300
                        }
                    )

                def execute_modern_workflow(workflow):
                    runtime = create_modern_runtime()
                    results, run_id = runtime.execute(workflow)
                    return results, run_id
                """
            ).strip()
        )

        # Test file
        (temp_path / "test_app.py").write_text(
            textwrap.dedent(
                """
                import pytest
                from app import run_workflow

                def test_workflow_execution():
                    result = run_workflow("hello")
                    assert result is not None

                def test_workflow_with_empty_data():
                    result = run_workflow("")
                    assert result is not None
                """
            ).strip()
        )

        yield temp_path


class TestMigrationToolchainIntegration:
    """Integration tests for the complete migration toolchain."""

    def test_complete_migration_workflow(self, complete_project):
        """Test complete migration workflow from analysis to validation."""

        # Step 1: Compatibility Analysis
        checker = CompatibilityChecker()
        analysis_result = checker.analyze_codebase(complete_project)

        assert analysis_result.total_files_analyzed > 0
        assert len(analysis_result.issues) > 0
        assert analysis_result.migration_complexity in [
            "low",
            "medium",
            "high",
            "very_high",
        ]

        # Should detect legacy patterns
        deprecated_issues = [
            i for i in analysis_result.issues if "deprecated" in i.description.lower()
        ]
        assert len(deprecated_issues) > 0

        # Step 2: Configuration Validation
        validator = ConfigurationValidator()

        # Test legacy configurations
        legacy_config = {
            "enable_parallel": True,
            "thread_pool_size": 16,
            "debug_mode": False,
        }

        validation_result = validator.validate_configuration(legacy_config)
        assert validation_result.valid is False  # Should have deprecated parameters
        assert len(validation_result.issues) > 0

        # Step 3: Migration Planning and Execution
        assistant = MigrationAssistant(dry_run=True, create_backups=False)
        migration_plan = assistant.create_migration_plan(complete_project)

        assert len(migration_plan.steps) > 0
        assert migration_plan.estimated_duration_minutes > 0

        # Execute migration (dry run)
        migration_result = assistant.execute_migration(migration_plan)
        assert migration_result.success is True
        assert migration_result.steps_completed > 0

        # Step 4: Performance Comparison (simulated)
        comparator = PerformanceComparator(sample_size=1, warmup_runs=0)

        legacy_config = {"debug": True, "max_concurrency": 1}
        modern_config = {"debug": True, "max_concurrency": 4, "enable_monitoring": True}

        # Create simple test workflow
        workflow = WorkflowBuilder()
        workflow.add_node(
            "PythonCodeNode",
            "test",
            {"code": "result = sum(range(100))", "output_key": "sum_result"},
        )
        test_workflows = [("simple_test", workflow.build())]

        # This would normally require actual LocalRuntime execution
        # For integration test, we'll just verify the structure
        try:
            performance_report = comparator.compare_configurations(
                legacy_config, modern_config, test_workflows
            )
            assert isinstance(performance_report.before_benchmarks, list)
            assert isinstance(performance_report.after_benchmarks, list)
        except Exception as e:
            # Expected in test environment without full runtime
            assert "LocalRuntime" in str(e) or "import" in str(e).lower()

        # Step 5: Documentation Generation
        doc_generator = MigrationDocGenerator()

        migration_guide = doc_generator.generate_migration_guide(
            analysis_result=analysis_result,
            migration_plan=migration_plan,
            migration_result=migration_result,
            validation_result=validation_result,
            scenario="standard",
        )

        assert migration_guide is not None
        assert migration_guide.title
        assert len(migration_guide.sections) > 0

        # Verify key sections exist
        section_titles = [s.title for s in migration_guide.sections]
        assert "Overview" in section_titles
        assert "Migration Steps" in section_titles
        assert "Validation and Testing" in section_titles

    def test_regression_detection_integration(self, complete_project):
        """Test regression detection integration."""

        # Create regression detector
        detector = RegressionDetector(
            baseline_path=complete_project / "baseline.json", parallel_tests=False
        )

        # Create baseline configuration
        baseline_config = {"debug": True, "max_concurrency": 2}

        # Create simple test workflows for baseline
        simple_workflow = WorkflowBuilder()
        simple_workflow.add_node(
            "PythonCodeNode",
            "baseline_test",
            {"code": "result = 'baseline_success'", "output_key": "message"},
        )

        test_workflows = [("integration_test", simple_workflow.build())]

        # This would normally create actual baselines
        try:
            baselines = detector.create_baseline(baseline_config, test_workflows)
            assert isinstance(baselines, dict)

            # Test regression detection
            modified_config = {
                "debug": True,
                "max_concurrency": 4,
            }  # Increased concurrency
            regression_report = detector.detect_regressions(
                modified_config, test_workflows
            )

            assert regression_report.total_tests > 0
        except Exception as e:
            # Expected in test environment without full runtime
            assert "LocalRuntime" in str(e) or "import" in str(e).lower()

    def test_configuration_optimization_flow(self, complete_project):
        """Test configuration optimization workflow."""

        # Start with problematic configuration
        problematic_config = {
            "debug": True,
            "enable_security": True,  # Conflict: debug + security
            "max_concurrency": 1000,  # Too high
            "enable_monitoring": False,
            "enable_enterprise_monitoring": True,  # Requires basic monitoring
        }

        # Validate and get recommendations
        validator = ConfigurationValidator()
        validation_result = validator.validate_configuration(problematic_config)

        assert validation_result.valid is False
        assert len(validation_result.issues) > 0

        # Should have optimized configuration
        assert validation_result.optimized_config is not None

        # Optimized config should fix dependency issues
        optimized = validation_result.optimized_config
        if optimized.get("enable_enterprise_monitoring"):
            assert optimized.get("enable_monitoring") is True

        # Re-validate optimized configuration
        revalidation = validator.validate_configuration(optimized)
        assert len(revalidation.issues) < len(validation_result.issues)

    def test_multi_format_report_generation(self, complete_project):
        """Test report generation in multiple formats."""

        # Run analysis
        checker = CompatibilityChecker()
        analysis_result = checker.analyze_codebase(complete_project)

        # Generate reports in all formats
        text_report = checker.generate_report(analysis_result, "text")
        json_report = checker.generate_report(analysis_result, "json")
        markdown_report = checker.generate_report(analysis_result, "markdown")

        # Verify all formats generated successfully
        assert isinstance(text_report, str) and len(text_report) > 0
        assert isinstance(json_report, str) and len(json_report) > 0
        assert isinstance(markdown_report, str) and len(markdown_report) > 0

        # Verify format-specific content
        assert "Migration Compatibility Report" in text_report

        # JSON should be valid
        import json

        json_data = json.loads(json_report)
        assert "summary" in json_data
        assert "issues" in json_data

        # Markdown should have headers
        assert "# LocalRuntime Migration Compatibility Report" in markdown_report
        assert "## Summary" in markdown_report

    def test_comprehensive_documentation_generation(self, complete_project):
        """Test comprehensive documentation generation."""

        # Run complete analysis
        checker = CompatibilityChecker()
        analysis_result = checker.analyze_codebase(complete_project)

        assistant = MigrationAssistant(dry_run=True)
        migration_plan = assistant.create_migration_plan(complete_project)
        migration_result = assistant.execute_migration(migration_plan)

        validator = ConfigurationValidator()
        validation_result = validator.validate_configuration({"debug": True})

        # Generate comprehensive documentation
        doc_generator = MigrationDocGenerator()

        # Test different scenarios
        scenarios = ["simple", "standard", "enterprise", "performance_critical"]

        for scenario in scenarios:
            guide = doc_generator.generate_migration_guide(
                analysis_result=analysis_result,
                migration_plan=migration_plan,
                migration_result=migration_result,
                validation_result=validation_result,
                scenario=scenario,
                audience="developer",
            )

            assert guide is not None
            assert len(guide.sections) > 0

            # Verify scenario-appropriate sections
            section_titles = [s.title for s in guide.sections]

            if scenario == "enterprise":
                assert "Enterprise Features" in section_titles

            if scenario == "performance_critical":
                # Performance scenario should have performance focus
                assert any("performance" in title.lower() for title in section_titles)

    def test_error_handling_integration(self, complete_project):
        """Test error handling across the migration toolchain."""

        # Create problematic files
        broken_file = complete_project / "broken.py"
        broken_file.write_text("def broken_function(\n # Syntax error")

        # Test that tools handle errors gracefully
        checker = CompatibilityChecker()
        analysis_result = checker.analyze_codebase(complete_project)

        # Should still analyze successfully despite broken file
        assert analysis_result.total_files_analyzed > 0

        # Should detect syntax errors
        syntax_errors = [
            i for i in analysis_result.issues if "syntax" in i.description.lower()
        ]
        assert len(syntax_errors) > 0

        # Migration assistant should handle errors
        assistant = MigrationAssistant(dry_run=True)
        migration_plan = assistant.create_migration_plan(complete_project)

        # Should create plan despite problematic files
        assert len(migration_plan.steps) >= 0

        # Migration execution should handle errors gracefully
        migration_result = assistant.execute_migration(migration_plan)
        if not migration_result.success:
            assert len(migration_result.errors) > 0

    def test_end_to_end_workflow_files(self, complete_project):
"""Test that workflow affects actual files (in dry-run mode)."""

        # Get original file content
        original_app = (complete_project / "app.py").read_text()
        original_config = (complete_project / "config.py").read_text()
        original_modern = (complete_project / "modern.py").read_text()

        # Run migration in dry-run mode
        assistant = MigrationAssistant(dry_run=True)
        migration_plan = assistant.create_migration_plan(complete_project)
        migration_result = assistant.execute_migration(migration_plan)

        # In dry-run mode, files should not be changed
        assert (complete_project / "app.py").read_text() == original_app
        assert (complete_project / "config.py").read_text() == original_config
        assert (complete_project / "modern.py").read_text() == original_modern

        # But should have successful execution
        assert migration_result.success is True
        assert migration_result.steps_completed > 0

    def test_migration_report_completeness(self, complete_project):
        """Test that migration reports contain all expected information."""

        # Run complete migration
        assistant = MigrationAssistant(dry_run=True)
        migration_plan = assistant.create_migration_plan(complete_project)
        migration_result = assistant.execute_migration(migration_plan)

        # Generate comprehensive report
        report = assistant.generate_migration_report(migration_plan, migration_result)

        # Verify report completeness
        assert "Migration Report" in report
        assert "MIGRATION PLAN SUMMARY" in report
        assert "EXECUTION RESULTS" in report
        assert str(migration_plan.estimated_duration_minutes) in report
        assert migration_plan.risk_level.upper() in report
        assert str(migration_result.steps_completed) in report

        # Should include step details
        for step in migration_plan.steps:
            assert step.description in report or step.file_path in report


if __name__ == "__main__":
    pytest.main([__file__])
```
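The fixture's `app.py`/`modern.py` pair encodes the before/after runtime contract this toolchain targets. A minimal side-by-side sketch, distilled from those fixture files (parameter names are exactly as the fixture uses them; `workflow` stands for any built workflow):

```python
from kailash.runtime.local import LocalRuntime

# Legacy pattern, as seeded in the fixture's app.py: separate
# synchronous execution and result retrieval.
legacy = LocalRuntime(
    enable_parallel=True,
    thread_pool_size=16,
    memory_limit=4096,
    timeout=600,
)
legacy.execute_sync(workflow)
results = legacy.get_results()

# Modern pattern, as seeded in modern.py: consolidated config keys
# and a single execute() call returning results plus a run id.
modern = LocalRuntime(
    debug=True,
    max_concurrency=20,
    enable_monitoring=True,
    resource_limits={"memory_mb": 2048, "timeout_seconds": 300},
)
results, run_id = modern.execute(workflow)
```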