kailash 0.9.15__py3-none-any.whl → 0.9.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. kailash/middleware/database/base_models.py +7 -1
  2. kailash/migration/__init__.py +30 -0
  3. kailash/migration/cli.py +340 -0
  4. kailash/migration/compatibility_checker.py +662 -0
  5. kailash/migration/configuration_validator.py +837 -0
  6. kailash/migration/documentation_generator.py +1828 -0
  7. kailash/migration/examples/__init__.py +5 -0
  8. kailash/migration/examples/complete_migration_example.py +692 -0
  9. kailash/migration/migration_assistant.py +715 -0
  10. kailash/migration/performance_comparator.py +760 -0
  11. kailash/migration/regression_detector.py +1141 -0
  12. kailash/migration/tests/__init__.py +6 -0
  13. kailash/migration/tests/test_compatibility_checker.py +403 -0
  14. kailash/migration/tests/test_integration.py +463 -0
  15. kailash/migration/tests/test_migration_assistant.py +397 -0
  16. kailash/migration/tests/test_performance_comparator.py +433 -0
  17. kailash/nodes/data/async_sql.py +1507 -6
  18. kailash/runtime/local.py +1255 -8
  19. kailash/runtime/monitoring/__init__.py +1 -0
  20. kailash/runtime/monitoring/runtime_monitor.py +780 -0
  21. kailash/runtime/resource_manager.py +3033 -0
  22. kailash/sdk_exceptions.py +21 -0
  23. kailash/workflow/cyclic_runner.py +18 -2
  24. {kailash-0.9.15.dist-info → kailash-0.9.16.dist-info}/METADATA +1 -1
  25. {kailash-0.9.15.dist-info → kailash-0.9.16.dist-info}/RECORD +30 -12
  26. {kailash-0.9.15.dist-info → kailash-0.9.16.dist-info}/WHEEL +0 -0
  27. {kailash-0.9.15.dist-info → kailash-0.9.16.dist-info}/entry_points.txt +0 -0
  28. {kailash-0.9.15.dist-info → kailash-0.9.16.dist-info}/licenses/LICENSE +0 -0
  29. {kailash-0.9.15.dist-info → kailash-0.9.16.dist-info}/licenses/NOTICE +0 -0
  30. {kailash-0.9.15.dist-info → kailash-0.9.16.dist-info}/top_level.txt +0 -0
kailash/migration/tests/test_performance_comparator.py (new file)
@@ -0,0 +1,433 @@
+ """Tests for the PerformanceComparator class."""
+
+ import time
+ from unittest.mock import Mock, patch
+
+ import pytest
+
+ from kailash.migration.performance_comparator import (
+     ComparisonResult,
+     PerformanceBenchmark,
+     PerformanceComparator,
+     PerformanceMetric,
+     PerformanceReport,
+ )
+ from kailash.workflow.builder import WorkflowBuilder
+
+
+ @pytest.fixture
+ def comparator():
+     """Create a PerformanceComparator instance for testing."""
+     return PerformanceComparator(sample_size=2, warmup_runs=1, timeout_seconds=30)
+
+
+ @pytest.fixture
+ def simple_workflow():
+     """Create a simple test workflow."""
+     builder = WorkflowBuilder()
+     builder.add_node(
+         "PythonCodeNode", "test_node", {"code": "result = 42", "output_key": "answer"}
+     )
+     return builder.build()
+
+
+ @pytest.fixture
+ def mock_runtime():
+     """Create a mock LocalRuntime for testing."""
+     mock = Mock()
+     mock.execute.return_value = ({"test_node": {"answer": 42}}, "run_123")
+     return mock
+
+
+ class TestPerformanceComparator:
+     """Test cases for PerformanceComparator."""
+
+     def test_initialization(self, comparator):
+         """Test PerformanceComparator initialization."""
+         assert comparator is not None
+         assert comparator.sample_size == 2
+         assert comparator.warmup_runs == 1
+         assert comparator.timeout_seconds == 30
+         assert len(comparator.standard_workflows) > 0
+         assert isinstance(comparator.significance_thresholds, dict)
+
+     def test_standard_workflows_creation(self, comparator):
+         """Test creation of standard benchmark workflows."""
+         workflows = comparator.standard_workflows
+
+         assert len(workflows) > 0
+
+         # Check workflow structure
+         for name, workflow in workflows:
+             assert isinstance(name, str)
+             assert workflow is not None
+             assert len(name) > 0
+
+         # Should have specific standard workflows
+         workflow_names = [name for name, _ in workflows]
+         assert "simple_linear" in workflow_names
+         assert "multi_node" in workflow_names
+         assert "memory_intensive" in workflow_names
+
+     @patch("kailash.migration.performance_comparator.LocalRuntime")
+     @patch("psutil.Process")
+     def test_benchmark_configuration(
+         self, mock_process, mock_runtime_class, comparator, simple_workflow
+     ):
+         """Test benchmarking of a specific configuration."""
+         # Setup mocks
+         mock_runtime = Mock()
+         mock_runtime.execute.return_value = ({"test": "result"}, "run_123")
+         mock_runtime_class.return_value = mock_runtime
+
+         mock_process_instance = Mock()
+         mock_process_instance.memory_info.return_value.rss = 100 * 1024 * 1024  # 100MB
+         mock_process_instance.cpu_percent.return_value = 50.0
+         mock_process.return_value = mock_process_instance
+
+         config = {"debug": True, "max_concurrency": 2}
+         test_workflows = [("test", simple_workflow)]
+
+         benchmarks = comparator.benchmark_configuration(config, test_workflows)
+
+         assert isinstance(benchmarks, list)
+         assert len(benchmarks) == 1
+
+         benchmark = benchmarks[0]
+         assert isinstance(benchmark, PerformanceBenchmark)
+         assert benchmark.test_name == "test"
+         assert benchmark.success is True
+         assert benchmark.execution_time_ms > 0
+
+     @patch("kailash.migration.performance_comparator.LocalRuntime")
+     @patch("psutil.Process")
+     def test_compare_configurations(
+         self, mock_process, mock_runtime_class, comparator, simple_workflow
+     ):
+         """Test comparison between two configurations."""
+         # Setup mocks
+         mock_runtime = Mock()
+         mock_runtime.execute.return_value = ({"test": "result"}, "run_123")
+         mock_runtime_class.return_value = mock_runtime
+
+         mock_process_instance = Mock()
+         # Simulate different memory usage for before/after
+         mock_process_instance.memory_info.return_value.rss = 100 * 1024 * 1024  # 100MB
+         mock_process_instance.cpu_percent.return_value = 50.0
+         mock_process.return_value = mock_process_instance
+
+         before_config = {"debug": True, "max_concurrency": 1}
+         after_config = {"debug": True, "max_concurrency": 2}
+         test_workflows = [("test", simple_workflow)]
+
+         report = comparator.compare_configurations(
+             before_config, after_config, test_workflows
+         )
+
+         assert isinstance(report, PerformanceReport)
+         assert len(report.before_benchmarks) == 1
+         assert len(report.after_benchmarks) == 1
+         assert len(report.comparisons) > 0
+         assert isinstance(report.overall_improvement, bool)
+         assert isinstance(report.overall_change_percentage, float)
+
+     def test_create_comparison(self, comparator):
+         """Test creation of performance comparison results."""
+         comparison = comparator._create_comparison("execution_time", 100.0, 80.0, "ms")
+
+         assert isinstance(comparison, ComparisonResult)
+         assert comparison.metric_name == "execution_time"
+         assert comparison.before_value == 100.0
+         assert comparison.after_value == 80.0
+         assert comparison.change_absolute == -20.0
+         assert comparison.change_percentage == -20.0
+         assert comparison.improvement is True  # Lower time is better
+         assert comparison.significance in [
+             "major_improvement",
+             "minor_improvement",
+             "negligible",
+             "minor_regression",
+             "major_regression",
+         ]
+
+     def test_assess_significance(self, comparator):
+         """Test significance assessment for performance changes."""
+         # Major improvement
+         assert comparator._assess_significance(-25.0) == "major_improvement"
+
+         # Minor improvement
+         assert comparator._assess_significance(-10.0) == "minor_improvement"
+
+         # Negligible change
+         assert comparator._assess_significance(2.0) == "negligible"
+
+         # Minor regression
+         assert comparator._assess_significance(15.0) == "minor_regression"
+
+         # Major regression
+         assert comparator._assess_significance(25.0) == "major_regression"
+
+     def test_generate_text_report(self, comparator):
+         """Test text report generation."""
+         # Create mock report
+         report = PerformanceReport(
+             before_benchmarks=[],
+             after_benchmarks=[],
+             overall_improvement=True,
+             overall_change_percentage=-10.5,
+             recommendations=["Test recommendation"],
+         )
+
+         text_report = comparator.generate_performance_report(report, "text")
+
+         assert isinstance(text_report, str)
+         assert len(text_report) > 0
+         assert "Performance Comparison Report" in text_report
+         assert "EXECUTIVE SUMMARY" in text_report
+         assert "IMPROVEMENT" in text_report
+         assert "-10.5%" in text_report
+
+     def test_generate_json_report(self, comparator):
+         """Test JSON report generation."""
+         report = PerformanceReport(
+             before_benchmarks=[],
+             after_benchmarks=[],
+             overall_improvement=False,
+             overall_change_percentage=5.2,
+         )
+
+         json_report = comparator.generate_performance_report(report, "json")
+
+         assert isinstance(json_report, str)
+
+         # Should be valid JSON
+         import json
+
+         data = json.loads(json_report)
+
+         assert "summary" in data
+         assert data["summary"]["overall_improvement"] is False
+         assert data["summary"]["overall_change_percentage"] == 5.2
+
+     def test_generate_markdown_report(self, comparator):
+         """Test markdown report generation."""
+         report = PerformanceReport(
+             before_benchmarks=[],
+             after_benchmarks=[],
+             overall_improvement=True,
+             overall_change_percentage=-15.0,
+         )
+
+         markdown_report = comparator.generate_performance_report(report, "markdown")
+
+         assert isinstance(markdown_report, str)
+         assert len(markdown_report) > 0
+         assert "# LocalRuntime Performance Comparison Report" in markdown_report
+         assert "## Executive Summary" in markdown_report
+         assert "| Metric | Value |" in markdown_report
+
+     @patch("kailash.migration.performance_comparator.LocalRuntime")
+     def test_benchmark_error_handling(
+         self, mock_runtime_class, comparator, simple_workflow
+     ):
+         """Test error handling during benchmarking."""
+         # Setup mock to raise an exception
+         mock_runtime = Mock()
+         mock_runtime.execute.side_effect = Exception("Test error")
+         mock_runtime_class.return_value = mock_runtime
+
+         config = {"debug": True}
+         test_workflows = [("error_test", simple_workflow)]
+
+         benchmarks = comparator.benchmark_configuration(config, test_workflows)
+
+         assert len(benchmarks) == 1
+         benchmark = benchmarks[0]
+         assert benchmark.success is False
+         assert benchmark.error_message == "Test error"
+
+     def test_recommendations_generation(self, comparator):
+         """Test generation of performance recommendations."""
+         # Create report with regression
+         report = PerformanceReport(
+             before_benchmarks=[],
+             after_benchmarks=[],
+             overall_improvement=False,
+             overall_change_percentage=15.0,
+             risk_assessment="medium",
+         )
+
+         # Add mock comparison showing execution time regression
+         comparison = ComparisonResult(
+             metric_name="execution_time",
+             before_value=100.0,
+             after_value=115.0,
+             change_absolute=15.0,
+             change_percentage=15.0,
+             improvement=False,
+             significance="minor_regression",
+             unit="ms",
+         )
+         report.comparisons = [comparison]
+
+         comparator._generate_recommendations(report)
+
+         assert len(report.recommendations) > 0
+
+         # Should include regression-related recommendations
+         recommendations_text = " ".join(report.recommendations)
+         assert (
+             "regressed" in recommendations_text.lower()
+             or "performance" in recommendations_text.lower()
+         )
+
+     def test_overall_performance_assessment(self, comparator):
+         """Test overall performance assessment."""
+         report = PerformanceReport(before_benchmarks=[], after_benchmarks=[])
+
+         # Add comparisons with mixed results
+         comparisons = [
+             ComparisonResult(
+                 metric_name="execution_time",
+                 before_value=100.0,
+                 after_value=90.0,
+                 change_absolute=-10.0,
+                 change_percentage=-10.0,
+                 improvement=True,
+                 significance="minor_improvement",
+                 unit="ms",
+             ),
+             ComparisonResult(
+                 metric_name="memory_usage",
+                 before_value=50.0,
+                 after_value=55.0,
+                 change_absolute=5.0,
+                 change_percentage=10.0,
+                 improvement=False,
+                 significance="minor_regression",
+                 unit="mb",
+             ),
+         ]
+         report.comparisons = comparisons
+
+         comparator._assess_overall_performance(report)
+
+         # Should calculate weighted average
+         assert isinstance(report.overall_change_percentage, float)
+         assert isinstance(report.overall_improvement, bool)
+         assert report.risk_assessment in ["low", "medium", "high"]
+
+     def test_save_report(self, comparator, tmp_path):
+         """Test saving report to file."""
+         report = PerformanceReport(
+             before_benchmarks=[],
+             after_benchmarks=[],
+             overall_improvement=True,
+             overall_change_percentage=-5.0,
+         )
+
+         file_path = tmp_path / "performance_report.json"
+         comparator.save_report(report, file_path, "json")
+
+         assert file_path.exists()
+
+         # Check file content
+         import json
+
+         with open(file_path) as f:
+             data = json.load(f)
+
+         assert "summary" in data
+         assert data["summary"]["overall_improvement"] is True
+
+
+ class TestPerformanceMetric:
+     """Test cases for PerformanceMetric dataclass."""
+
+     def test_creation(self):
+         """Test PerformanceMetric creation."""
+         metric = PerformanceMetric(name="execution_time", value=123.45, unit="ms")
+
+         assert metric.name == "execution_time"
+         assert metric.value == 123.45
+         assert metric.unit == "ms"
+         assert metric.timestamp is not None
+         assert isinstance(metric.metadata, dict)
+
+
+ class TestPerformanceBenchmark:
+     """Test cases for PerformanceBenchmark dataclass."""
+
+     def test_creation(self):
+         """Test PerformanceBenchmark creation."""
+         benchmark = PerformanceBenchmark(
+             test_name="test_case", configuration={"debug": True}
+         )
+
+         assert benchmark.test_name == "test_case"
+         assert benchmark.configuration == {"debug": True}
+         assert benchmark.execution_time_ms == 0.0
+         assert benchmark.memory_usage_mb == 0.0
+         assert benchmark.cpu_usage_percent == 0.0
+         assert benchmark.success is True
+         assert benchmark.error_message is None
+         assert benchmark.run_timestamp is not None
+         assert isinstance(benchmark.metrics, list)
+
+
+ class TestComparisonResult:
+     """Test cases for ComparisonResult dataclass."""
+
+     def test_creation(self):
+         """Test ComparisonResult creation."""
+         result = ComparisonResult(
+             metric_name="test_metric",
+             before_value=100.0,
+             after_value=90.0,
+             change_absolute=-10.0,
+             change_percentage=-10.0,
+             improvement=True,
+             significance="minor_improvement",
+             unit="ms",
+         )
+
+         assert result.metric_name == "test_metric"
+         assert result.before_value == 100.0
+         assert result.after_value == 90.0
+         assert result.change_absolute == -10.0
+         assert result.change_percentage == -10.0
+         assert result.improvement is True
+         assert result.significance == "minor_improvement"
+         assert result.unit == "ms"
+
+
+ class TestPerformanceReport:
+     """Test cases for PerformanceReport dataclass."""
+
+     def test_creation(self):
+         """Test PerformanceReport creation."""
+         before_benchmarks = [
+             PerformanceBenchmark("test1", {"debug": True}),
+             PerformanceBenchmark("test2", {"debug": True}),
+         ]
+         after_benchmarks = [
+             PerformanceBenchmark("test1", {"debug": False}),
+             PerformanceBenchmark("test2", {"debug": False}),
+         ]
+
+         report = PerformanceReport(
+             before_benchmarks=before_benchmarks, after_benchmarks=after_benchmarks
+         )
+
+         assert len(report.before_benchmarks) == 2
+         assert len(report.after_benchmarks) == 2
+         assert isinstance(report.comparisons, list)
+         assert report.overall_improvement is False
+         assert report.overall_change_percentage == 0.0
+         assert isinstance(report.recommendations, list)
+         assert report.risk_assessment == "low"
+         assert report.generated_at is not None
+
+
+ if __name__ == "__main__":
+     pytest.main([__file__])
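
For readers scanning this diff, the tests above also serve as a rough guide to how the new kailash.migration.performance_comparator module is driven. The sketch below is assembled only from calls the tests themselves make (the PerformanceComparator constructor arguments, compare_configurations, generate_performance_report, and save_report); the workflow contents and configuration keys are illustrative, and exact signatures should be confirmed against performance_comparator.py in 0.9.16.

from pathlib import Path

from kailash.migration.performance_comparator import PerformanceComparator
from kailash.workflow.builder import WorkflowBuilder

# Build a small workflow to benchmark (node type and code taken from the test fixture).
builder = WorkflowBuilder()
builder.add_node(
    "PythonCodeNode", "test_node", {"code": "result = 42", "output_key": "answer"}
)
workflow = builder.build()

# Compare two LocalRuntime configurations over the same workflow set.
# The configuration keys mirror those used in the tests; they are illustrative here.
comparator = PerformanceComparator(sample_size=2, warmup_runs=1, timeout_seconds=30)
report = comparator.compare_configurations(
    {"debug": True, "max_concurrency": 1},  # before
    {"debug": True, "max_concurrency": 2},  # after
    [("test", workflow)],
)

# Render the report in one of the formats the tests cover and persist it.
print(comparator.generate_performance_report(report, "markdown"))
comparator.save_report(report, Path("performance_report.json"), "json")

As the dataclass tests confirm, the resulting PerformanceReport carries the before/after benchmarks, per-metric ComparisonResult entries, an overall improvement flag and change percentage, a risk assessment, and a list of recommendations.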