kailash 0.9.2__py3-none-any.whl → 0.9.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,497 @@
1
+ """Compatibility reporting for conditional execution.
2
+
3
+ This module provides detailed compatibility analysis and reporting
4
+ for workflows using conditional execution.
5
+ """
6
+
7
+ import logging
8
+ from dataclasses import dataclass, field
9
+ from enum import Enum
10
+ from typing import Any, Dict, List, Optional, Set
11
+
12
+ from kailash.analysis import ConditionalBranchAnalyzer
13
+ from kailash.workflow.graph import Workflow
14
+
15
+ logger = logging.getLogger(__name__)
16
+
17
+
18
class CompatibilityLevel(Enum):
    """Degree of support a workflow has for conditional execution.

    The string values are serialized (via ``.value``) into report dicts
    and markdown, so they are part of the public report format.
    """

    # Every detected pattern is fully supported.
    FULLY_COMPATIBLE = "fully_compatible"
    # Works, but some patterns fall back to standard execution.
    PARTIALLY_COMPATIBLE = "partially_compatible"
    # Conditional execution cannot be used for this workflow.
    INCOMPATIBLE = "incompatible"
24
+
25
+
26
@dataclass
class PatternInfo:
    """A single workflow pattern found during compatibility analysis."""

    # Human-readable category, e.g. "Simple Conditional Routing".
    pattern_type: str
    # IDs of the workflow nodes participating in this pattern.
    node_ids: List[str]
    # Short explanation of what was detected.
    description: str
    # How well this particular pattern works with conditional execution.
    compatibility: CompatibilityLevel
    # Optional guidance for the workflow author; omitted when the
    # pattern needs no action.
    recommendation: Optional[str] = None
35
+
36
+
37
@dataclass
class CompatibilityReport:
    """Comprehensive compatibility report for a workflow.

    Aggregates every pattern detected by the reporter together with
    warnings, recommendations, and a rough performance estimate.  Can be
    rendered as a plain dict (:meth:`to_dict`, suitable for JSON) or as
    a markdown document (:meth:`to_markdown`).
    """

    workflow_id: str
    workflow_name: str
    overall_compatibility: CompatibilityLevel
    # Total node count in the workflow graph.
    node_count: int
    # Number of switch (conditional-routing) nodes among them.
    switch_count: int
    # Patterns found in the workflow, in detection order.
    detected_patterns: List[PatternInfo] = field(default_factory=list)
    # Human-readable issues that may degrade conditional execution.
    warnings: List[str] = field(default_factory=list)
    # Actionable suggestions for the workflow author.
    recommendations: List[str] = field(default_factory=list)
    # One-line performance expectation, filled in by the reporter.
    execution_estimate: Optional[str] = None

    def to_dict(self) -> Dict[str, Any]:
        """Convert report to dictionary format.

        Returns:
            A JSON-serializable dict; enum members are flattened to
            their string ``.value`` form.
        """
        return {
            "workflow_id": self.workflow_id,
            "workflow_name": self.workflow_name,
            "overall_compatibility": self.overall_compatibility.value,
            "node_count": self.node_count,
            "switch_count": self.switch_count,
            "detected_patterns": [
                {
                    "type": p.pattern_type,
                    "nodes": p.node_ids,
                    "description": p.description,
                    "compatibility": p.compatibility.value,
                    "recommendation": p.recommendation,
                }
                for p in self.detected_patterns
            ],
            "warnings": self.warnings,
            "recommendations": self.recommendations,
            "execution_estimate": self.execution_estimate,
        }

    def to_markdown(self) -> str:
        """Generate markdown report.

        Returns:
            A newline-joined markdown document with a summary header
            and optional Performance Estimate, Detected Patterns,
            Warnings, and Recommendations sections.
        """
        # Summary header is always present.
        lines = [
            "# Conditional Execution Compatibility Report",
            "",
            f"**Workflow**: {self.workflow_name} ({self.workflow_id})",
            f"**Overall Compatibility**: {self.overall_compatibility.value.replace('_', ' ').title()}",
            f"**Nodes**: {self.node_count} total, {self.switch_count} switches",
            "",
        ]

        if self.execution_estimate:
            lines.extend(
                [
                    "## Performance Estimate",
                    f"{self.execution_estimate}",
                    "",
                ]
            )

        if self.detected_patterns:
            lines.extend(
                [
                    "## Detected Patterns",
                    "",
                ]
            )
            for pattern in self.detected_patterns:
                # Only fully-compatible patterns get the check mark;
                # partial and incompatible both share the warning icon.
                compat_icon = (
                    "✅"
                    if pattern.compatibility == CompatibilityLevel.FULLY_COMPATIBLE
                    else "⚠️"
                )
                lines.extend(
                    [
                        f"### {compat_icon} {pattern.pattern_type}",
                        f"- **Nodes**: {', '.join(pattern.node_ids)}",
                        f"- **Description**: {pattern.description}",
                        f"- **Compatibility**: {pattern.compatibility.value.replace('_', ' ').title()}",
                    ]
                )
                if pattern.recommendation:
                    lines.append(f"- **Recommendation**: {pattern.recommendation}")
                lines.append("")

        if self.warnings:
            lines.extend(
                [
                    "## ⚠️ Warnings",
                    "",
                ]
            )
            for warning in self.warnings:
                lines.append(f"- {warning}")
            lines.append("")

        if self.recommendations:
            lines.extend(
                [
                    "## 💡 Recommendations",
                    "",
                ]
            )
            for rec in self.recommendations:
                lines.append(f"- {rec}")

        return "\n".join(lines)
141
+
142
+
143
class CompatibilityReporter:
    """Analyze and report on conditional execution compatibility.

    Create once, then call :meth:`analyze_workflow` per workflow.  Not
    safe for concurrent use: ``self.analyzer`` is replaced on each call
    and the private ``_analyze_*`` helpers read it.
    """

    def __init__(self):
        """Initialize compatibility reporter."""
        # Set by analyze_workflow(); the _analyze_* helpers assume it
        # is non-None when they run.
        self.analyzer: Optional[ConditionalBranchAnalyzer] = None

    def analyze_workflow(self, workflow: Workflow) -> CompatibilityReport:
        """Analyze workflow compatibility with conditional execution.

        Args:
            workflow: Workflow to analyze

        Returns:
            Comprehensive compatibility report
        """
        self.analyzer = ConditionalBranchAnalyzer(workflow)

        # Initialize report; assume full compatibility until one of the
        # analysis passes below downgrades it.
        report = CompatibilityReport(
            workflow_id=workflow.workflow_id,
            workflow_name=workflow.name or "Unnamed Workflow",
            overall_compatibility=CompatibilityLevel.FULLY_COMPATIBLE,
            node_count=len(workflow.graph.nodes()),
            switch_count=len(self.analyzer._find_switch_nodes()),
        )

        # Analyze various patterns (each pass appends to the report).
        self._analyze_basic_switches(report)
        self._analyze_cycles(workflow, report)
        self._analyze_merge_nodes(workflow, report)
        self._analyze_hierarchical_switches(report)
        self._analyze_complex_dependencies(workflow, report)

        # Determine overall compatibility from the collected patterns.
        self._determine_overall_compatibility(report)

        # Add performance estimate
        self._add_performance_estimate(report)

        # Generate recommendations
        self._generate_recommendations(report)

        return report

    def _analyze_basic_switches(self, report: CompatibilityReport) -> None:
        """Analyze basic switch patterns.

        Classifies each switch node as simple (<= 2 output branches) or
        multi-case, appending one PatternInfo per non-empty group.
        """
        switch_nodes = self.analyzer._find_switch_nodes()

        if switch_nodes:
            # Check for simple conditional routing
            simple_switches = []
            complex_switches = []

            for switch_id in switch_nodes:
                branch_map = self.analyzer._get_switch_branch_map(switch_id)
                if len(branch_map) <= 2:  # true/false outputs
                    simple_switches.append(switch_id)
                else:
                    complex_switches.append(switch_id)

            if simple_switches:
                report.detected_patterns.append(
                    PatternInfo(
                        pattern_type="Simple Conditional Routing",
                        node_ids=simple_switches,
                        description="Basic true/false conditional branches",
                        compatibility=CompatibilityLevel.FULLY_COMPATIBLE,
                    )
                )

            if complex_switches:
                report.detected_patterns.append(
                    PatternInfo(
                        pattern_type="Multi-Case Switches",
                        node_ids=complex_switches,
                        description="Switches with multiple output cases",
                        compatibility=CompatibilityLevel.FULLY_COMPATIBLE,
                        recommendation="Multi-case switches are supported and will benefit from branch pruning",
                    )
                )

            # Also check for switches configured with cases parameter.
            # A switch whose branch map looked simple above can still be
            # multi-case via its "cases" config; such a switch gets an
            # additional per-switch Multi-Case pattern entry.
            for switch_id in switch_nodes:
                node_data = self.analyzer.workflow.graph.nodes[switch_id]
                node_config = node_data.get("config", {})
                if "cases" in node_config and node_config["cases"]:
                    if switch_id not in complex_switches:
                        report.detected_patterns.append(
                            PatternInfo(
                                pattern_type="Multi-Case Switches",
                                node_ids=[switch_id],
                                description=f"Switch with {len(node_config['cases'])} cases",
                                compatibility=CompatibilityLevel.FULLY_COMPATIBLE,
                                recommendation="Multi-case switches are supported and will benefit from branch pruning",
                            )
                        )

    def _analyze_cycles(self, workflow: Workflow, report: CompatibilityReport) -> None:
        """Analyze cycle patterns.

        Detects graph cycles and downgrades overall compatibility when a
        cycle contains a switch node (conditional execution would risk
        infinite loops there).
        """
        try:
            import networkx as nx

            cycles = list(nx.simple_cycles(workflow.graph))

            if cycles:
                # Check if cycles contain switches
                switch_nodes = set(self.analyzer._find_switch_nodes())
                cycles_with_switches = []

                for cycle in cycles:
                    if any(node in switch_nodes for node in cycle):
                        cycles_with_switches.append(cycle)

                if cycles_with_switches:
                    report.detected_patterns.append(
                        PatternInfo(
                            pattern_type="Cycles with Conditional Routing",
                            node_ids=[
                                node for cycle in cycles_with_switches for node in cycle
                            ],
                            description="Cyclic workflows with conditional branches",
                            compatibility=CompatibilityLevel.PARTIALLY_COMPATIBLE,
                            recommendation="Conditional execution is disabled for cyclic workflows to prevent infinite loops",
                        )
                    )
                    report.warnings.append(
                        "Workflow contains cycles. Conditional execution will fall back to standard mode."
                    )
                    report.overall_compatibility = (
                        CompatibilityLevel.PARTIALLY_COMPATIBLE
                    )
                else:
                    report.detected_patterns.append(
                        PatternInfo(
                            pattern_type="Cycles without Switches",
                            node_ids=[node for cycle in cycles for node in cycle],
                            description="Cyclic workflows without conditional routing",
                            compatibility=CompatibilityLevel.PARTIALLY_COMPATIBLE,
                        )
                    )

        except Exception as e:
            # Broad catch is deliberate: cycle analysis is best-effort
            # and must never make the overall report fail.
            logger.warning(f"Error analyzing cycles: {e}")

    def _analyze_merge_nodes(
        self, workflow: Workflow, report: CompatibilityReport
    ) -> None:
        """Analyze merge node patterns.

        Finds nodes with multiple incoming edges that receive at least
        one edge from a switch, then splits them into explicit
        MergeNodes (fine) vs implicit merge points (warned about).
        """
        # Look for nodes with multiple incoming edges (potential merge points)
        merge_candidates = []

        for node_id in workflow.graph.nodes():
            in_degree = workflow.graph.in_degree(node_id)
            if in_degree > 1:
                # Check if any incoming edges are from switches
                incoming_from_switches = False
                for pred in workflow.graph.predecessors(node_id):
                    node_data = workflow.graph.nodes[pred]
                    # Node instances may be stored under either the
                    # "node" or "instance" key — TODO confirm which the
                    # graph builder writes.
                    node_instance = node_data.get("node") or node_data.get("instance")
                    # Heuristic: switches are identified by class name.
                    if node_instance and "Switch" in node_instance.__class__.__name__:
                        incoming_from_switches = True
                        break

                if incoming_from_switches:
                    merge_candidates.append(node_id)

        if merge_candidates:
            # Check for MergeNode type
            actual_merge_nodes = []
            implicit_merge_nodes = []

            for node_id in merge_candidates:
                node_data = workflow.graph.nodes[node_id]
                node_instance = node_data.get("node") or node_data.get("instance")
                # Same class-name heuristic as above, for merge nodes.
                if node_instance and "Merge" in node_instance.__class__.__name__:
                    actual_merge_nodes.append(node_id)
                else:
                    implicit_merge_nodes.append(node_id)

            if actual_merge_nodes:
                report.detected_patterns.append(
                    PatternInfo(
                        pattern_type="Merge Nodes with Conditional Inputs",
                        node_ids=actual_merge_nodes,
                        description="MergeNodes receiving conditional branches",
                        compatibility=CompatibilityLevel.FULLY_COMPATIBLE,
                        recommendation="MergeNodes handle conditional inputs gracefully",
                    )
                )

            if implicit_merge_nodes:
                report.detected_patterns.append(
                    PatternInfo(
                        pattern_type="Implicit Merge Points",
                        node_ids=implicit_merge_nodes,
                        description="Regular nodes receiving multiple conditional inputs",
                        compatibility=CompatibilityLevel.PARTIALLY_COMPATIBLE,
                        recommendation="Consider using explicit MergeNode for better handling",
                    )
                )
                report.warnings.append(
                    f"Nodes {implicit_merge_nodes} receive multiple conditional inputs without explicit merge handling"
                )

    def _analyze_hierarchical_switches(self, report: CompatibilityReport) -> None:
        """Analyze hierarchical switch patterns.

        Only meaningful when more than one switch exists; multi-layer
        hierarchies are marked partially compatible, and very deep ones
        (>3 layers) additionally produce a performance warning.
        """
        switch_nodes = self.analyzer._find_switch_nodes()

        if len(switch_nodes) > 1:
            # Check for dependencies between switches
            hierarchies = self.analyzer.detect_switch_hierarchies()

            if hierarchies:
                for hierarchy in hierarchies:
                    if len(hierarchy["layers"]) > 1:
                        # Flatten all layers into a single node list.
                        all_switches = [
                            s for layer in hierarchy["layers"] for s in layer
                        ]
                        report.detected_patterns.append(
                            PatternInfo(
                                pattern_type="Hierarchical Switches",
                                node_ids=all_switches,
                                description=f"{len(hierarchy['layers'])} layers of dependent switches",
                                compatibility=CompatibilityLevel.PARTIALLY_COMPATIBLE,
                                recommendation="Complex hierarchies may require careful testing",
                            )
                        )

                        if len(hierarchy["layers"]) > 3:
                            report.warnings.append(
                                "Deep switch hierarchies (>3 levels) may impact performance"
                            )

    def _analyze_complex_dependencies(
        self, workflow: Workflow, report: CompatibilityReport
    ) -> None:
        """Analyze complex dependency patterns.

        Flags non-switch nodes that have more than two switch nodes as
        direct predecessors.
        """
        # Check for nodes that depend on multiple switches
        switch_nodes = set(self.analyzer._find_switch_nodes())

        for node_id in workflow.graph.nodes():
            if node_id in switch_nodes:
                continue

            # Count direct switch predecessors (immediate upstream
            # switches only; transitive dependencies are not followed).
            upstream_switches = set()
            for pred in workflow.graph.predecessors(node_id):
                if pred in switch_nodes:
                    upstream_switches.add(pred)

            if len(upstream_switches) > 2:
                report.detected_patterns.append(
                    PatternInfo(
                        pattern_type="Complex Switch Dependencies",
                        node_ids=[node_id],
                        description=f"Node depends on {len(upstream_switches)} switches",
                        compatibility=CompatibilityLevel.PARTIALLY_COMPATIBLE,
                        recommendation="Complex dependencies are supported but may benefit from refactoring",
                    )
                )

    def _determine_overall_compatibility(self, report: CompatibilityReport) -> None:
        """Determine overall compatibility level.

        Takes the worst compatibility among detected patterns; a report
        with no patterns keeps whatever level it already has.
        """
        if not report.detected_patterns:
            return

        # Check pattern compatibility levels
        has_incompatible = any(
            p.compatibility == CompatibilityLevel.INCOMPATIBLE
            for p in report.detected_patterns
        )
        has_partial = any(
            p.compatibility == CompatibilityLevel.PARTIALLY_COMPATIBLE
            for p in report.detected_patterns
        )

        if has_incompatible:
            report.overall_compatibility = CompatibilityLevel.INCOMPATIBLE
        elif has_partial:
            report.overall_compatibility = CompatibilityLevel.PARTIALLY_COMPATIBLE
        else:
            report.overall_compatibility = CompatibilityLevel.FULLY_COMPATIBLE

    def _add_performance_estimate(self, report: CompatibilityReport) -> None:
        """Add performance improvement estimate.

        Heuristic only: the estimate is derived from the average number
        of branches per switch and scaled down 30% when the workflow is
        only partially compatible.
        """
        if report.switch_count == 0:
            report.execution_estimate = (
                "No conditional branches detected. No performance improvement expected."
            )
            return

        # Estimate based on switch patterns
        total_branches = sum(
            len(self.analyzer._get_switch_branch_map(switch_id))
            for switch_id in self.analyzer._find_switch_nodes()
        )

        avg_branches_per_switch = (
            total_branches / report.switch_count if report.switch_count > 0 else 0
        )

        # Rough estimate based on branching factor
        if avg_branches_per_switch <= 2:
            min_improvement = 20
            max_improvement = 30
        elif avg_branches_per_switch <= 4:
            min_improvement = 30
            max_improvement = 40
        else:
            min_improvement = 40
            max_improvement = 50

        # Adjust for complexity (floats here are fine: the f-string
        # below formats with :.0f).
        if report.overall_compatibility == CompatibilityLevel.PARTIALLY_COMPATIBLE:
            min_improvement *= 0.7
            max_improvement *= 0.7

        report.execution_estimate = (
            f"Expected performance improvement: {min_improvement:.0f}-{max_improvement:.0f}% "
            f"reduction in execution time with conditional execution enabled."
        )

    def _generate_recommendations(self, report: CompatibilityReport) -> None:
        """Generate actionable recommendations.

        Appends usage, refactoring, and benchmarking suggestions based
        on the report's overall state.
        """
        if report.overall_compatibility == CompatibilityLevel.FULLY_COMPATIBLE:
            report.recommendations.append(
                "Workflow is fully compatible with conditional execution. "
                "Enable with: LocalRuntime(conditional_execution='skip_branches')"
            )

        if report.warnings:
            report.recommendations.append(
                "Review warnings above and consider workflow refactoring for optimal performance"
            )

        # Check for optimization opportunities
        simple_switch_count = sum(
            1
            for p in report.detected_patterns
            if p.pattern_type == "Simple Conditional Routing"
        )

        if simple_switch_count > 5:
            report.recommendations.append(
                f"With {simple_switch_count} conditional branches, consider consolidating "
                "related logic to reduce workflow complexity"
            )

        # Performance testing recommendation
        if report.switch_count > 0:
            report.recommendations.append(
                "Run performance benchmarks to validate improvement estimates: "
                "python -m kailash.tools.benchmark_conditional <workflow_file>"
            )