kailash 0.9.2__py3-none-any.whl → 0.9.4__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
@@ -199,7 +199,7 @@ class ParallelRuntime:
         )

         # Process nodes until all are complete
-        while ready_nodes or pending_nodes:
+        while ready_nodes or pending_nodes or node_tasks:
             # Schedule ready nodes up to max_workers limit
             while ready_nodes and len(node_tasks) < self.max_workers:
                 node_id = ready_nodes.popleft()
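
Context for the one-line fix above: ready_nodes and pending_nodes can both drain while tasks are still executing, and it is a finishing task that promotes pending nodes to ready. Adding node_tasks to the condition keeps the loop alive until the last in-flight node completes. A minimal sketch of the pattern (a toy scheduler, not the actual ParallelRuntime internals; run_all and graph are hypothetical names):

import asyncio
from collections import deque

async def run_all(graph, max_workers=4):
    """Toy scheduler; graph maps node_id -> set of dependency node_ids."""
    pending_nodes = {n for n, deps in graph.items() if deps}
    ready_nodes = deque(n for n, deps in graph.items() if not deps)
    node_tasks = {}
    done = set()
    # Without "or node_tasks", the loop could exit while tasks are still
    # in flight, before their completion promotes dependents to ready.
    while ready_nodes or pending_nodes or node_tasks:
        while ready_nodes and len(node_tasks) < max_workers:
            node_id = ready_nodes.popleft()
            # Stand-in for real node execution; sleep returns node_id
            node_tasks[node_id] = asyncio.create_task(asyncio.sleep(0.01, node_id))
        finished, _ = await asyncio.wait(
            node_tasks.values(), return_when=asyncio.FIRST_COMPLETED
        )
        for task in finished:
            done_id = task.result()
            del node_tasks[done_id]
            done.add(done_id)
            # Promote nodes whose dependencies are now all satisfied
            for n in [p for p in pending_nodes if graph[p] <= done]:
                pending_nodes.discard(n)
                ready_nodes.append(n)
    return done

asyncio.run(run_all({"a": set(), "b": {"a"}, "c": {"a", "b"}}))

With the old condition, the toy loop would exit as soon as "c" was scheduled but not yet finished; the new condition waits for it.
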
@@ -0,0 +1,215 @@
+"""Performance monitoring for conditional execution.
+
+This module provides performance tracking and automatic fallback capabilities
+for the conditional execution feature.
+"""
+
+import logging
+import time
+from collections import deque
+from dataclasses import dataclass
+from typing import Any, Dict, Optional, Tuple
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class ExecutionMetrics:
+    """Metrics for a single execution."""
+
+    execution_time: float
+    node_count: int
+    skipped_nodes: int
+    memory_usage: Optional[float] = None
+    execution_mode: str = "route_data"
+
+    @property
+    def nodes_per_second(self) -> float:
+        """Calculate execution rate."""
+        if self.execution_time > 0:
+            return self.node_count / self.execution_time
+        return 0.0
+
+    @property
+    def skip_ratio(self) -> float:
+        """Calculate percentage of nodes skipped."""
+        total = self.node_count + self.skipped_nodes
+        if total > 0:
+            return self.skipped_nodes / total
+        return 0.0
+
+
+class PerformanceMonitor:
+    """Monitor performance and make mode switching decisions.
+
+    Tracks execution performance of conditional vs standard execution
+    and automatically switches modes based on performance thresholds.
+    """
+
+    def __init__(
+        self,
+        performance_threshold: float = 0.9,  # Prefer skip_branches only if >=10% faster
+        sample_size: int = 10,
+        min_samples: int = 3,
+    ):
+        """Initialize performance monitor.
+
+        Args:
+            performance_threshold: Ratio threshold for switching modes (0.9 = skip_branches must be at least 10% faster to be preferred)
+            sample_size: Number of recent executions to track
+            min_samples: Minimum samples before making switching decisions
+        """
+        self.performance_threshold = performance_threshold
+        self.sample_size = sample_size
+        self.min_samples = min_samples
+
+        # Track metrics for each mode
+        self.metrics: Dict[str, deque] = {
+            "route_data": deque(maxlen=sample_size),
+            "skip_branches": deque(maxlen=sample_size),
+        }
+
+        # Performance statistics
+        self.mode_performance: Dict[str, float] = {
+            "route_data": 0.0,
+            "skip_branches": 0.0,
+        }
+
+        # Current recommendation
+        self.recommended_mode = "route_data"  # Safe default
+        self._last_evaluation_time = 0.0
+        self._evaluation_interval = 60.0  # Re-evaluate every minute
+
+    def record_execution(self, metrics: ExecutionMetrics) -> None:
+        """Record execution metrics.
+
+        Args:
+            metrics: Execution metrics to record
+        """
+        mode = metrics.execution_mode
+        if mode in self.metrics:
+            self.metrics[mode].append(metrics)
+            logger.debug(
+                f"Recorded {mode} execution: {metrics.execution_time:.3f}s, "
+                f"{metrics.node_count} nodes, {metrics.skipped_nodes} skipped"
+            )
+
+    def should_switch_mode(self, current_mode: str) -> Tuple[bool, str, str]:
+        """Determine if mode should be switched based on performance.
+
+        Args:
+            current_mode: Currently active execution mode
+
+        Returns:
+            Tuple of (should_switch, recommended_mode, reason)
+        """
+        # Check if enough time has passed since last evaluation
+        current_time = time.time()
+        if current_time - self._last_evaluation_time < self._evaluation_interval:
+            return False, current_mode, "Too soon since last evaluation"
+
+        self._last_evaluation_time = current_time
+
+        # Calculate average performance for each mode
+        route_data_avg = self._calculate_average_performance("route_data")
+        skip_branches_avg = self._calculate_average_performance("skip_branches")
+
+        # Not enough data to make decision
+        if route_data_avg is None or skip_branches_avg is None:
+            return False, current_mode, "Insufficient performance data"
+
+        # Update performance statistics
+        self.mode_performance["route_data"] = route_data_avg
+        self.mode_performance["skip_branches"] = skip_branches_avg
+
+        # Determine recommendation based on performance
+        if skip_branches_avg < route_data_avg * self.performance_threshold:
+            # skip_branches is significantly faster
+            self.recommended_mode = "skip_branches"
+            if current_mode != "skip_branches":
+                reason = (
+                    f"skip_branches mode is {(1 - skip_branches_avg/route_data_avg)*100:.1f}% faster "
+                    f"({skip_branches_avg:.3f}s vs {route_data_avg:.3f}s)"
+                )
+                return True, "skip_branches", reason
+        else:
+            # route_data is faster or difference is negligible
+            self.recommended_mode = "route_data"
+            if current_mode != "route_data":
+                reason = (
+                    f"route_data mode is faster or difference negligible "
+                    f"({route_data_avg:.3f}s vs {skip_branches_avg:.3f}s)"
+                )
+                return True, "route_data", reason
+
+        return False, current_mode, "Current mode is optimal"
+
+    def _calculate_average_performance(self, mode: str) -> Optional[float]:
+        """Calculate average execution time for a mode.
+
+        Args:
+            mode: Execution mode to analyze
+
+        Returns:
+            Average execution time per node, or None if insufficient data
+        """
+        if mode not in self.metrics:
+            return None
+
+        metrics_list = list(self.metrics[mode])
+        if len(metrics_list) < self.min_samples:
+            return None
+
+        # Calculate average time per node
+        total_time = sum(m.execution_time for m in metrics_list)
+        total_nodes = sum(m.node_count for m in metrics_list)
+
+        if total_nodes > 0:
+            return total_time / total_nodes
+        return None
+
+    def get_performance_report(self) -> Dict[str, Any]:
+        """Generate performance report.
+
+        Returns:
+            Dictionary with performance statistics
+        """
+        report = {
+            "recommended_mode": self.recommended_mode,
+            "mode_performance": self.mode_performance.copy(),
+            "sample_counts": {
+                mode: len(metrics) for mode, metrics in self.metrics.items()
+            },
+            "performance_threshold": self.performance_threshold,
+        }
+
+        # Add detailed metrics if available
+        for mode, metrics_deque in self.metrics.items():
+            if metrics_deque:
+                metrics_list = list(metrics_deque)
+                report[f"{mode}_stats"] = {
+                    "avg_execution_time": sum(m.execution_time for m in metrics_list)
+                    / len(metrics_list),
+                    "avg_nodes": sum(m.node_count for m in metrics_list)
+                    / len(metrics_list),
+                    "avg_skip_ratio": sum(m.skip_ratio for m in metrics_list)
+                    / len(metrics_list),
+                    "total_executions": len(metrics_list),
+                }
+
+        return report
+
+    def clear_metrics(self, mode: Optional[str] = None) -> None:
+        """Clear performance metrics.
+
+        Args:
+            mode: Specific mode to clear, or None to clear all
+        """
+        if mode:
+            if mode in self.metrics:
+                self.metrics[mode].clear()
+        else:
+            for m in self.metrics.values():
+                m.clear()
+
+        logger.info(f"Cleared performance metrics for: {mode or 'all modes'}")
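
Typical use of the new monitor, based only on the API shown in this hunk (the import path below is a placeholder, since the diff does not show where the new file lives in the package; the timing numbers are made up for illustration):

# Placeholder import path; the diff does not name the new module's location.
from performance_monitor import ExecutionMetrics, PerformanceMonitor

monitor = PerformanceMonitor(performance_threshold=0.9, sample_size=10, min_samples=3)

# Record three runs per mode to satisfy min_samples
for t in (1.2, 1.1, 1.3):
    monitor.record_execution(
        ExecutionMetrics(execution_time=t, node_count=50, skipped_nodes=0,
                         execution_mode="route_data")
    )
for t in (0.7, 0.8, 0.75):
    monitor.record_execution(
        ExecutionMetrics(execution_time=t, node_count=40, skipped_nodes=10,
                         execution_mode="skip_branches")
    )

# With these numbers skip_branches averages ~22% less time per node,
# which clears the 0.9 threshold, so this returns (True, "skip_branches", ...)
should_switch, mode, reason = monitor.should_switch_mode("route_data")
print(should_switch, mode, reason)
print(monitor.get_performance_report()["recommended_mode"])

Note that averages are computed per node rather than per run, so modes that execute different numbers of nodes (as conditional execution does by design) are still comparable.
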
@@ -207,6 +207,7 @@ class ImportPathValidator:
         - Module exists as sibling to current file
         - Module is not a known SDK module
         - Module is not a standard library module
+        - Module is not a legitimate top-level package
         """
         # Skip if it's a known SDK module
         if module_name in self.sdk_modules:
@@ -228,6 +229,12 @@ class ImportPathValidator:
         ]:
             return False

+        # Skip common top-level package names that are meant for absolute imports
+        # These are legitimate when used as project structure roots
+        top_level_packages = ["src", "lib", "app", "pkg"]
+        if module_name in top_level_packages:
+            return False
+
         # Check if module exists as sibling
         parent_dir = file_path.parent
         possible_module = parent_dir / module_name
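
The effect of the exemption, isolated into a standalone sketch (the validator's real method name and signature are not shown in this hunk, so looks_like_missing_relative_import is a hypothetical stand-in for the sibling-module heuristic):

from pathlib import Path

TOP_LEVEL_PACKAGES = ["src", "lib", "app", "pkg"]

def looks_like_missing_relative_import(module_name: str, file_path: Path) -> bool:
    """Flag an absolute import when the module exists as a sibling of file_path."""
    # New in 0.9.4: these names are treated as legitimate absolute-import roots,
    # so e.g. "from src.utils import x" is no longer flagged even when a
    # sibling directory named "src" exists next to the importing file.
    if module_name in TOP_LEVEL_PACKAGES:
        return False
    sibling = file_path.parent / module_name
    return sibling.is_dir() or sibling.with_suffix(".py").exists()

print(looks_like_missing_relative_import("src", Path("project/app/main.py")))  # False

Previously, a project laid out under a src/ root could trip the sibling check and have a correct absolute import reported as a missing relative import; the hard-coded allowlist trades a little precision for fewer false positives on common layouts.
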