crackerjack 0.33.0__py3-none-any.whl → 0.33.2__py3-none-any.whl

This diff shows the content of publicly available package versions as released to their public registries. It is provided for informational purposes only and reflects the changes between the two published versions.

Potentially problematic release.



Files changed (198)
  1. crackerjack/__main__.py +1350 -34
  2. crackerjack/adapters/__init__.py +17 -0
  3. crackerjack/adapters/lsp_client.py +358 -0
  4. crackerjack/adapters/rust_tool_adapter.py +194 -0
  5. crackerjack/adapters/rust_tool_manager.py +193 -0
  6. crackerjack/adapters/skylos_adapter.py +231 -0
  7. crackerjack/adapters/zuban_adapter.py +560 -0
  8. crackerjack/agents/base.py +7 -3
  9. crackerjack/agents/coordinator.py +271 -33
  10. crackerjack/agents/documentation_agent.py +9 -15
  11. crackerjack/agents/dry_agent.py +3 -15
  12. crackerjack/agents/formatting_agent.py +1 -1
  13. crackerjack/agents/import_optimization_agent.py +36 -180
  14. crackerjack/agents/performance_agent.py +17 -98
  15. crackerjack/agents/performance_helpers.py +7 -31
  16. crackerjack/agents/proactive_agent.py +1 -3
  17. crackerjack/agents/refactoring_agent.py +16 -85
  18. crackerjack/agents/refactoring_helpers.py +7 -42
  19. crackerjack/agents/security_agent.py +9 -48
  20. crackerjack/agents/test_creation_agent.py +356 -513
  21. crackerjack/agents/test_specialist_agent.py +0 -4
  22. crackerjack/api.py +6 -25
  23. crackerjack/cli/cache_handlers.py +204 -0
  24. crackerjack/cli/cache_handlers_enhanced.py +683 -0
  25. crackerjack/cli/facade.py +100 -0
  26. crackerjack/cli/handlers.py +224 -9
  27. crackerjack/cli/interactive.py +6 -4
  28. crackerjack/cli/options.py +642 -55
  29. crackerjack/cli/utils.py +2 -1
  30. crackerjack/code_cleaner.py +58 -117
  31. crackerjack/config/global_lock_config.py +8 -48
  32. crackerjack/config/hooks.py +53 -62
  33. crackerjack/core/async_workflow_orchestrator.py +24 -34
  34. crackerjack/core/autofix_coordinator.py +3 -17
  35. crackerjack/core/enhanced_container.py +4 -13
  36. crackerjack/core/file_lifecycle.py +12 -89
  37. crackerjack/core/performance.py +2 -2
  38. crackerjack/core/performance_monitor.py +15 -55
  39. crackerjack/core/phase_coordinator.py +104 -204
  40. crackerjack/core/resource_manager.py +14 -90
  41. crackerjack/core/service_watchdog.py +62 -95
  42. crackerjack/core/session_coordinator.py +149 -0
  43. crackerjack/core/timeout_manager.py +14 -72
  44. crackerjack/core/websocket_lifecycle.py +13 -78
  45. crackerjack/core/workflow_orchestrator.py +171 -174
  46. crackerjack/docs/INDEX.md +11 -0
  47. crackerjack/docs/generated/api/API_REFERENCE.md +10895 -0
  48. crackerjack/docs/generated/api/CLI_REFERENCE.md +109 -0
  49. crackerjack/docs/generated/api/CROSS_REFERENCES.md +1755 -0
  50. crackerjack/docs/generated/api/PROTOCOLS.md +3 -0
  51. crackerjack/docs/generated/api/SERVICES.md +1252 -0
  52. crackerjack/documentation/__init__.py +31 -0
  53. crackerjack/documentation/ai_templates.py +756 -0
  54. crackerjack/documentation/dual_output_generator.py +765 -0
  55. crackerjack/documentation/mkdocs_integration.py +518 -0
  56. crackerjack/documentation/reference_generator.py +977 -0
  57. crackerjack/dynamic_config.py +55 -50
  58. crackerjack/executors/async_hook_executor.py +10 -15
  59. crackerjack/executors/cached_hook_executor.py +117 -43
  60. crackerjack/executors/hook_executor.py +8 -34
  61. crackerjack/executors/hook_lock_manager.py +26 -183
  62. crackerjack/executors/individual_hook_executor.py +13 -11
  63. crackerjack/executors/lsp_aware_hook_executor.py +270 -0
  64. crackerjack/executors/tool_proxy.py +417 -0
  65. crackerjack/hooks/lsp_hook.py +79 -0
  66. crackerjack/intelligence/adaptive_learning.py +25 -10
  67. crackerjack/intelligence/agent_orchestrator.py +2 -5
  68. crackerjack/intelligence/agent_registry.py +34 -24
  69. crackerjack/intelligence/agent_selector.py +5 -7
  70. crackerjack/interactive.py +17 -6
  71. crackerjack/managers/async_hook_manager.py +0 -1
  72. crackerjack/managers/hook_manager.py +79 -1
  73. crackerjack/managers/publish_manager.py +44 -8
  74. crackerjack/managers/test_command_builder.py +1 -15
  75. crackerjack/managers/test_executor.py +1 -3
  76. crackerjack/managers/test_manager.py +98 -7
  77. crackerjack/managers/test_manager_backup.py +10 -9
  78. crackerjack/mcp/cache.py +2 -2
  79. crackerjack/mcp/client_runner.py +1 -1
  80. crackerjack/mcp/context.py +191 -68
  81. crackerjack/mcp/dashboard.py +7 -5
  82. crackerjack/mcp/enhanced_progress_monitor.py +31 -28
  83. crackerjack/mcp/file_monitor.py +30 -23
  84. crackerjack/mcp/progress_components.py +31 -21
  85. crackerjack/mcp/progress_monitor.py +50 -53
  86. crackerjack/mcp/rate_limiter.py +6 -6
  87. crackerjack/mcp/server_core.py +17 -16
  88. crackerjack/mcp/service_watchdog.py +2 -1
  89. crackerjack/mcp/state.py +4 -7
  90. crackerjack/mcp/task_manager.py +11 -9
  91. crackerjack/mcp/tools/core_tools.py +173 -32
  92. crackerjack/mcp/tools/error_analyzer.py +3 -2
  93. crackerjack/mcp/tools/execution_tools.py +8 -10
  94. crackerjack/mcp/tools/execution_tools_backup.py +42 -30
  95. crackerjack/mcp/tools/intelligence_tool_registry.py +7 -5
  96. crackerjack/mcp/tools/intelligence_tools.py +5 -2
  97. crackerjack/mcp/tools/monitoring_tools.py +33 -70
  98. crackerjack/mcp/tools/proactive_tools.py +24 -11
  99. crackerjack/mcp/tools/progress_tools.py +5 -8
  100. crackerjack/mcp/tools/utility_tools.py +20 -14
  101. crackerjack/mcp/tools/workflow_executor.py +62 -40
  102. crackerjack/mcp/websocket/app.py +8 -0
  103. crackerjack/mcp/websocket/endpoints.py +352 -357
  104. crackerjack/mcp/websocket/jobs.py +40 -57
  105. crackerjack/mcp/websocket/monitoring_endpoints.py +2935 -0
  106. crackerjack/mcp/websocket/server.py +7 -25
  107. crackerjack/mcp/websocket/websocket_handler.py +6 -17
  108. crackerjack/mixins/__init__.py +0 -2
  109. crackerjack/mixins/error_handling.py +1 -70
  110. crackerjack/models/config.py +12 -1
  111. crackerjack/models/config_adapter.py +49 -1
  112. crackerjack/models/protocols.py +122 -122
  113. crackerjack/models/resource_protocols.py +55 -210
  114. crackerjack/monitoring/ai_agent_watchdog.py +13 -13
  115. crackerjack/monitoring/metrics_collector.py +426 -0
  116. crackerjack/monitoring/regression_prevention.py +8 -8
  117. crackerjack/monitoring/websocket_server.py +643 -0
  118. crackerjack/orchestration/advanced_orchestrator.py +11 -6
  119. crackerjack/orchestration/coverage_improvement.py +3 -3
  120. crackerjack/orchestration/execution_strategies.py +26 -6
  121. crackerjack/orchestration/test_progress_streamer.py +8 -5
  122. crackerjack/plugins/base.py +2 -2
  123. crackerjack/plugins/hooks.py +7 -0
  124. crackerjack/plugins/managers.py +11 -8
  125. crackerjack/security/__init__.py +0 -1
  126. crackerjack/security/audit.py +6 -35
  127. crackerjack/services/anomaly_detector.py +392 -0
  128. crackerjack/services/api_extractor.py +615 -0
  129. crackerjack/services/backup_service.py +2 -2
  130. crackerjack/services/bounded_status_operations.py +15 -152
  131. crackerjack/services/cache.py +127 -1
  132. crackerjack/services/changelog_automation.py +395 -0
  133. crackerjack/services/config.py +15 -9
  134. crackerjack/services/config_merge.py +19 -80
  135. crackerjack/services/config_template.py +506 -0
  136. crackerjack/services/contextual_ai_assistant.py +48 -22
  137. crackerjack/services/coverage_badge_service.py +171 -0
  138. crackerjack/services/coverage_ratchet.py +27 -25
  139. crackerjack/services/debug.py +3 -3
  140. crackerjack/services/dependency_analyzer.py +460 -0
  141. crackerjack/services/dependency_monitor.py +14 -11
  142. crackerjack/services/documentation_generator.py +491 -0
  143. crackerjack/services/documentation_service.py +675 -0
  144. crackerjack/services/enhanced_filesystem.py +6 -5
  145. crackerjack/services/enterprise_optimizer.py +865 -0
  146. crackerjack/services/error_pattern_analyzer.py +676 -0
  147. crackerjack/services/file_hasher.py +1 -1
  148. crackerjack/services/git.py +8 -25
  149. crackerjack/services/health_metrics.py +10 -8
  150. crackerjack/services/heatmap_generator.py +735 -0
  151. crackerjack/services/initialization.py +11 -30
  152. crackerjack/services/input_validator.py +5 -97
  153. crackerjack/services/intelligent_commit.py +327 -0
  154. crackerjack/services/log_manager.py +15 -12
  155. crackerjack/services/logging.py +4 -3
  156. crackerjack/services/lsp_client.py +628 -0
  157. crackerjack/services/memory_optimizer.py +19 -87
  158. crackerjack/services/metrics.py +42 -33
  159. crackerjack/services/parallel_executor.py +9 -67
  160. crackerjack/services/pattern_cache.py +1 -1
  161. crackerjack/services/pattern_detector.py +6 -6
  162. crackerjack/services/performance_benchmarks.py +18 -59
  163. crackerjack/services/performance_cache.py +20 -81
  164. crackerjack/services/performance_monitor.py +27 -95
  165. crackerjack/services/predictive_analytics.py +510 -0
  166. crackerjack/services/quality_baseline.py +234 -0
  167. crackerjack/services/quality_baseline_enhanced.py +646 -0
  168. crackerjack/services/quality_intelligence.py +785 -0
  169. crackerjack/services/regex_patterns.py +618 -524
  170. crackerjack/services/regex_utils.py +43 -123
  171. crackerjack/services/secure_path_utils.py +5 -164
  172. crackerjack/services/secure_status_formatter.py +30 -141
  173. crackerjack/services/secure_subprocess.py +11 -92
  174. crackerjack/services/security.py +9 -41
  175. crackerjack/services/security_logger.py +12 -24
  176. crackerjack/services/server_manager.py +124 -16
  177. crackerjack/services/status_authentication.py +16 -159
  178. crackerjack/services/status_security_manager.py +4 -131
  179. crackerjack/services/thread_safe_status_collector.py +19 -125
  180. crackerjack/services/unified_config.py +21 -13
  181. crackerjack/services/validation_rate_limiter.py +5 -54
  182. crackerjack/services/version_analyzer.py +459 -0
  183. crackerjack/services/version_checker.py +1 -1
  184. crackerjack/services/websocket_resource_limiter.py +10 -144
  185. crackerjack/services/zuban_lsp_service.py +390 -0
  186. crackerjack/slash_commands/__init__.py +2 -7
  187. crackerjack/slash_commands/run.md +2 -2
  188. crackerjack/tools/validate_input_validator_patterns.py +14 -40
  189. crackerjack/tools/validate_regex_patterns.py +19 -48
  190. {crackerjack-0.33.0.dist-info → crackerjack-0.33.2.dist-info}/METADATA +196 -25
  191. crackerjack-0.33.2.dist-info/RECORD +229 -0
  192. crackerjack/CLAUDE.md +0 -207
  193. crackerjack/RULES.md +0 -380
  194. crackerjack/py313.py +0 -234
  195. crackerjack-0.33.0.dist-info/RECORD +0 -187
  196. {crackerjack-0.33.0.dist-info → crackerjack-0.33.2.dist-info}/WHEEL +0 -0
  197. {crackerjack-0.33.0.dist-info → crackerjack-0.33.2.dist-info}/entry_points.txt +0 -0
  198. {crackerjack-0.33.0.dist-info → crackerjack-0.33.2.dist-info}/licenses/LICENSE +0 -0
@@ -1,9 +1,3 @@
-"""Performance monitoring and profiling service for crackerjack workflows.
-
-This module provides comprehensive performance tracking, benchmarking,
-and analysis capabilities for the workflow execution system.
-"""
-
 import json
 import typing as t
 from collections import defaultdict, deque
@@ -20,19 +14,15 @@ from crackerjack.services.performance_cache import get_performance_cache
 
 @dataclass
 class PerformanceMetric:
-    """Individual performance metric."""
-
     name: str
     value: float
     unit: str
     timestamp: datetime = field(default_factory=datetime.now)
-    metadata: dict[str, Any] = field(default_factory=dict)
+    metadata: dict[str, Any] = field(default_factory=dict[str, t.Any])
 
 
 @dataclass
 class PhasePerformance:
-    """Performance data for a workflow phase."""
-
    phase_name: str
    start_time: datetime
    end_time: datetime | None = None
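The only functional edit in the hunk above is the default_factory swap on metadata. A standalone sketch (not package code) showing why the two spellings behave identically at runtime — calling a subscripted generic such as dict[str, t.Any] returns an ordinary dict:

    # Standalone check: a subscripted generic alias is callable (Python 3.9+)
    # and produces a plain dict, so the default_factory change above is a
    # runtime no-op that only carries extra type information.
    import typing as t
    from dataclasses import dataclass, field


    @dataclass
    class Metric:  # hypothetical stand-in for PerformanceMetric
        name: str
        metadata: dict[str, t.Any] = field(default_factory=dict[str, t.Any])


    m = Metric(name="duration")
    assert type(m.metadata) is dict and m.metadata == {}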
@@ -48,15 +38,12 @@ class PhasePerformance:
     metrics: list[PerformanceMetric] = field(default_factory=list)
 
     def finalize(self, end_time: datetime | None = None) -> None:
-        """Finalize phase timing."""
         self.end_time = end_time or datetime.now()
         self.duration_seconds = (self.end_time - self.start_time).total_seconds()
 
 
 @dataclass
 class WorkflowPerformance:
-    """Complete workflow performance data."""
-
     workflow_id: str
     start_time: datetime
     end_time: datetime | None = None
@@ -66,27 +53,20 @@ class WorkflowPerformance:
     performance_score: float = 0.0
 
     def add_phase(self, phase: PhasePerformance) -> None:
-        """Add phase performance data."""
         self.phases.append(phase)
 
     def finalize(self, success: bool = True) -> None:
-        """Finalize workflow timing and calculate performance score."""
         self.end_time = datetime.now()
         self.total_duration_seconds = (self.end_time - self.start_time).total_seconds()
         self.overall_success = success
         self.performance_score = self._calculate_performance_score()
 
     def _calculate_performance_score(self) -> float:
-        """Calculate overall performance score (0-100)."""
         if not self.phases:
             return 0.0
 
-        # Base score from duration (faster = higher score)
-        duration_score = max(
-            0, 100 - (self.total_duration_seconds / 10)
-        )  # Penalize >10s
+        duration_score = max(0, 100 - (self.total_duration_seconds / 10))
 
-        # Cache efficiency score
         total_hits = sum(p.cache_hits for p in self.phases)
         total_misses = sum(p.cache_misses for p in self.phases)
         cache_ratio = (
@@ -94,9 +74,8 @@ class WorkflowPerformance:
             if total_hits + total_misses > 0
             else 0
         )
-        cache_score = cache_ratio * 20  # Max 20 points for cache efficiency
+        cache_score = cache_ratio * 20
 
-        # Parallelization score
         total_parallel = sum(p.parallel_operations for p in self.phases)
         total_sequential = sum(p.sequential_operations for p in self.phases)
         parallel_ratio = (
@@ -104,13 +83,11 @@ class WorkflowPerformance:
             if total_parallel + total_sequential > 0
             else 0
         )
-        parallel_score = parallel_ratio * 15  # Max 15 points for parallelization
+        parallel_score = parallel_ratio * 15
 
-        # Memory efficiency score (lower memory usage = higher score)
         max_memory = max((p.memory_peak_mb for p in self.phases), default=0)
-        memory_score = max(0, 15 - (max_memory / 50))  # Penalize >50MB usage
+        memory_score = max(0, 15 - (max_memory / 50))
 
-        # Success bonus
         success_score = 10 if self.overall_success else 0
 
         return min(
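Spelled out, the score is a weighted sum: up to 100 points for speed, 20 for cache efficiency, 15 for parallelization, 15 for low memory, and 10 for success. A hedged restatement with a worked example — the hunk truncates at "return min(", so the final clamp to 100 is an assumption:

    # Illustrative restatement of _calculate_performance_score (standalone;
    # the clamp to 100.0 is assumed from the truncated "return min(").
    def performance_score(
        duration_s: float,
        cache_hits: int,
        cache_misses: int,
        parallel_ops: int,
        sequential_ops: int,
        max_memory_mb: float,
        success: bool,
    ) -> float:
        duration_score = max(0, 100 - (duration_s / 10))
        lookups = cache_hits + cache_misses
        cache_score = (cache_hits / lookups if lookups else 0) * 20
        ops = parallel_ops + sequential_ops
        parallel_score = (parallel_ops / ops if ops else 0) * 15
        memory_score = max(0, 15 - (max_memory_mb / 50))
        success_score = 10 if success else 0
        return min(
            100.0,
            duration_score + cache_score + parallel_score + memory_score + success_score,
        )


    # A 20s run with 80% cache hits, half parallel, 25MB peak, success:
    # 98 + 16 + 7.5 + 14.5 + 10 = 146 -> clamped to 100.0
    print(performance_score(20.0, 8, 2, 5, 5, 25.0, True))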
@@ -125,8 +102,6 @@
 
 @dataclass
 class PerformanceBenchmark:
-    """Performance benchmark data."""
-
     operation_name: str
     baseline_duration_seconds: float
     current_duration_seconds: float
@@ -134,7 +109,6 @@ class PerformanceBenchmark:
     regression: bool = False
 
     def __post_init__(self) -> None:
-        """Calculate improvement metrics."""
         if self.baseline_duration_seconds > 0:
             self.improvement_percentage = (
                 (self.baseline_duration_seconds - self.current_duration_seconds)
@@ -145,8 +119,6 @@ class PerformanceBenchmark:
 
 
 class PerformanceMonitor:
-    """Central performance monitoring service."""
-
     def __init__(
         self,
         data_retention_days: int = 30,
@@ -159,25 +131,22 @@ class PerformanceMonitor:
         self._initialize_thresholds()
 
     def _initialize_data_structures(self, history_size: int) -> None:
-        """Initialize performance data structures."""
         self._active_workflows: dict[str, WorkflowPerformance] = {}
         self._active_phases: dict[str, PhasePerformance] = {}
         self._completed_workflows: deque[WorkflowPerformance] = deque(
             maxlen=history_size
         )
         self._benchmarks: dict[str, deque[float]] = defaultdict(
-            lambda: deque(maxlen=history_size)
+            lambda: deque(maxlen=history_size)  # type: ignore[arg-type]
         )
 
     def _initialize_services(self) -> None:
-        """Initialize external services and utilities."""
         self._lock = Lock()
         self._logger = get_logger("crackerjack.performance_monitor")
         self._memory_optimizer = MemoryOptimizer.get_instance()
         self._cache = get_performance_cache()
 
     def _initialize_thresholds(self) -> None:
-        """Initialize performance warning thresholds."""
         self._warning_thresholds = {
             "duration_seconds": 30.0,
             "memory_mb": 100.0,
@@ -185,7 +154,6 @@ class PerformanceMonitor:
         }
 
     def start_workflow(self, workflow_id: str) -> None:
-        """Start monitoring a workflow."""
         with self._lock:
             if workflow_id in self._active_workflows:
                 self._logger.warning(f"Workflow {workflow_id} already being monitored")
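The benchmark store built in _initialize_data_structures above is a defaultdict that lazily creates one bounded deque per operation, so per-operation history can never grow past history_size. A standalone sketch of that pattern (values invented):

    # Bounded-history pattern, as in _benchmarks above: each key gets its
    # own fixed-size deque, and old measurements are evicted automatically.
    from collections import defaultdict, deque

    history_size = 3
    benchmarks: dict[str, deque[float]] = defaultdict(
        lambda: deque(maxlen=history_size)
    )

    for duration in (1.0, 2.0, 3.0, 4.0):
        benchmarks["pytest"].append(duration)

    print(benchmarks["pytest"])  # deque([2.0, 3.0, 4.0], maxlen=3) - oldest dropped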
@@ -199,13 +167,11 @@ class PerformanceMonitor:
             self._active_workflows[workflow_id] = workflow
             self._logger.debug(f"Started monitoring workflow: {workflow_id}")
 
-            # Start memory profiling
             self._memory_optimizer.start_profiling()
 
     def end_workflow(
         self, workflow_id: str, success: bool = True
     ) -> WorkflowPerformance:
-        """End workflow monitoring and return performance data."""
         with self._lock:
             if workflow_id not in self._active_workflows:
                 self._logger.warning(f"Workflow {workflow_id} not found for ending")
@@ -216,31 +182,27 @@ class PerformanceMonitor:
             workflow = self._active_workflows.pop(workflow_id)
             workflow.finalize(success)
 
-            # Add to completed workflows for analysis
             self._completed_workflows.append(workflow)
 
             self._logger.info(
                 f"Completed workflow {workflow_id}: "
-                f"{workflow.total_duration_seconds:.2f}s, "
-                f"score: {workflow.performance_score:.1f}, "
+                f"{workflow.total_duration_seconds: .2f}s, "
+                f"score: {workflow.performance_score: .1f}, "
                 f"phases: {len(workflow.phases)}"
             )
 
-            # Check for performance warnings
             self._check_performance_warnings(workflow)
 
             return workflow
 
     def start_phase(self, workflow_id: str, phase_name: str) -> None:
-        """Start monitoring a workflow phase."""
-        phase_key = f"{workflow_id}:{phase_name}"
+        phase_key = f"{workflow_id}: {phase_name}"
 
         with self._lock:
             if phase_key in self._active_phases:
                 self._logger.warning(f"Phase {phase_key} already being monitored")
                 return
 
-            # Record memory checkpoint
             memory_mb = self._memory_optimizer.record_checkpoint(f"{phase_name}_start")
 
             phase = PhasePerformance(
@@ -255,8 +217,7 @@ class PerformanceMonitor:
     def end_phase(
         self, workflow_id: str, phase_name: str, success: bool = True
     ) -> PhasePerformance:
-        """End phase monitoring and attach to workflow."""
-        phase_key = f"{workflow_id}:{phase_name}"
+        phase_key = f"{workflow_id}: {phase_name}"
 
         with self._lock:
             if phase_key not in self._active_phases:
@@ -268,24 +229,21 @@ class PerformanceMonitor:
             phase = self._active_phases.pop(phase_key)
             phase.success = success
 
-            # Record final memory usage
             phase.memory_end_mb = self._memory_optimizer.record_checkpoint(
                 f"{phase_name}_end"
             )
 
-            # Get cache statistics
             cache_stats = self._cache.get_stats()
             phase.cache_hits = cache_stats.hits
             phase.cache_misses = cache_stats.misses
 
             phase.finalize()
 
-            # Add to workflow if it exists
             if workflow_id in self._active_workflows:
                 self._active_workflows[workflow_id].add_phase(phase)
 
             self._logger.debug(
-                f"Completed phase {phase_key}: {phase.duration_seconds:.2f}s"
+                f"Completed phase {phase_key}: {phase.duration_seconds: .2f}s"
             )
 
             return phase
@@ -299,7 +257,6 @@ class PerformanceMonitor:
         unit: str = "",
         metadata: dict[str, t.Any] | None = None,
     ) -> None:
-        """Record a performance metric."""
         metric = PerformanceMetric(
             name=metric_name,
             value=value,
@@ -307,7 +264,7 @@ class PerformanceMonitor:
             metadata=metadata or {},
         )
 
-        phase_key = f"{workflow_id}:{phase_name}"
+        phase_key = f"{workflow_id}: {phase_name}"
 
         with self._lock:
             if phase_key in self._active_phases:
@@ -318,16 +275,14 @@ class PerformanceMonitor:
             )
 
     def record_parallel_operation(self, workflow_id: str, phase_name: str) -> None:
-        """Record a parallel operation."""
-        phase_key = f"{workflow_id}:{phase_name}"
+        phase_key = f"{workflow_id}: {phase_name}"
 
         with self._lock:
             if phase_key in self._active_phases:
                 self._active_phases[phase_key].parallel_operations += 1
 
     def record_sequential_operation(self, workflow_id: str, phase_name: str) -> None:
-        """Record a sequential operation."""
-        phase_key = f"{workflow_id}:{phase_name}"
+        phase_key = f"{workflow_id}: {phase_name}"
 
         with self._lock:
             if phase_key in self._active_phases:
@@ -336,13 +291,11 @@ class PerformanceMonitor:
     def benchmark_operation(
         self, operation_name: str, duration_seconds: float
     ) -> PerformanceBenchmark:
-        """Benchmark an operation against historical data."""
         with self._lock:
             history = self._benchmarks[operation_name]
             history.append(duration_seconds)
 
             if len(history) > 1:
-                # Use median as baseline to avoid outlier skew
                 sorted_history = sorted(history)
                 baseline = sorted_history[len(sorted_history) // 2]
 
@@ -352,7 +305,6 @@ class PerformanceMonitor:
                     operation_name=operation_name,
                     current_duration_seconds=duration_seconds,
                 )
-            # First measurement, no baseline
             return PerformanceBenchmark(
                 operation_name=operation_name,
                 baseline_duration_seconds=duration_seconds,
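The removed comment explained the baseline choice: the middle element of the sorted history (the upper median for even lengths), which a single slow outlier cannot drag. A standalone illustration with invented numbers:

    # Median-as-baseline, as in benchmark_operation above (numbers invented).
    from collections import deque

    history: deque[float] = deque([1.2, 1.1, 9.8, 1.3], maxlen=100)  # 9.8 is an outlier
    sorted_history = sorted(history)                     # [1.1, 1.2, 1.3, 9.8]
    baseline = sorted_history[len(sorted_history) // 2]  # 1.3 - unmoved by 9.8
    current = 1.0
    improvement = (baseline - current) / baseline * 100  # ~23.1%, per __post_init__
    print(f"baseline={baseline}s, improvement={improvement:.1f}%")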
@@ -360,14 +312,14 @@ class PerformanceMonitor:
             )
 
     def get_performance_summary(self, last_n_workflows: int = 10) -> dict[str, Any]:
-        """Get performance summary for recent workflows."""
         with self._lock:
-            recent_workflows = list(self._completed_workflows)[-last_n_workflows:]
+            recent_workflows = list[t.Any](self._completed_workflows)[
+                -last_n_workflows:
+            ]
 
             if not recent_workflows:
                 return {"message": "No completed workflows to analyze"}
 
-            # Calculate aggregate statistics using helper methods
             basic_stats = self._calculate_basic_workflow_stats(recent_workflows)
             cache_stats = self._calculate_cache_statistics(recent_workflows)
             parallel_stats = self._calculate_parallelization_statistics(
@@ -387,7 +339,6 @@ class PerformanceMonitor:
     def _calculate_basic_workflow_stats(
         self, workflows: list[WorkflowPerformance]
     ) -> dict[str, Any]:
-        """Calculate basic workflow statistics (duration, score, success rate)."""
         total_duration = sum(w.total_duration_seconds for w in workflows)
         avg_duration = total_duration / len(workflows)
         avg_score = sum(w.performance_score for w in workflows) / len(workflows)
@@ -402,7 +353,6 @@ class PerformanceMonitor:
     def _calculate_cache_statistics(
         self, workflows: list[WorkflowPerformance]
     ) -> dict[str, Any]:
-        """Calculate cache hit/miss statistics across workflows."""
         total_cache_hits = sum(sum(p.cache_hits for p in w.phases) for w in workflows)
         total_cache_misses = sum(
             sum(p.cache_misses for p in w.phases) for w in workflows
@@ -423,7 +373,6 @@ class PerformanceMonitor:
     def _calculate_parallelization_statistics(
         self, workflows: list[WorkflowPerformance]
     ) -> dict[str, Any]:
-        """Calculate parallelization statistics across workflows."""
         total_parallel = sum(
             sum(p.parallel_operations for p in w.phases) for w in workflows
         )
@@ -444,7 +393,6 @@ class PerformanceMonitor:
         }
 
     def get_benchmark_trends(self) -> dict[str, dict[str, Any]]:
-        """Get benchmark trends for all operations."""
         trends = {}
 
         with self._lock:
@@ -452,7 +400,7 @@ class PerformanceMonitor:
                 if len(history) < 2:
                     continue
 
-                history_list = list(history)
+                history_list = list[t.Any](history)
                 basic_stats = self._calculate_benchmark_basic_stats(history_list)
                 trend_percentage = self._calculate_trend_percentage(history_list)
 
@@ -466,7 +414,6 @@ class PerformanceMonitor:
     def _calculate_benchmark_basic_stats(
         self, history_list: list[float]
     ) -> dict[str, float]:
-        """Calculate basic statistics for benchmark history."""
         avg_duration = sum(history_list) / len(history_list)
         min_duration = min(history_list)
         max_duration = max(history_list)
@@ -478,7 +425,6 @@ class PerformanceMonitor:
         }
 
     def _calculate_trend_percentage(self, history_list: list[float]) -> float:
-        """Calculate trend percentage for benchmark improvement."""
         if len(history_list) < 5:
             return 0.0
 
@@ -492,7 +438,6 @@ class PerformanceMonitor:
         return ((older_avg - recent_avg) / older_avg * 100) if older_avg > 0 else 0.0
 
     def export_performance_data(self, output_path: Path) -> None:
-        """Export performance data to JSON file."""
         with self._lock:
             data = {
                 "export_timestamp": datetime.now().isoformat(),
@@ -521,7 +466,8 @@ class PerformanceMonitor:
                     for w in self._completed_workflows
                 ],
                 "benchmarks": {
-                    name: list(history) for name, history in self._benchmarks.items()
+                    name: list[t.Any](history)
+                    for name, history in self._benchmarks.items()
                 },
                 "summary": self.get_performance_summary(),
                 "trends": self.get_benchmark_trends(),
@@ -533,44 +479,38 @@ class PerformanceMonitor:
         self._logger.info(f"Exported performance data to {output_path}")
 
     def _check_performance_warnings(self, workflow: WorkflowPerformance) -> None:
-        """Check for performance warnings and log them only in debug mode."""
         warnings = []
 
-        # Collect warnings from different checks
         warnings.extend(self._check_duration_warning(workflow))
         warnings.extend(self._check_memory_warning(workflow))
         warnings.extend(self._check_cache_warning(workflow))
 
-        # Log all warnings at debug level to avoid console spam
         for warning in warnings:
            self._logger.debug(
                f"Performance warning for {workflow.workflow_id}: {warning}"
            )
 
     def _check_duration_warning(self, workflow: WorkflowPerformance) -> list[str]:
-        """Check for duration-based warnings."""
         if (
             workflow.total_duration_seconds
             > self._warning_thresholds["duration_seconds"]
         ):
             return [
-                f"Slow workflow duration: {workflow.total_duration_seconds:.1f}s "
+                f"Slow workflow duration: {workflow.total_duration_seconds: .1f}s "
                 f"(threshold: {self._warning_thresholds['duration_seconds']}s)"
             ]
         return []
 
     def _check_memory_warning(self, workflow: WorkflowPerformance) -> list[str]:
-        """Check for memory usage warnings."""
         max_memory = max((p.memory_peak_mb for p in workflow.phases), default=0)
         if max_memory > self._warning_thresholds["memory_mb"]:
             return [
-                f"High memory usage: {max_memory:.1f}MB "
+                f"High memory usage: {max_memory: .1f}MB "
                 f"(threshold: {self._warning_thresholds['memory_mb']}MB)"
             ]
         return []
 
     def _check_cache_warning(self, workflow: WorkflowPerformance) -> list[str]:
-        """Check for cache efficiency warnings."""
         total_hits = sum(p.cache_hits for p in workflow.phases)
         total_misses = sum(p.cache_misses for p in workflow.phases)
 
@@ -578,19 +518,17 @@ class PerformanceMonitor:
             hit_ratio = total_hits / (total_hits + total_misses)
             if hit_ratio < self._warning_thresholds["cache_hit_ratio"]:
                 return [
-                    f"Low cache hit ratio: {hit_ratio:.2f} "
+                    f"Low cache hit ratio: {hit_ratio: .2f} "
                     f"(threshold: {self._warning_thresholds['cache_hit_ratio']})"
                 ]
         return []
 
 
-# Global monitor instance
 _global_monitor: PerformanceMonitor | None = None
 _monitor_lock = Lock()
 
 
 def get_performance_monitor() -> PerformanceMonitor:
-    """Get global performance monitor instance."""
     global _global_monitor
     with _monitor_lock:
         if _global_monitor is None:
@@ -598,16 +536,13 @@ def get_performance_monitor() -> PerformanceMonitor:
         return _global_monitor
 
 
-# Context manager for easy phase monitoring
 class phase_monitor:
-    """Context manager for phase performance monitoring."""
-
     def __init__(self, workflow_id: str, phase_name: str):
         self.workflow_id = workflow_id
         self.phase_name = phase_name
         self.monitor = get_performance_monitor()
 
-    def __enter__(self):
+    def __enter__(self) -> "phase_monitor":
         self.monitor.start_phase(self.workflow_id, self.phase_name)
         return self
 
@@ -620,14 +555,11 @@ class phase_monitor:
         success = exc_type is None
         self.monitor.end_phase(self.workflow_id, self.phase_name, success)
 
-    def record_parallel_op(self):
-        """Record a parallel operation in this phase."""
+    def record_parallel_op(self) -> None:
        self.monitor.record_parallel_operation(self.workflow_id, self.phase_name)
 
-    def record_sequential_op(self):
-        """Record a sequential operation in this phase."""
+    def record_sequential_op(self) -> None:
        self.monitor.record_sequential_operation(self.workflow_id, self.phase_name)
 
-    def record_metric(self, name: str, value: float, unit: str = ""):
-        """Record a custom metric in this phase."""
+    def record_metric(self, name: str, value: float, unit: str = "") -> None:
        self.monitor.record_metric(self.workflow_id, self.phase_name, name, value, unit)
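Taken together, the module-level accessor and the phase_monitor context manager support a usage pattern along these lines — a hedged sketch using only the calls visible in this diff; the workflow id, phase name, and metric are invented:

    # Hypothetical usage of the API shown above (identifiers invented).
    from crackerjack.services.performance_monitor import (
        get_performance_monitor,
        phase_monitor,
    )

    monitor = get_performance_monitor()      # module-level singleton
    monitor.start_workflow("wf-42")

    with phase_monitor("wf-42", "fast_hooks") as phase:
        phase.record_parallel_op()           # count one concurrent operation
        phase.record_metric("files_checked", 128.0, unit="files")
    # __exit__ ends the phase, passing success=False if an exception escaped

    workflow = monitor.end_workflow("wf-42", success=True)
    print(f"score: {workflow.performance_score:.1f}")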