crackerjack 0.32.0__py3-none-any.whl → 0.33.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crackerjack might be problematic. Click here for more details.

Files changed (200):
  1. crackerjack/__main__.py +1350 -34
  2. crackerjack/adapters/__init__.py +17 -0
  3. crackerjack/adapters/lsp_client.py +358 -0
  4. crackerjack/adapters/rust_tool_adapter.py +194 -0
  5. crackerjack/adapters/rust_tool_manager.py +193 -0
  6. crackerjack/adapters/skylos_adapter.py +231 -0
  7. crackerjack/adapters/zuban_adapter.py +560 -0
  8. crackerjack/agents/base.py +7 -3
  9. crackerjack/agents/coordinator.py +271 -33
  10. crackerjack/agents/documentation_agent.py +9 -15
  11. crackerjack/agents/dry_agent.py +3 -15
  12. crackerjack/agents/formatting_agent.py +1 -1
  13. crackerjack/agents/import_optimization_agent.py +36 -180
  14. crackerjack/agents/performance_agent.py +17 -98
  15. crackerjack/agents/performance_helpers.py +7 -31
  16. crackerjack/agents/proactive_agent.py +1 -3
  17. crackerjack/agents/refactoring_agent.py +16 -85
  18. crackerjack/agents/refactoring_helpers.py +7 -42
  19. crackerjack/agents/security_agent.py +9 -48
  20. crackerjack/agents/test_creation_agent.py +356 -513
  21. crackerjack/agents/test_specialist_agent.py +0 -4
  22. crackerjack/api.py +6 -25
  23. crackerjack/cli/cache_handlers.py +204 -0
  24. crackerjack/cli/cache_handlers_enhanced.py +683 -0
  25. crackerjack/cli/facade.py +100 -0
  26. crackerjack/cli/handlers.py +224 -9
  27. crackerjack/cli/interactive.py +6 -4
  28. crackerjack/cli/options.py +642 -55
  29. crackerjack/cli/utils.py +2 -1
  30. crackerjack/code_cleaner.py +58 -117
  31. crackerjack/config/global_lock_config.py +8 -48
  32. crackerjack/config/hooks.py +53 -62
  33. crackerjack/core/async_workflow_orchestrator.py +24 -34
  34. crackerjack/core/autofix_coordinator.py +3 -17
  35. crackerjack/core/enhanced_container.py +64 -6
  36. crackerjack/core/file_lifecycle.py +12 -89
  37. crackerjack/core/performance.py +2 -2
  38. crackerjack/core/performance_monitor.py +15 -55
  39. crackerjack/core/phase_coordinator.py +257 -218
  40. crackerjack/core/resource_manager.py +14 -90
  41. crackerjack/core/service_watchdog.py +62 -95
  42. crackerjack/core/session_coordinator.py +149 -0
  43. crackerjack/core/timeout_manager.py +14 -72
  44. crackerjack/core/websocket_lifecycle.py +13 -78
  45. crackerjack/core/workflow_orchestrator.py +558 -240
  46. crackerjack/docs/INDEX.md +11 -0
  47. crackerjack/docs/generated/api/API_REFERENCE.md +10895 -0
  48. crackerjack/docs/generated/api/CLI_REFERENCE.md +109 -0
  49. crackerjack/docs/generated/api/CROSS_REFERENCES.md +1755 -0
  50. crackerjack/docs/generated/api/PROTOCOLS.md +3 -0
  51. crackerjack/docs/generated/api/SERVICES.md +1252 -0
  52. crackerjack/documentation/__init__.py +31 -0
  53. crackerjack/documentation/ai_templates.py +756 -0
  54. crackerjack/documentation/dual_output_generator.py +765 -0
  55. crackerjack/documentation/mkdocs_integration.py +518 -0
  56. crackerjack/documentation/reference_generator.py +977 -0
  57. crackerjack/dynamic_config.py +55 -50
  58. crackerjack/executors/async_hook_executor.py +10 -15
  59. crackerjack/executors/cached_hook_executor.py +117 -43
  60. crackerjack/executors/hook_executor.py +8 -34
  61. crackerjack/executors/hook_lock_manager.py +26 -183
  62. crackerjack/executors/individual_hook_executor.py +13 -11
  63. crackerjack/executors/lsp_aware_hook_executor.py +270 -0
  64. crackerjack/executors/tool_proxy.py +417 -0
  65. crackerjack/hooks/lsp_hook.py +79 -0
  66. crackerjack/intelligence/adaptive_learning.py +25 -10
  67. crackerjack/intelligence/agent_orchestrator.py +2 -5
  68. crackerjack/intelligence/agent_registry.py +34 -24
  69. crackerjack/intelligence/agent_selector.py +5 -7
  70. crackerjack/interactive.py +17 -6
  71. crackerjack/managers/async_hook_manager.py +0 -1
  72. crackerjack/managers/hook_manager.py +79 -1
  73. crackerjack/managers/publish_manager.py +66 -13
  74. crackerjack/managers/test_command_builder.py +5 -17
  75. crackerjack/managers/test_executor.py +1 -3
  76. crackerjack/managers/test_manager.py +109 -7
  77. crackerjack/managers/test_manager_backup.py +10 -9
  78. crackerjack/mcp/cache.py +2 -2
  79. crackerjack/mcp/client_runner.py +1 -1
  80. crackerjack/mcp/context.py +191 -68
  81. crackerjack/mcp/dashboard.py +7 -5
  82. crackerjack/mcp/enhanced_progress_monitor.py +31 -28
  83. crackerjack/mcp/file_monitor.py +30 -23
  84. crackerjack/mcp/progress_components.py +31 -21
  85. crackerjack/mcp/progress_monitor.py +50 -53
  86. crackerjack/mcp/rate_limiter.py +6 -6
  87. crackerjack/mcp/server_core.py +161 -32
  88. crackerjack/mcp/service_watchdog.py +2 -1
  89. crackerjack/mcp/state.py +4 -7
  90. crackerjack/mcp/task_manager.py +11 -9
  91. crackerjack/mcp/tools/core_tools.py +174 -33
  92. crackerjack/mcp/tools/error_analyzer.py +3 -2
  93. crackerjack/mcp/tools/execution_tools.py +15 -12
  94. crackerjack/mcp/tools/execution_tools_backup.py +42 -30
  95. crackerjack/mcp/tools/intelligence_tool_registry.py +7 -5
  96. crackerjack/mcp/tools/intelligence_tools.py +5 -2
  97. crackerjack/mcp/tools/monitoring_tools.py +33 -70
  98. crackerjack/mcp/tools/proactive_tools.py +24 -11
  99. crackerjack/mcp/tools/progress_tools.py +5 -8
  100. crackerjack/mcp/tools/utility_tools.py +20 -14
  101. crackerjack/mcp/tools/workflow_executor.py +62 -40
  102. crackerjack/mcp/websocket/app.py +8 -0
  103. crackerjack/mcp/websocket/endpoints.py +352 -357
  104. crackerjack/mcp/websocket/jobs.py +40 -57
  105. crackerjack/mcp/websocket/monitoring_endpoints.py +2935 -0
  106. crackerjack/mcp/websocket/server.py +7 -25
  107. crackerjack/mcp/websocket/websocket_handler.py +6 -17
  108. crackerjack/mixins/__init__.py +3 -0
  109. crackerjack/mixins/error_handling.py +145 -0
  110. crackerjack/models/config.py +21 -1
  111. crackerjack/models/config_adapter.py +49 -1
  112. crackerjack/models/protocols.py +176 -107
  113. crackerjack/models/resource_protocols.py +55 -210
  114. crackerjack/models/task.py +3 -0
  115. crackerjack/monitoring/ai_agent_watchdog.py +13 -13
  116. crackerjack/monitoring/metrics_collector.py +426 -0
  117. crackerjack/monitoring/regression_prevention.py +8 -8
  118. crackerjack/monitoring/websocket_server.py +643 -0
  119. crackerjack/orchestration/advanced_orchestrator.py +11 -6
  120. crackerjack/orchestration/coverage_improvement.py +3 -3
  121. crackerjack/orchestration/execution_strategies.py +26 -6
  122. crackerjack/orchestration/test_progress_streamer.py +8 -5
  123. crackerjack/plugins/base.py +2 -2
  124. crackerjack/plugins/hooks.py +7 -0
  125. crackerjack/plugins/managers.py +11 -8
  126. crackerjack/security/__init__.py +0 -1
  127. crackerjack/security/audit.py +90 -105
  128. crackerjack/services/anomaly_detector.py +392 -0
  129. crackerjack/services/api_extractor.py +615 -0
  130. crackerjack/services/backup_service.py +2 -2
  131. crackerjack/services/bounded_status_operations.py +15 -152
  132. crackerjack/services/cache.py +127 -1
  133. crackerjack/services/changelog_automation.py +395 -0
  134. crackerjack/services/config.py +18 -11
  135. crackerjack/services/config_merge.py +30 -85
  136. crackerjack/services/config_template.py +506 -0
  137. crackerjack/services/contextual_ai_assistant.py +48 -22
  138. crackerjack/services/coverage_badge_service.py +171 -0
  139. crackerjack/services/coverage_ratchet.py +41 -17
  140. crackerjack/services/debug.py +3 -3
  141. crackerjack/services/dependency_analyzer.py +460 -0
  142. crackerjack/services/dependency_monitor.py +14 -11
  143. crackerjack/services/documentation_generator.py +491 -0
  144. crackerjack/services/documentation_service.py +675 -0
  145. crackerjack/services/enhanced_filesystem.py +6 -5
  146. crackerjack/services/enterprise_optimizer.py +865 -0
  147. crackerjack/services/error_pattern_analyzer.py +676 -0
  148. crackerjack/services/file_hasher.py +1 -1
  149. crackerjack/services/git.py +41 -45
  150. crackerjack/services/health_metrics.py +10 -8
  151. crackerjack/services/heatmap_generator.py +735 -0
  152. crackerjack/services/initialization.py +30 -33
  153. crackerjack/services/input_validator.py +5 -97
  154. crackerjack/services/intelligent_commit.py +327 -0
  155. crackerjack/services/log_manager.py +15 -12
  156. crackerjack/services/logging.py +4 -3
  157. crackerjack/services/lsp_client.py +628 -0
  158. crackerjack/services/memory_optimizer.py +409 -0
  159. crackerjack/services/metrics.py +42 -33
  160. crackerjack/services/parallel_executor.py +416 -0
  161. crackerjack/services/pattern_cache.py +1 -1
  162. crackerjack/services/pattern_detector.py +6 -6
  163. crackerjack/services/performance_benchmarks.py +250 -576
  164. crackerjack/services/performance_cache.py +382 -0
  165. crackerjack/services/performance_monitor.py +565 -0
  166. crackerjack/services/predictive_analytics.py +510 -0
  167. crackerjack/services/quality_baseline.py +234 -0
  168. crackerjack/services/quality_baseline_enhanced.py +646 -0
  169. crackerjack/services/quality_intelligence.py +785 -0
  170. crackerjack/services/regex_patterns.py +605 -524
  171. crackerjack/services/regex_utils.py +43 -123
  172. crackerjack/services/secure_path_utils.py +5 -164
  173. crackerjack/services/secure_status_formatter.py +30 -141
  174. crackerjack/services/secure_subprocess.py +11 -92
  175. crackerjack/services/security.py +61 -30
  176. crackerjack/services/security_logger.py +18 -22
  177. crackerjack/services/server_manager.py +124 -16
  178. crackerjack/services/status_authentication.py +16 -159
  179. crackerjack/services/status_security_manager.py +4 -131
  180. crackerjack/services/terminal_utils.py +0 -0
  181. crackerjack/services/thread_safe_status_collector.py +19 -125
  182. crackerjack/services/unified_config.py +21 -13
  183. crackerjack/services/validation_rate_limiter.py +5 -54
  184. crackerjack/services/version_analyzer.py +459 -0
  185. crackerjack/services/version_checker.py +1 -1
  186. crackerjack/services/websocket_resource_limiter.py +10 -144
  187. crackerjack/services/zuban_lsp_service.py +390 -0
  188. crackerjack/slash_commands/__init__.py +2 -7
  189. crackerjack/slash_commands/run.md +2 -2
  190. crackerjack/tools/validate_input_validator_patterns.py +14 -40
  191. crackerjack/tools/validate_regex_patterns.py +19 -48
  192. {crackerjack-0.32.0.dist-info → crackerjack-0.33.1.dist-info}/METADATA +197 -26
  193. crackerjack-0.33.1.dist-info/RECORD +229 -0
  194. crackerjack/CLAUDE.md +0 -207
  195. crackerjack/RULES.md +0 -380
  196. crackerjack/py313.py +0 -234
  197. crackerjack-0.32.0.dist-info/RECORD +0 -180
  198. {crackerjack-0.32.0.dist-info → crackerjack-0.33.1.dist-info}/WHEEL +0 -0
  199. {crackerjack-0.32.0.dist-info → crackerjack-0.33.1.dist-info}/entry_points.txt +0 -0
  200. {crackerjack-0.32.0.dist-info → crackerjack-0.33.1.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,565 @@
1
+ import json
2
+ import typing as t
3
+ from collections import defaultdict, deque
4
+ from dataclasses import dataclass, field
5
+ from datetime import datetime
6
+ from pathlib import Path
7
+ from threading import Lock
8
+ from typing import Any
9
+
10
+ from crackerjack.services.logging import get_logger
11
+ from crackerjack.services.memory_optimizer import MemoryOptimizer
12
+ from crackerjack.services.performance_cache import get_performance_cache
13
+
14
+
15
@dataclass
class PerformanceMetric:
    """A single named measurement recorded while a workflow phase is active."""

    name: str  # metric identifier, e.g. "files_processed"
    value: float  # measured value in `unit`
    unit: str  # free-form unit label ("ms", "MB", ...); may be ""
    timestamp: datetime = field(default_factory=datetime.now)
    # Idiom fix: the original used field(default_factory=dict[str, t.Any]);
    # a plain `dict` factory produces the same empty dict per instance.
    metadata: dict[str, Any] = field(default_factory=dict)
22
+
23
+
24
@dataclass
class PhasePerformance:
    """Timing/resource record for one named phase of a monitored workflow.

    Populated incrementally by PerformanceMonitor: the boundary fields are
    written at phase start/end and the counters are bumped while active.
    """

    phase_name: str
    start_time: datetime
    end_time: datetime | None = None  # set by finalize()
    duration_seconds: float = 0.0  # derived by finalize()
    memory_start_mb: float = 0.0  # checkpoint taken at phase start
    memory_peak_mb: float = 0.0  # NOTE(review): never written by this class; caller-owned
    memory_end_mb: float = 0.0  # checkpoint taken at phase end
    cache_hits: int = 0
    cache_misses: int = 0
    parallel_operations: int = 0
    sequential_operations: int = 0
    success: bool = True
    metrics: list[PerformanceMetric] = field(default_factory=list)

    def finalize(self, end_time: datetime | None = None) -> None:
        """Stamp the end time (now if omitted) and compute the duration."""
        self.end_time = end_time or datetime.now()
        self.duration_seconds = (self.end_time - self.start_time).total_seconds()
43
+
44
+
45
@dataclass
class WorkflowPerformance:
    """Aggregated performance record for one monitored workflow run."""

    workflow_id: str
    start_time: datetime
    end_time: datetime | None = None
    total_duration_seconds: float = 0.0
    phases: list[PhasePerformance] = field(default_factory=list)
    overall_success: bool = True
    performance_score: float = 0.0

    def add_phase(self, phase: PhasePerformance) -> None:
        """Attach a completed phase record to this workflow."""
        self.phases.append(phase)

    def finalize(self, success: bool = True) -> None:
        """Stamp the end time, then derive total duration and composite score."""
        self.end_time = datetime.now()
        self.total_duration_seconds = (self.end_time - self.start_time).total_seconds()
        self.overall_success = success
        self.performance_score = self._calculate_performance_score()

    def _calculate_performance_score(self) -> float:
        """Composite 0-100 score.

        Components: duration (up to 100, minus 1 point per 10s), cache hit
        ratio (up to 20), parallel-operation ratio (up to 15), peak memory
        (up to 15, minus 1 point per 50 MB), success bonus (10). The sum is
        capped at 100. Returns 0.0 when no phases were recorded.
        """
        if not self.phases:
            return 0.0

        # Single accumulation pass over the phases.
        hits = misses = parallel = sequential = 0
        peak_mb = 0.0
        for ph in self.phases:
            hits += ph.cache_hits
            misses += ph.cache_misses
            parallel += ph.parallel_operations
            sequential += ph.sequential_operations
            peak_mb = max(peak_mb, ph.memory_peak_mb)

        duration_score = max(0, 100 - (self.total_duration_seconds / 10))
        cache_score = (hits / (hits + misses) if hits + misses > 0 else 0) * 20
        parallel_score = (
            parallel / (parallel + sequential) if parallel + sequential > 0 else 0
        ) * 15
        memory_score = max(0, 15 - (peak_mb / 50))
        success_score = 10 if self.overall_success else 0

        total = (
            duration_score + cache_score + parallel_score + memory_score + success_score
        )
        return min(100, total)
101
+
102
+
103
@dataclass
class PerformanceBenchmark:
    """Compares a current operation timing against a baseline timing."""

    operation_name: str
    baseline_duration_seconds: float
    current_duration_seconds: float
    improvement_percentage: float = 0.0  # positive means faster than baseline
    regression: bool = False  # True when current is slower than baseline

    def __post_init__(self) -> None:
        # With a zero (or negative) baseline no comparison is possible, so
        # the defaults above are left untouched.
        baseline = self.baseline_duration_seconds
        if baseline > 0:
            saved = baseline - self.current_duration_seconds
            self.improvement_percentage = saved / baseline * 100
            self.regression = self.improvement_percentage < 0
119
+
120
+
121
class PerformanceMonitor:
    """Collects timing, memory, cache, and parallelism metrics for workflows.

    Thread-safe: all mutable state is guarded by one non-reentrant ``Lock``.
    Because the lock is not reentrant, public methods must never call other
    lock-taking public methods while holding it (see export_performance_data).
    """

    def __init__(
        self,
        data_retention_days: int = 30,
        benchmark_history_size: int = 100,
    ):
        self.data_retention_days = data_retention_days
        self.benchmark_history_size = benchmark_history_size
        self._initialize_data_structures(benchmark_history_size)
        self._initialize_services()
        self._initialize_thresholds()

    def _initialize_data_structures(self, history_size: int) -> None:
        # In-flight and completed tracking state.
        self._active_workflows: dict[str, WorkflowPerformance] = {}
        self._active_phases: dict[str, PhasePerformance] = {}
        self._completed_workflows: deque[WorkflowPerformance] = deque(
            maxlen=history_size
        )
        self._benchmarks: dict[str, deque[float]] = defaultdict(
            lambda: deque(maxlen=history_size)  # type: ignore[arg-type]
        )
        # Cache-counter snapshot taken at phase start, keyed by phase key.
        # The global cache counters are cumulative for the whole process, so
        # end_phase must attribute only the delta to the phase (previously the
        # raw totals were assigned, double-counting across phases).
        self._phase_cache_baseline: dict[str, tuple[int, int]] = {}

    def _initialize_services(self) -> None:
        self._lock = Lock()
        self._logger = get_logger("crackerjack.performance_monitor")
        self._memory_optimizer = MemoryOptimizer.get_instance()
        self._cache = get_performance_cache()

    def _initialize_thresholds(self) -> None:
        # Soft limits; exceeding them only produces log warnings.
        self._warning_thresholds = {
            "duration_seconds": 30.0,
            "memory_mb": 100.0,
            "cache_hit_ratio": 0.5,
        }

    def start_workflow(self, workflow_id: str) -> None:
        """Begin monitoring a workflow; no-op if it is already being tracked."""
        with self._lock:
            if workflow_id in self._active_workflows:
                self._logger.warning(f"Workflow {workflow_id} already being monitored")
                return

            workflow = WorkflowPerformance(
                workflow_id=workflow_id,
                start_time=datetime.now(),
            )

            self._active_workflows[workflow_id] = workflow
            self._logger.debug(f"Started monitoring workflow: {workflow_id}")

            self._memory_optimizer.start_profiling()

    def end_workflow(
        self, workflow_id: str, success: bool = True
    ) -> WorkflowPerformance:
        """Finalize, archive, and return a workflow record.

        Unknown workflow ids get a warning and an empty placeholder record so
        callers never have to handle None.
        """
        with self._lock:
            if workflow_id not in self._active_workflows:
                self._logger.warning(f"Workflow {workflow_id} not found for ending")
                return WorkflowPerformance(
                    workflow_id=workflow_id, start_time=datetime.now()
                )

            workflow = self._active_workflows.pop(workflow_id)
            workflow.finalize(success)

            self._completed_workflows.append(workflow)

            self._logger.info(
                f"Completed workflow {workflow_id}: "
                f"{workflow.total_duration_seconds:.2f}s, "
                f"score: {workflow.performance_score:.1f}, "
                f"phases: {len(workflow.phases)}"
            )

            self._check_performance_warnings(workflow)

            return workflow

    def start_phase(self, workflow_id: str, phase_name: str) -> None:
        """Begin monitoring one phase of a workflow; no-op if already active."""
        phase_key = f"{workflow_id}: {phase_name}"

        with self._lock:
            if phase_key in self._active_phases:
                self._logger.warning(f"Phase {phase_key} already being monitored")
                return

            memory_mb = self._memory_optimizer.record_checkpoint(f"{phase_name}_start")

            # Snapshot the cumulative cache counters so end_phase can compute
            # this phase's delta.
            cache_stats = self._cache.get_stats()
            self._phase_cache_baseline[phase_key] = (
                cache_stats.hits,
                cache_stats.misses,
            )

            phase = PhasePerformance(
                phase_name=phase_name,
                start_time=datetime.now(),
                memory_start_mb=memory_mb,
            )

            self._active_phases[phase_key] = phase
            self._logger.debug(f"Started monitoring phase: {phase_key}")

    def end_phase(
        self, workflow_id: str, phase_name: str, success: bool = True
    ) -> PhasePerformance:
        """Finalize a phase, attach it to its workflow, and return it.

        Unknown phases get a warning and an empty placeholder record.
        """
        phase_key = f"{workflow_id}: {phase_name}"

        with self._lock:
            if phase_key not in self._active_phases:
                self._logger.warning(f"Phase {phase_key} not found for ending")
                return PhasePerformance(
                    phase_name=phase_name, start_time=datetime.now()
                )

            phase = self._active_phases.pop(phase_key)
            phase.success = success

            phase.memory_end_mb = self._memory_optimizer.record_checkpoint(
                f"{phase_name}_end"
            )
            # Best-effort peak: memory is only sampled at the two phase
            # boundaries, so use the larger checkpoint. (Previously this field
            # was never written, leaving it 0.0 and making the memory score
            # and memory warnings inert.)
            phase.memory_peak_mb = max(phase.memory_start_mb, phase.memory_end_mb)

            # Attribute only cache activity that happened during this phase;
            # the raw counters are process-wide cumulative totals.
            base_hits, base_misses = self._phase_cache_baseline.pop(
                phase_key, (0, 0)
            )
            cache_stats = self._cache.get_stats()
            phase.cache_hits = max(0, cache_stats.hits - base_hits)
            phase.cache_misses = max(0, cache_stats.misses - base_misses)

            phase.finalize()

            if workflow_id in self._active_workflows:
                self._active_workflows[workflow_id].add_phase(phase)

            self._logger.debug(
                f"Completed phase {phase_key}: {phase.duration_seconds:.2f}s"
            )

            return phase

    def record_metric(
        self,
        workflow_id: str,
        phase_name: str,
        metric_name: str,
        value: float,
        unit: str = "",
        metadata: dict[str, t.Any] | None = None,
    ) -> None:
        """Attach an arbitrary named metric to an active phase (warn if absent)."""
        metric = PerformanceMetric(
            name=metric_name,
            value=value,
            unit=unit,
            metadata=metadata or {},
        )

        phase_key = f"{workflow_id}: {phase_name}"

        with self._lock:
            if phase_key in self._active_phases:
                self._active_phases[phase_key].metrics.append(metric)
            else:
                self._logger.warning(
                    f"Phase {phase_key} not found for metric {metric_name}"
                )

    def record_parallel_operation(self, workflow_id: str, phase_name: str) -> None:
        """Count one parallel operation against an active phase (silent no-op otherwise)."""
        phase_key = f"{workflow_id}: {phase_name}"

        with self._lock:
            if phase_key in self._active_phases:
                self._active_phases[phase_key].parallel_operations += 1

    def record_sequential_operation(self, workflow_id: str, phase_name: str) -> None:
        """Count one sequential operation against an active phase (silent no-op otherwise)."""
        phase_key = f"{workflow_id}: {phase_name}"

        with self._lock:
            if phase_key in self._active_phases:
                self._active_phases[phase_key].sequential_operations += 1

    def benchmark_operation(
        self, operation_name: str, duration_seconds: float
    ) -> PerformanceBenchmark:
        """Record a timing sample and compare it against the running median.

        The baseline is the median of the retained history (which already
        includes the new sample). With only one sample, the operation is its
        own baseline.
        """
        with self._lock:
            history = self._benchmarks[operation_name]
            history.append(duration_seconds)

            if len(history) > 1:
                sorted_history = sorted(history)
                baseline = sorted_history[len(sorted_history) // 2]

                return PerformanceBenchmark(
                    operation_name=operation_name,
                    baseline_duration_seconds=baseline,
                    current_duration_seconds=duration_seconds,
                )
            else:
                return PerformanceBenchmark(
                    operation_name=operation_name,
                    baseline_duration_seconds=duration_seconds,
                    current_duration_seconds=duration_seconds,
                )

    def get_performance_summary(self, last_n_workflows: int = 10) -> dict[str, Any]:
        """Summarize the most recent completed workflows as a flat dict."""
        with self._lock:
            recent_workflows = list(self._completed_workflows)[-last_n_workflows:]

            if not recent_workflows:
                return {"message": "No completed workflows to analyze"}

            basic_stats = self._calculate_basic_workflow_stats(recent_workflows)
            cache_stats = self._calculate_cache_statistics(recent_workflows)
            parallel_stats = self._calculate_parallelization_statistics(
                recent_workflows
            )

            return (
                {"workflows_analyzed": len(recent_workflows)}
                | basic_stats
                | cache_stats
                | parallel_stats
            )

    def _calculate_basic_workflow_stats(
        self, workflows: list[WorkflowPerformance]
    ) -> dict[str, Any]:
        """Average duration/score and success rate. Caller guarantees non-empty."""
        total_duration = sum(w.total_duration_seconds for w in workflows)
        avg_duration = total_duration / len(workflows)
        avg_score = sum(w.performance_score for w in workflows) / len(workflows)
        success_rate = sum(1 for w in workflows if w.overall_success) / len(workflows)

        return {
            "avg_duration_seconds": round(avg_duration, 2),
            "avg_performance_score": round(avg_score, 1),
            "success_rate": round(success_rate, 2),
        }

    def _calculate_cache_statistics(
        self, workflows: list[WorkflowPerformance]
    ) -> dict[str, Any]:
        """Aggregate cache hit/miss totals and hit ratio across all phases."""
        total_cache_hits = sum(sum(p.cache_hits for p in w.phases) for w in workflows)
        total_cache_misses = sum(
            sum(p.cache_misses for p in w.phases) for w in workflows
        )

        cache_hit_ratio = (
            total_cache_hits / (total_cache_hits + total_cache_misses)
            if total_cache_hits + total_cache_misses > 0
            else 0
        )

        return {
            "cache_hit_ratio": round(cache_hit_ratio, 2),
            "total_cache_hits": total_cache_hits,
            "total_cache_misses": total_cache_misses,
        }

    def _calculate_parallelization_statistics(
        self, workflows: list[WorkflowPerformance]
    ) -> dict[str, Any]:
        """Aggregate parallel/sequential operation counts and their ratio."""
        total_parallel = sum(
            sum(p.parallel_operations for p in w.phases) for w in workflows
        )
        total_sequential = sum(
            sum(p.sequential_operations for p in w.phases) for w in workflows
        )

        parallel_ratio = (
            total_parallel / (total_parallel + total_sequential)
            if total_parallel + total_sequential > 0
            else 0
        )

        return {
            "parallel_operation_ratio": round(parallel_ratio, 2),
            "total_parallel_operations": total_parallel,
            "total_sequential_operations": total_sequential,
        }

    def get_benchmark_trends(self) -> dict[str, dict[str, Any]]:
        """Per-operation stats and recent-vs-older trend for benchmark history."""
        trends: dict[str, dict[str, Any]] = {}

        with self._lock:
            for operation_name, history in self._benchmarks.items():
                # At least two samples are needed for a trend.
                if len(history) < 2:
                    continue

                history_list = list(history)
                basic_stats = self._calculate_benchmark_basic_stats(history_list)
                trend_percentage = self._calculate_trend_percentage(history_list)

                trends[operation_name] = basic_stats | {
                    "trend_percentage": round(trend_percentage, 1),
                    "sample_count": len(history_list),
                }

        return trends

    def _calculate_benchmark_basic_stats(
        self, history_list: list[float]
    ) -> dict[str, float]:
        """Average/min/max of a benchmark's duration history."""
        avg_duration = sum(history_list) / len(history_list)
        min_duration = min(history_list)
        max_duration = max(history_list)

        return {
            "avg_duration_seconds": round(avg_duration, 3),
            "min_duration_seconds": round(min_duration, 3),
            "max_duration_seconds": round(max_duration, 3),
        }

    def _calculate_trend_percentage(self, history_list: list[float]) -> float:
        """Percent speed-up of the last 5 samples vs all earlier ones.

        Positive means the recent runs are faster. Returns 0.0 with fewer
        than 5 samples (not enough data to call a trend).
        """
        if len(history_list) < 5:
            return 0.0

        recent_avg = sum(history_list[-5:]) / 5
        older_avg = (
            sum(history_list[:-5]) / len(history_list[:-5])
            if len(history_list) > 5
            else recent_avg
        )

        return ((older_avg - recent_avg) / older_avg * 100) if older_avg > 0 else 0.0

    def export_performance_data(self, output_path: Path) -> None:
        """Write all retained monitoring data to *output_path* as JSON.

        Raises whatever ``output_path.open`` raises on I/O failure.
        """
        # DEADLOCK FIX: both helpers below acquire self._lock, which is a
        # non-reentrant Lock. The original called them while already holding
        # the lock inside the `with` block, which deadlocks on first use.
        # Compute them before taking the lock instead.
        summary = self.get_performance_summary()
        trends = self.get_benchmark_trends()

        with self._lock:
            data = {
                "export_timestamp": datetime.now().isoformat(),
                "completed_workflows": [
                    {
                        "workflow_id": w.workflow_id,
                        "start_time": w.start_time.isoformat(),
                        "end_time": w.end_time.isoformat() if w.end_time else None,
                        "duration_seconds": w.total_duration_seconds,
                        "performance_score": w.performance_score,
                        "success": w.overall_success,
                        "phases": [
                            {
                                "name": p.phase_name,
                                "duration_seconds": p.duration_seconds,
                                "memory_peak_mb": p.memory_peak_mb,
                                "cache_hits": p.cache_hits,
                                "cache_misses": p.cache_misses,
                                "parallel_operations": p.parallel_operations,
                                "sequential_operations": p.sequential_operations,
                                "success": p.success,
                            }
                            for p in w.phases
                        ],
                    }
                    for w in self._completed_workflows
                ],
                "benchmarks": {
                    name: list(history)
                    for name, history in self._benchmarks.items()
                },
                "summary": summary,
                "trends": trends,
            }

        # File I/O happens outside the lock; `data` is already a snapshot.
        with output_path.open("w") as f:
            json.dump(data, f, indent=2)

        self._logger.info(f"Exported performance data to {output_path}")

    def _check_performance_warnings(self, workflow: WorkflowPerformance) -> None:
        """Log (at debug level) any threshold violations for a finished workflow."""
        warnings = []

        warnings.extend(self._check_duration_warning(workflow))
        warnings.extend(self._check_memory_warning(workflow))
        warnings.extend(self._check_cache_warning(workflow))

        for warning in warnings:
            self._logger.debug(
                f"Performance warning for {workflow.workflow_id}: {warning}"
            )

    def _check_duration_warning(self, workflow: WorkflowPerformance) -> list[str]:
        """Zero-or-one warning for workflows exceeding the duration threshold."""
        if (
            workflow.total_duration_seconds
            > self._warning_thresholds["duration_seconds"]
        ):
            return [
                f"Slow workflow duration: {workflow.total_duration_seconds:.1f}s "
                f"(threshold: {self._warning_thresholds['duration_seconds']}s)"
            ]
        return []

    def _check_memory_warning(self, workflow: WorkflowPerformance) -> list[str]:
        """Zero-or-one warning for workflows exceeding the peak-memory threshold."""
        max_memory = max((p.memory_peak_mb for p in workflow.phases), default=0)
        if max_memory > self._warning_thresholds["memory_mb"]:
            return [
                f"High memory usage: {max_memory:.1f}MB "
                f"(threshold: {self._warning_thresholds['memory_mb']}MB)"
            ]
        return []

    def _check_cache_warning(self, workflow: WorkflowPerformance) -> list[str]:
        """Zero-or-one warning for workflows below the cache hit-ratio threshold."""
        total_hits = sum(p.cache_hits for p in workflow.phases)
        total_misses = sum(p.cache_misses for p in workflow.phases)

        if total_hits + total_misses > 0:
            hit_ratio = total_hits / (total_hits + total_misses)
            if hit_ratio < self._warning_thresholds["cache_hit_ratio"]:
                return [
                    f"Low cache hit ratio: {hit_ratio:.2f} "
                    f"(threshold: {self._warning_thresholds['cache_hit_ratio']})"
                ]
        return []
525
+
526
+
527
_global_monitor: PerformanceMonitor | None = None  # lazily-created process-wide singleton
_monitor_lock = Lock()  # guards singleton creation only


def get_performance_monitor() -> PerformanceMonitor:
    """Return the process-wide PerformanceMonitor, creating it on first use.

    Creation always happens under the module lock, so exactly one instance
    is ever constructed even with concurrent callers.
    """
    global _global_monitor
    with _monitor_lock:
        if _global_monitor is None:
            _global_monitor = PerformanceMonitor()
        return _global_monitor
537
+
538
+
539
class phase_monitor:
    """Context manager that times one workflow phase via the global monitor.

    Usage: ``with phase_monitor(workflow_id, name) as pm: ...``. Phase success
    is inferred from whether the ``with`` body raised.

    NOTE(review): the lowercase class name breaks PEP 8 naming (it reads like
    a function); renaming would break existing callers, so it is left as-is.
    """

    def __init__(self, workflow_id: str, phase_name: str):
        self.workflow_id = workflow_id
        self.phase_name = phase_name
        self.monitor = get_performance_monitor()

    def __enter__(self) -> "phase_monitor":
        self.monitor.start_phase(self.workflow_id, self.phase_name)
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: object | None,
    ) -> None:
        # Any exception in the body marks the phase as failed; the implicit
        # None return means the exception is NOT suppressed.
        success = exc_type is None
        self.monitor.end_phase(self.workflow_id, self.phase_name, success)

    def record_parallel_op(self) -> None:
        """Count one parallel operation against the active phase."""
        self.monitor.record_parallel_operation(self.workflow_id, self.phase_name)

    def record_sequential_op(self) -> None:
        """Count one sequential operation against the active phase."""
        self.monitor.record_sequential_operation(self.workflow_id, self.phase_name)

    def record_metric(self, name: str, value: float, unit: str = "") -> None:
        """Record an arbitrary named metric for the active phase."""
        self.monitor.record_metric(self.workflow_id, self.phase_name, name, value, unit)