crackerjack 0.33.0__py3-none-any.whl → 0.33.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crackerjack might be problematic. Click here for more details.

Files changed (198) hide show
  1. crackerjack/__main__.py +1350 -34
  2. crackerjack/adapters/__init__.py +17 -0
  3. crackerjack/adapters/lsp_client.py +358 -0
  4. crackerjack/adapters/rust_tool_adapter.py +194 -0
  5. crackerjack/adapters/rust_tool_manager.py +193 -0
  6. crackerjack/adapters/skylos_adapter.py +231 -0
  7. crackerjack/adapters/zuban_adapter.py +560 -0
  8. crackerjack/agents/base.py +7 -3
  9. crackerjack/agents/coordinator.py +271 -33
  10. crackerjack/agents/documentation_agent.py +9 -15
  11. crackerjack/agents/dry_agent.py +3 -15
  12. crackerjack/agents/formatting_agent.py +1 -1
  13. crackerjack/agents/import_optimization_agent.py +36 -180
  14. crackerjack/agents/performance_agent.py +17 -98
  15. crackerjack/agents/performance_helpers.py +7 -31
  16. crackerjack/agents/proactive_agent.py +1 -3
  17. crackerjack/agents/refactoring_agent.py +16 -85
  18. crackerjack/agents/refactoring_helpers.py +7 -42
  19. crackerjack/agents/security_agent.py +9 -48
  20. crackerjack/agents/test_creation_agent.py +356 -513
  21. crackerjack/agents/test_specialist_agent.py +0 -4
  22. crackerjack/api.py +6 -25
  23. crackerjack/cli/cache_handlers.py +204 -0
  24. crackerjack/cli/cache_handlers_enhanced.py +683 -0
  25. crackerjack/cli/facade.py +100 -0
  26. crackerjack/cli/handlers.py +224 -9
  27. crackerjack/cli/interactive.py +6 -4
  28. crackerjack/cli/options.py +642 -55
  29. crackerjack/cli/utils.py +2 -1
  30. crackerjack/code_cleaner.py +58 -117
  31. crackerjack/config/global_lock_config.py +8 -48
  32. crackerjack/config/hooks.py +53 -62
  33. crackerjack/core/async_workflow_orchestrator.py +24 -34
  34. crackerjack/core/autofix_coordinator.py +3 -17
  35. crackerjack/core/enhanced_container.py +4 -13
  36. crackerjack/core/file_lifecycle.py +12 -89
  37. crackerjack/core/performance.py +2 -2
  38. crackerjack/core/performance_monitor.py +15 -55
  39. crackerjack/core/phase_coordinator.py +104 -204
  40. crackerjack/core/resource_manager.py +14 -90
  41. crackerjack/core/service_watchdog.py +62 -95
  42. crackerjack/core/session_coordinator.py +149 -0
  43. crackerjack/core/timeout_manager.py +14 -72
  44. crackerjack/core/websocket_lifecycle.py +13 -78
  45. crackerjack/core/workflow_orchestrator.py +171 -174
  46. crackerjack/docs/INDEX.md +11 -0
  47. crackerjack/docs/generated/api/API_REFERENCE.md +10895 -0
  48. crackerjack/docs/generated/api/CLI_REFERENCE.md +109 -0
  49. crackerjack/docs/generated/api/CROSS_REFERENCES.md +1755 -0
  50. crackerjack/docs/generated/api/PROTOCOLS.md +3 -0
  51. crackerjack/docs/generated/api/SERVICES.md +1252 -0
  52. crackerjack/documentation/__init__.py +31 -0
  53. crackerjack/documentation/ai_templates.py +756 -0
  54. crackerjack/documentation/dual_output_generator.py +765 -0
  55. crackerjack/documentation/mkdocs_integration.py +518 -0
  56. crackerjack/documentation/reference_generator.py +977 -0
  57. crackerjack/dynamic_config.py +55 -50
  58. crackerjack/executors/async_hook_executor.py +10 -15
  59. crackerjack/executors/cached_hook_executor.py +117 -43
  60. crackerjack/executors/hook_executor.py +8 -34
  61. crackerjack/executors/hook_lock_manager.py +26 -183
  62. crackerjack/executors/individual_hook_executor.py +13 -11
  63. crackerjack/executors/lsp_aware_hook_executor.py +270 -0
  64. crackerjack/executors/tool_proxy.py +417 -0
  65. crackerjack/hooks/lsp_hook.py +79 -0
  66. crackerjack/intelligence/adaptive_learning.py +25 -10
  67. crackerjack/intelligence/agent_orchestrator.py +2 -5
  68. crackerjack/intelligence/agent_registry.py +34 -24
  69. crackerjack/intelligence/agent_selector.py +5 -7
  70. crackerjack/interactive.py +17 -6
  71. crackerjack/managers/async_hook_manager.py +0 -1
  72. crackerjack/managers/hook_manager.py +79 -1
  73. crackerjack/managers/publish_manager.py +44 -8
  74. crackerjack/managers/test_command_builder.py +1 -15
  75. crackerjack/managers/test_executor.py +1 -3
  76. crackerjack/managers/test_manager.py +98 -7
  77. crackerjack/managers/test_manager_backup.py +10 -9
  78. crackerjack/mcp/cache.py +2 -2
  79. crackerjack/mcp/client_runner.py +1 -1
  80. crackerjack/mcp/context.py +191 -68
  81. crackerjack/mcp/dashboard.py +7 -5
  82. crackerjack/mcp/enhanced_progress_monitor.py +31 -28
  83. crackerjack/mcp/file_monitor.py +30 -23
  84. crackerjack/mcp/progress_components.py +31 -21
  85. crackerjack/mcp/progress_monitor.py +50 -53
  86. crackerjack/mcp/rate_limiter.py +6 -6
  87. crackerjack/mcp/server_core.py +17 -16
  88. crackerjack/mcp/service_watchdog.py +2 -1
  89. crackerjack/mcp/state.py +4 -7
  90. crackerjack/mcp/task_manager.py +11 -9
  91. crackerjack/mcp/tools/core_tools.py +173 -32
  92. crackerjack/mcp/tools/error_analyzer.py +3 -2
  93. crackerjack/mcp/tools/execution_tools.py +8 -10
  94. crackerjack/mcp/tools/execution_tools_backup.py +42 -30
  95. crackerjack/mcp/tools/intelligence_tool_registry.py +7 -5
  96. crackerjack/mcp/tools/intelligence_tools.py +5 -2
  97. crackerjack/mcp/tools/monitoring_tools.py +33 -70
  98. crackerjack/mcp/tools/proactive_tools.py +24 -11
  99. crackerjack/mcp/tools/progress_tools.py +5 -8
  100. crackerjack/mcp/tools/utility_tools.py +20 -14
  101. crackerjack/mcp/tools/workflow_executor.py +62 -40
  102. crackerjack/mcp/websocket/app.py +8 -0
  103. crackerjack/mcp/websocket/endpoints.py +352 -357
  104. crackerjack/mcp/websocket/jobs.py +40 -57
  105. crackerjack/mcp/websocket/monitoring_endpoints.py +2935 -0
  106. crackerjack/mcp/websocket/server.py +7 -25
  107. crackerjack/mcp/websocket/websocket_handler.py +6 -17
  108. crackerjack/mixins/__init__.py +0 -2
  109. crackerjack/mixins/error_handling.py +1 -70
  110. crackerjack/models/config.py +12 -1
  111. crackerjack/models/config_adapter.py +49 -1
  112. crackerjack/models/protocols.py +122 -122
  113. crackerjack/models/resource_protocols.py +55 -210
  114. crackerjack/monitoring/ai_agent_watchdog.py +13 -13
  115. crackerjack/monitoring/metrics_collector.py +426 -0
  116. crackerjack/monitoring/regression_prevention.py +8 -8
  117. crackerjack/monitoring/websocket_server.py +643 -0
  118. crackerjack/orchestration/advanced_orchestrator.py +11 -6
  119. crackerjack/orchestration/coverage_improvement.py +3 -3
  120. crackerjack/orchestration/execution_strategies.py +26 -6
  121. crackerjack/orchestration/test_progress_streamer.py +8 -5
  122. crackerjack/plugins/base.py +2 -2
  123. crackerjack/plugins/hooks.py +7 -0
  124. crackerjack/plugins/managers.py +11 -8
  125. crackerjack/security/__init__.py +0 -1
  126. crackerjack/security/audit.py +6 -35
  127. crackerjack/services/anomaly_detector.py +392 -0
  128. crackerjack/services/api_extractor.py +615 -0
  129. crackerjack/services/backup_service.py +2 -2
  130. crackerjack/services/bounded_status_operations.py +15 -152
  131. crackerjack/services/cache.py +127 -1
  132. crackerjack/services/changelog_automation.py +395 -0
  133. crackerjack/services/config.py +15 -9
  134. crackerjack/services/config_merge.py +19 -80
  135. crackerjack/services/config_template.py +506 -0
  136. crackerjack/services/contextual_ai_assistant.py +48 -22
  137. crackerjack/services/coverage_badge_service.py +171 -0
  138. crackerjack/services/coverage_ratchet.py +27 -25
  139. crackerjack/services/debug.py +3 -3
  140. crackerjack/services/dependency_analyzer.py +460 -0
  141. crackerjack/services/dependency_monitor.py +14 -11
  142. crackerjack/services/documentation_generator.py +491 -0
  143. crackerjack/services/documentation_service.py +675 -0
  144. crackerjack/services/enhanced_filesystem.py +6 -5
  145. crackerjack/services/enterprise_optimizer.py +865 -0
  146. crackerjack/services/error_pattern_analyzer.py +676 -0
  147. crackerjack/services/file_hasher.py +1 -1
  148. crackerjack/services/git.py +8 -25
  149. crackerjack/services/health_metrics.py +10 -8
  150. crackerjack/services/heatmap_generator.py +735 -0
  151. crackerjack/services/initialization.py +11 -30
  152. crackerjack/services/input_validator.py +5 -97
  153. crackerjack/services/intelligent_commit.py +327 -0
  154. crackerjack/services/log_manager.py +15 -12
  155. crackerjack/services/logging.py +4 -3
  156. crackerjack/services/lsp_client.py +628 -0
  157. crackerjack/services/memory_optimizer.py +19 -87
  158. crackerjack/services/metrics.py +42 -33
  159. crackerjack/services/parallel_executor.py +9 -67
  160. crackerjack/services/pattern_cache.py +1 -1
  161. crackerjack/services/pattern_detector.py +6 -6
  162. crackerjack/services/performance_benchmarks.py +18 -59
  163. crackerjack/services/performance_cache.py +20 -81
  164. crackerjack/services/performance_monitor.py +27 -95
  165. crackerjack/services/predictive_analytics.py +510 -0
  166. crackerjack/services/quality_baseline.py +234 -0
  167. crackerjack/services/quality_baseline_enhanced.py +646 -0
  168. crackerjack/services/quality_intelligence.py +785 -0
  169. crackerjack/services/regex_patterns.py +618 -524
  170. crackerjack/services/regex_utils.py +43 -123
  171. crackerjack/services/secure_path_utils.py +5 -164
  172. crackerjack/services/secure_status_formatter.py +30 -141
  173. crackerjack/services/secure_subprocess.py +11 -92
  174. crackerjack/services/security.py +9 -41
  175. crackerjack/services/security_logger.py +12 -24
  176. crackerjack/services/server_manager.py +124 -16
  177. crackerjack/services/status_authentication.py +16 -159
  178. crackerjack/services/status_security_manager.py +4 -131
  179. crackerjack/services/thread_safe_status_collector.py +19 -125
  180. crackerjack/services/unified_config.py +21 -13
  181. crackerjack/services/validation_rate_limiter.py +5 -54
  182. crackerjack/services/version_analyzer.py +459 -0
  183. crackerjack/services/version_checker.py +1 -1
  184. crackerjack/services/websocket_resource_limiter.py +10 -144
  185. crackerjack/services/zuban_lsp_service.py +390 -0
  186. crackerjack/slash_commands/__init__.py +2 -7
  187. crackerjack/slash_commands/run.md +2 -2
  188. crackerjack/tools/validate_input_validator_patterns.py +14 -40
  189. crackerjack/tools/validate_regex_patterns.py +19 -48
  190. {crackerjack-0.33.0.dist-info → crackerjack-0.33.2.dist-info}/METADATA +196 -25
  191. crackerjack-0.33.2.dist-info/RECORD +229 -0
  192. crackerjack/CLAUDE.md +0 -207
  193. crackerjack/RULES.md +0 -380
  194. crackerjack/py313.py +0 -234
  195. crackerjack-0.33.0.dist-info/RECORD +0 -187
  196. {crackerjack-0.33.0.dist-info → crackerjack-0.33.2.dist-info}/WHEEL +0 -0
  197. {crackerjack-0.33.0.dist-info → crackerjack-0.33.2.dist-info}/entry_points.txt +0 -0
  198. {crackerjack-0.33.0.dist-info → crackerjack-0.33.2.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,426 @@
1
+ """Real-time metrics collection service for unified monitoring dashboard."""
2
+
3
+ import asyncio
4
+ import logging
5
+ import time
6
+ import typing as t
7
+ from collections import deque
8
+ from collections.abc import Callable
9
+ from dataclasses import asdict, dataclass, field
10
+ from datetime import datetime, timedelta
11
+
12
+ from rich.console import Console
13
+
14
+ from crackerjack.monitoring.ai_agent_watchdog import AgentPerformanceMetrics
15
+ from crackerjack.services.cache import CrackerjackCache
16
+
17
+ logger = logging.getLogger(__name__)
18
+
19
+
20
@dataclass
class SystemMetrics:
    """System-level performance metrics.

    Every field defaults to zero so a snapshot can be constructed even when
    no probe data is available (e.g. psutil missing).
    """

    cpu_usage: float = 0.0  # percent, as reported by the system probe
    memory_usage_mb: float = 0.0
    disk_usage_gb: float = 0.0
    active_processes: int = 0
    uptime_seconds: float = 0.0

    def to_dict(self) -> dict[str, t.Any]:
        """Return a plain ``field name -> value`` mapping of this snapshot."""
        serialized: dict[str, t.Any] = asdict(self)
        return serialized
32
+
33
+
34
@dataclass
class QualityMetrics:
    """Code quality and workflow metrics.

    Defaults are all zero so an empty snapshot is valid before any run has
    populated the quality cache.
    """

    total_issues_found: int = 0
    issues_fixed: int = 0
    success_rate: float = 0.0
    average_confidence: float = 0.0
    test_coverage: float = 0.0
    complexity_violations: int = 0
    security_issues: int = 0
    performance_issues: int = 0

    def to_dict(self) -> dict[str, t.Any]:
        """Return a plain ``field name -> value`` mapping of this snapshot."""
        serialized: dict[str, t.Any] = asdict(self)
        return serialized
49
+
50
+
51
@dataclass
class WorkflowMetrics:
    """Workflow execution and timing metrics."""

    jobs_completed: int = 0
    jobs_failed: int = 0
    average_job_duration: float = 0.0  # seconds
    fastest_job_time: float = 0.0
    slowest_job_time: float = 0.0
    queue_depth: int = 0
    throughput_per_hour: float = 0.0

    def to_dict(self) -> dict[str, t.Any]:
        """Return a plain ``field name -> value`` mapping of this snapshot."""
        serialized: dict[str, t.Any] = asdict(self)
        return serialized
65
+
66
+
67
@dataclass
class AgentMetrics:
    """AI agent performance and utilization metrics."""

    active_agents: int = 0
    total_fixes_applied: int = 0
    cache_hit_rate: float = 0.0  # fraction in [0, 1]; rendered as a percentage
    average_response_time: float = 0.0  # seconds
    # Fix: default_factory was ``dict[str, t.Any]``, which disagreed with the
    # declared value types below; a plain ``dict`` factory is equivalent at
    # runtime and consistent with the annotations.
    agent_effectiveness: dict[str, float] = field(default_factory=dict)
    issue_type_distribution: dict[str, int] = field(default_factory=dict)

    def to_dict(self) -> dict[str, t.Any]:
        """Return a plain ``field name -> value`` mapping of this snapshot."""
        return asdict(self)
80
+
81
+
82
@dataclass
class UnifiedDashboardMetrics:
    """Combined metrics snapshot for unified dashboard display."""

    timestamp: datetime = field(default_factory=datetime.now)
    system: SystemMetrics = field(default_factory=SystemMetrics)
    quality: QualityMetrics = field(default_factory=QualityMetrics)
    workflow: WorkflowMetrics = field(default_factory=WorkflowMetrics)
    agents: AgentMetrics = field(default_factory=AgentMetrics)

    def to_dict(self) -> dict[str, t.Any]:
        """Serialize the snapshot, ISO-formatting the timestamp and
        delegating each section to its own ``to_dict``."""
        serialized: dict[str, t.Any] = {"timestamp": self.timestamp.isoformat()}
        for section in ("system", "quality", "workflow", "agents"):
            serialized[section] = getattr(self, section).to_dict()
        return serialized
100
+
101
+
102
class MetricsCollector:
    """
    Real-time metrics collection service for unified monitoring dashboard.

    Collects and aggregates metrics from multiple sources:
    - System performance (CPU, memory, disk) via ``psutil`` when available
    - Code quality (issues, coverage, complexity) from the shared cache
    - Workflow execution (jobs, timing, throughput)
    - AI agent performance (fixes, confidence, cache hits)
    """

    def __init__(self, cache: CrackerjackCache | None = None) -> None:
        self.console = Console()
        self.cache = cache or CrackerjackCache()

        self.is_collecting = False
        self.collection_interval = 5.0  # seconds between collection passes
        self.history_size = 100  # bounds both snapshot history and job timings

        # Metrics storage: bounded rolling window of snapshots.
        self.metrics_history: deque[UnifiedDashboardMetrics] = deque(
            maxlen=self.history_size
        )
        self.current_metrics = UnifiedDashboardMetrics()

        # Latest per-agent performance metrics, keyed by agent name.
        self.agent_metrics: dict[str, AgentPerformanceMetrics] = {}

        # Workflow tracking: start timestamps by job id, completed durations.
        self.job_start_times: dict[str, float] = {}
        self.job_durations: list[float] = []

        # Background collection task and registered snapshot listeners.
        self.collection_task: asyncio.Task[t.Any] | None = None
        self.listeners: list[Callable[[UnifiedDashboardMetrics], None]] = []

    async def start_collection(self) -> None:
        """Start the background metrics collection loop (idempotent)."""
        if self.is_collecting:
            logger.warning("Metrics collection already running")
            return

        self.is_collecting = True
        self.collection_task = asyncio.create_task(self._collection_loop())

        logger.info("🔍 Metrics collection started")

    async def stop_collection(self) -> None:
        """Stop the collection loop and wait for the task to shut down."""
        self.is_collecting = False

        if self.collection_task:
            self.collection_task.cancel()
            try:
                await self.collection_task
            except asyncio.CancelledError:
                pass
            # Drop the reference so a later start_collection() begins cleanly.
            self.collection_task = None

        logger.info("🔍 Metrics collection stopped")

    def add_metrics_listener(
        self, callback: Callable[[UnifiedDashboardMetrics], None]
    ) -> None:
        """Register a callback invoked with each new metrics snapshot."""
        self.listeners.append(callback)

    def remove_metrics_listener(
        self, callback: Callable[[UnifiedDashboardMetrics], None]
    ) -> None:
        """Unregister a previously added callback (no-op if not registered)."""
        if callback in self.listeners:
            self.listeners.remove(callback)

    async def _collection_loop(self) -> None:
        """Main loop: collect a snapshot every ``collection_interval`` seconds."""
        while self.is_collecting:
            try:
                await self._collect_all_metrics()
                await asyncio.sleep(self.collection_interval)
            except asyncio.CancelledError:
                break
            except Exception:
                # Keep the loop alive on unexpected errors; logger.exception
                # preserves the traceback for debugging.
                logger.exception("Error in metrics collection")
                await asyncio.sleep(self.collection_interval)

    async def _collect_all_metrics(self) -> None:
        """Collect one snapshot from all sources, store it, notify listeners."""
        system_metrics = await self._collect_system_metrics()
        quality_metrics = await self._collect_quality_metrics()
        workflow_metrics = await self._collect_workflow_metrics()
        agent_metrics = await self._collect_agent_metrics()

        unified_metrics = UnifiedDashboardMetrics(
            system=system_metrics,
            quality=quality_metrics,
            workflow=workflow_metrics,
            agents=agent_metrics,
        )

        self.current_metrics = unified_metrics
        self.metrics_history.append(unified_metrics)

        # Notify listeners; one failing listener must not break the others.
        for listener in self.listeners:
            try:
                listener(unified_metrics)
            except Exception:
                logger.exception("Error in metrics listener")

    async def _collect_system_metrics(self) -> SystemMetrics:
        """Collect system performance metrics (zeroed if psutil is missing)."""
        try:
            import psutil

            cpu_usage = psutil.cpu_percent(interval=None)
            memory = psutil.virtual_memory()
            memory_mb = memory.used / (1024 * 1024)
            disk = psutil.disk_usage("/")
            disk_gb = disk.used / (1024 * 1024 * 1024)

            return SystemMetrics(
                cpu_usage=cpu_usage,
                memory_usage_mb=memory_mb,
                disk_usage_gb=disk_gb,
                active_processes=len(psutil.pids()),
                uptime_seconds=time.time() - psutil.boot_time(),
            )
        except ImportError:
            # psutil is optional; fall back to an all-zero snapshot.
            return SystemMetrics()
        except Exception:
            logger.exception("Error collecting system metrics")
            return SystemMetrics()

    async def _collect_quality_metrics(self) -> QualityMetrics:
        """Collect code quality metrics from cached data of recent runs."""
        try:
            quality_data = self.cache.get("quality_metrics", {})

            return QualityMetrics(
                total_issues_found=quality_data.get("total_issues", 0),
                issues_fixed=quality_data.get("issues_fixed", 0),
                success_rate=quality_data.get("success_rate", 0.0),
                average_confidence=quality_data.get("avg_confidence", 0.0),
                test_coverage=quality_data.get("test_coverage", 0.0),
                complexity_violations=quality_data.get("complexity_violations", 0),
                security_issues=quality_data.get("security_issues", 0),
                performance_issues=quality_data.get("performance_issues", 0),
            )
        except Exception:
            logger.exception("Error collecting quality metrics")
            return QualityMetrics()

    async def _collect_workflow_metrics(self) -> WorkflowMetrics:
        """Collect workflow execution metrics (job counts, timing, throughput)."""
        try:
            workflow_data = self.cache.get("workflow_metrics", {})

            recent_jobs = len([d for d in self.job_durations if d > 0])
            # NOTE(review): max(1, interval / 3600) clamps the denominator to 1
            # for any interval shorter than an hour, so this reports the raw
            # recent-job count rather than a true hourly rate — confirm intent.
            throughput = recent_jobs / max(
                1, self.collection_interval / 3600
            )  # per hour

            return WorkflowMetrics(
                jobs_completed=workflow_data.get("jobs_completed", 0),
                jobs_failed=workflow_data.get("jobs_failed", 0),
                average_job_duration=sum(self.job_durations)
                / max(1, len(self.job_durations)),
                fastest_job_time=min(self.job_durations) if self.job_durations else 0.0,
                slowest_job_time=max(self.job_durations) if self.job_durations else 0.0,
                queue_depth=workflow_data.get("queue_depth", 0),
                throughput_per_hour=throughput,
            )
        except Exception:
            logger.exception("Error collecting workflow metrics")
            return WorkflowMetrics()

    async def _collect_agent_metrics(self) -> AgentMetrics:
        """Aggregate per-agent performance into dashboard-level agent metrics."""
        try:
            agent_data = self.cache.get("agent_metrics", {})

            effectiveness: dict[str, float] = {}
            issue_distribution: dict[str, int] = {}
            total_fixes = 0
            total_response_time = 0.0

            for agent_name, metrics in self.agent_metrics.items():
                if metrics.total_issues_handled > 0:
                    success_rate = (
                        metrics.successful_fixes / metrics.total_issues_handled
                    )
                    effectiveness[agent_name] = success_rate
                    total_fixes += metrics.successful_fixes
                    total_response_time += metrics.average_execution_time

                    for issue_type, count in metrics.issue_types_handled.items():
                        issue_distribution[issue_type.value] = (
                            issue_distribution.get(issue_type.value, 0) + count
                        )

            # Cache hit rate from agent usage stats; max() guards divide-by-zero.
            cache_stats = agent_data.get("cache_stats", {})
            cache_hits = cache_stats.get("hits", 0)
            cache_misses = cache_stats.get("misses", 0)
            cache_hit_rate = cache_hits / max(1, cache_hits + cache_misses)

            return AgentMetrics(
                active_agents=len(self.agent_metrics),
                total_fixes_applied=total_fixes,
                cache_hit_rate=cache_hit_rate,
                average_response_time=total_response_time
                / max(1, len(self.agent_metrics)),
                agent_effectiveness=effectiveness,
                issue_type_distribution=issue_distribution,
            )
        except Exception:
            logger.exception("Error collecting agent metrics")
            return AgentMetrics()

    # Integration methods for external systems

    def record_job_start(self, job_id: str) -> None:
        """Record a job's start time so its duration can be computed later."""
        self.job_start_times[job_id] = time.time()

    def record_job_completion(self, job_id: str, success: bool = True) -> None:
        """Record job completion and calculate duration.

        Updates the cached completion/failure counters; when the job's start
        was recorded, also stores its duration (bounded to ``history_size``).
        """
        if job_id in self.job_start_times:
            duration = time.time() - self.job_start_times[job_id]
            self.job_durations.append(duration)
            del self.job_start_times[job_id]

            # Keep only the most recent durations to bound memory use.
            if len(self.job_durations) > self.history_size:
                self.job_durations = self.job_durations[-self.history_size :]

        # Update cached workflow counters.
        workflow_data = self.cache.get("workflow_metrics", {})
        if success:
            workflow_data["jobs_completed"] = (
                workflow_data.get("jobs_completed", 0) + 1
            )
        else:
            workflow_data["jobs_failed"] = workflow_data.get("jobs_failed", 0) + 1
        self.cache.set("workflow_metrics", workflow_data)

    def record_agent_performance(
        self, agent_name: str, metrics: AgentPerformanceMetrics
    ) -> None:
        """Store the latest performance metrics for the named agent."""
        self.agent_metrics[agent_name] = metrics

    def record_quality_data(
        self, issues_found: int, issues_fixed: int, coverage: float, success_rate: float
    ) -> None:
        """Persist quality metrics from a run into the shared cache."""
        quality_data = {
            "total_issues": issues_found,
            "issues_fixed": issues_fixed,
            "test_coverage": coverage,
            "success_rate": success_rate,
            "timestamp": datetime.now().isoformat(),
        }
        self.cache.set("quality_metrics", quality_data)

    def get_current_metrics(self) -> UnifiedDashboardMetrics:
        """Return the most recent metrics snapshot."""
        return self.current_metrics

    def get_metrics_history(self, hours: int = 1) -> list[UnifiedDashboardMetrics]:
        """Return snapshots collected within the last ``hours`` hours."""
        cutoff = datetime.now() - timedelta(hours=hours)
        return [m for m in self.metrics_history if m.timestamp > cutoff]

    def get_metrics_summary(self) -> dict[str, t.Any]:
        """Return a pre-formatted (string-valued) summary for display."""
        metrics = self.current_metrics

        return {
            "system": {
                "cpu": f"{metrics.system.cpu_usage:.1f}%",
                "memory": f"{metrics.system.memory_usage_mb:.0f}MB",
                "disk": f"{metrics.system.disk_usage_gb:.1f}GB",
                "uptime": self._format_uptime(metrics.system.uptime_seconds),
            },
            "quality": {
                "success_rate": f"{metrics.quality.success_rate:.1%}",
                "issues_fixed": metrics.quality.issues_fixed,
                "test_coverage": f"{metrics.quality.test_coverage:.1%}",
                "avg_confidence": f"{metrics.quality.average_confidence:.2f}",
            },
            "workflow": {
                "jobs_completed": metrics.workflow.jobs_completed,
                "avg_duration": f"{metrics.workflow.average_job_duration:.1f}s",
                "throughput": f"{metrics.workflow.throughput_per_hour:.1f}/h",
                "queue_depth": metrics.workflow.queue_depth,
            },
            "agents": {
                "active_agents": metrics.agents.active_agents,
                "total_fixes": metrics.agents.total_fixes_applied,
                "cache_hit_rate": f"{metrics.agents.cache_hit_rate:.1%}",
                "avg_response": f"{metrics.agents.average_response_time:.1f}s",
            },
        }

    def _format_uptime(self, seconds: float) -> str:
        """Format an uptime in seconds as minutes, hours, or days."""
        if seconds < 3600:
            return f"{seconds / 60:.0f}m"
        elif seconds < 86400:
            return f"{seconds / 3600:.0f}h"
        return f"{seconds / 86400:.0f}d"
@@ -48,7 +48,7 @@ class RegressionPreventionSystem:
48
48
  self._initialize_known_patterns()
49
49
  self._load_patterns_from_file()
50
50
 
51
- def _initialize_known_patterns(self):
51
+ def _initialize_known_patterns(self) -> None:
52
52
  self.register_regression_pattern(
53
53
  pattern_id="detect_agent_needs_complexity_22",
54
54
  name="detect_agent_needs Complexity Failure",
@@ -162,7 +162,7 @@ class RegressionPreventionSystem:
162
162
  agent_name: str,
163
163
  issue_type: IssueType,
164
164
  test_cases: list[dict[str, Any]] | None = None,
165
- ):
165
+ ) -> None:
166
166
  pattern = RegressionPattern(
167
167
  pattern_id=pattern_id,
168
168
  name=name,
@@ -313,7 +313,7 @@ class RegressionPreventionSystem:
313
313
  return "critical"
314
314
  return "error"
315
315
 
316
- async def _handle_regression_alert(self, alert: RegressionAlert):
316
+ async def _handle_regression_alert(self, alert: RegressionAlert) -> None:
317
317
  color = {"warning": "yellow", "error": "red", "critical": "bold red"}.get(
318
318
  alert.severity, "white"
319
319
  )
@@ -345,7 +345,7 @@ class RegressionPreventionSystem:
345
345
 
346
346
  self._log_critical_regression(alert)
347
347
 
348
- def _log_critical_regression(self, alert: RegressionAlert):
348
+ def _log_critical_regression(self, alert: RegressionAlert) -> None:
349
349
  log_file = Path(".crackerjack") / "critical_regressions.log"
350
350
  log_file.parent.mkdir(exist_ok=True)
351
351
 
@@ -501,7 +501,7 @@ class RegressionPreventionSystem:
501
501
  cutoff = datetime.now() - timedelta(hours=hours)
502
502
  return [alert for alert in self.regression_alerts if alert.detected_at > cutoff]
503
503
 
504
- def _save_patterns_to_file(self):
504
+ def _save_patterns_to_file(self) -> None:
505
505
  patterns_file = Path(".crackerjack") / "regression_patterns.json"
506
506
  patterns_file.parent.mkdir(exist_ok=True)
507
507
 
@@ -523,16 +523,16 @@ class RegressionPreventionSystem:
523
523
  },
524
524
  }
525
525
 
526
- with open(patterns_file, "w") as f:
526
+ with patterns_file.open("w") as f:
527
527
  json.dump(data, f, indent=2)
528
528
 
529
- def _load_patterns_from_file(self):
529
+ def _load_patterns_from_file(self) -> None:
530
530
  patterns_file = Path(".crackerjack") / "regression_patterns.json"
531
531
  if not patterns_file.exists():
532
532
  return
533
533
 
534
534
  try:
535
- with open(patterns_file) as f:
535
+ with patterns_file.open() as f:
536
536
  data = json.load(f)
537
537
 
538
538
  for pid, pdata in data.get("patterns", {}).items():