crackerjack 0.32.0__py3-none-any.whl → 0.33.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crackerjack might be problematic. Click here for more details.

Files changed (200)
  1. crackerjack/__main__.py +1350 -34
  2. crackerjack/adapters/__init__.py +17 -0
  3. crackerjack/adapters/lsp_client.py +358 -0
  4. crackerjack/adapters/rust_tool_adapter.py +194 -0
  5. crackerjack/adapters/rust_tool_manager.py +193 -0
  6. crackerjack/adapters/skylos_adapter.py +231 -0
  7. crackerjack/adapters/zuban_adapter.py +560 -0
  8. crackerjack/agents/base.py +7 -3
  9. crackerjack/agents/coordinator.py +271 -33
  10. crackerjack/agents/documentation_agent.py +9 -15
  11. crackerjack/agents/dry_agent.py +3 -15
  12. crackerjack/agents/formatting_agent.py +1 -1
  13. crackerjack/agents/import_optimization_agent.py +36 -180
  14. crackerjack/agents/performance_agent.py +17 -98
  15. crackerjack/agents/performance_helpers.py +7 -31
  16. crackerjack/agents/proactive_agent.py +1 -3
  17. crackerjack/agents/refactoring_agent.py +16 -85
  18. crackerjack/agents/refactoring_helpers.py +7 -42
  19. crackerjack/agents/security_agent.py +9 -48
  20. crackerjack/agents/test_creation_agent.py +356 -513
  21. crackerjack/agents/test_specialist_agent.py +0 -4
  22. crackerjack/api.py +6 -25
  23. crackerjack/cli/cache_handlers.py +204 -0
  24. crackerjack/cli/cache_handlers_enhanced.py +683 -0
  25. crackerjack/cli/facade.py +100 -0
  26. crackerjack/cli/handlers.py +224 -9
  27. crackerjack/cli/interactive.py +6 -4
  28. crackerjack/cli/options.py +642 -55
  29. crackerjack/cli/utils.py +2 -1
  30. crackerjack/code_cleaner.py +58 -117
  31. crackerjack/config/global_lock_config.py +8 -48
  32. crackerjack/config/hooks.py +53 -62
  33. crackerjack/core/async_workflow_orchestrator.py +24 -34
  34. crackerjack/core/autofix_coordinator.py +3 -17
  35. crackerjack/core/enhanced_container.py +64 -6
  36. crackerjack/core/file_lifecycle.py +12 -89
  37. crackerjack/core/performance.py +2 -2
  38. crackerjack/core/performance_monitor.py +15 -55
  39. crackerjack/core/phase_coordinator.py +257 -218
  40. crackerjack/core/resource_manager.py +14 -90
  41. crackerjack/core/service_watchdog.py +62 -95
  42. crackerjack/core/session_coordinator.py +149 -0
  43. crackerjack/core/timeout_manager.py +14 -72
  44. crackerjack/core/websocket_lifecycle.py +13 -78
  45. crackerjack/core/workflow_orchestrator.py +558 -240
  46. crackerjack/docs/INDEX.md +11 -0
  47. crackerjack/docs/generated/api/API_REFERENCE.md +10895 -0
  48. crackerjack/docs/generated/api/CLI_REFERENCE.md +109 -0
  49. crackerjack/docs/generated/api/CROSS_REFERENCES.md +1755 -0
  50. crackerjack/docs/generated/api/PROTOCOLS.md +3 -0
  51. crackerjack/docs/generated/api/SERVICES.md +1252 -0
  52. crackerjack/documentation/__init__.py +31 -0
  53. crackerjack/documentation/ai_templates.py +756 -0
  54. crackerjack/documentation/dual_output_generator.py +765 -0
  55. crackerjack/documentation/mkdocs_integration.py +518 -0
  56. crackerjack/documentation/reference_generator.py +977 -0
  57. crackerjack/dynamic_config.py +55 -50
  58. crackerjack/executors/async_hook_executor.py +10 -15
  59. crackerjack/executors/cached_hook_executor.py +117 -43
  60. crackerjack/executors/hook_executor.py +8 -34
  61. crackerjack/executors/hook_lock_manager.py +26 -183
  62. crackerjack/executors/individual_hook_executor.py +13 -11
  63. crackerjack/executors/lsp_aware_hook_executor.py +270 -0
  64. crackerjack/executors/tool_proxy.py +417 -0
  65. crackerjack/hooks/lsp_hook.py +79 -0
  66. crackerjack/intelligence/adaptive_learning.py +25 -10
  67. crackerjack/intelligence/agent_orchestrator.py +2 -5
  68. crackerjack/intelligence/agent_registry.py +34 -24
  69. crackerjack/intelligence/agent_selector.py +5 -7
  70. crackerjack/interactive.py +17 -6
  71. crackerjack/managers/async_hook_manager.py +0 -1
  72. crackerjack/managers/hook_manager.py +79 -1
  73. crackerjack/managers/publish_manager.py +66 -13
  74. crackerjack/managers/test_command_builder.py +5 -17
  75. crackerjack/managers/test_executor.py +1 -3
  76. crackerjack/managers/test_manager.py +109 -7
  77. crackerjack/managers/test_manager_backup.py +10 -9
  78. crackerjack/mcp/cache.py +2 -2
  79. crackerjack/mcp/client_runner.py +1 -1
  80. crackerjack/mcp/context.py +191 -68
  81. crackerjack/mcp/dashboard.py +7 -5
  82. crackerjack/mcp/enhanced_progress_monitor.py +31 -28
  83. crackerjack/mcp/file_monitor.py +30 -23
  84. crackerjack/mcp/progress_components.py +31 -21
  85. crackerjack/mcp/progress_monitor.py +50 -53
  86. crackerjack/mcp/rate_limiter.py +6 -6
  87. crackerjack/mcp/server_core.py +161 -32
  88. crackerjack/mcp/service_watchdog.py +2 -1
  89. crackerjack/mcp/state.py +4 -7
  90. crackerjack/mcp/task_manager.py +11 -9
  91. crackerjack/mcp/tools/core_tools.py +174 -33
  92. crackerjack/mcp/tools/error_analyzer.py +3 -2
  93. crackerjack/mcp/tools/execution_tools.py +15 -12
  94. crackerjack/mcp/tools/execution_tools_backup.py +42 -30
  95. crackerjack/mcp/tools/intelligence_tool_registry.py +7 -5
  96. crackerjack/mcp/tools/intelligence_tools.py +5 -2
  97. crackerjack/mcp/tools/monitoring_tools.py +33 -70
  98. crackerjack/mcp/tools/proactive_tools.py +24 -11
  99. crackerjack/mcp/tools/progress_tools.py +5 -8
  100. crackerjack/mcp/tools/utility_tools.py +20 -14
  101. crackerjack/mcp/tools/workflow_executor.py +62 -40
  102. crackerjack/mcp/websocket/app.py +8 -0
  103. crackerjack/mcp/websocket/endpoints.py +352 -357
  104. crackerjack/mcp/websocket/jobs.py +40 -57
  105. crackerjack/mcp/websocket/monitoring_endpoints.py +2935 -0
  106. crackerjack/mcp/websocket/server.py +7 -25
  107. crackerjack/mcp/websocket/websocket_handler.py +6 -17
  108. crackerjack/mixins/__init__.py +3 -0
  109. crackerjack/mixins/error_handling.py +145 -0
  110. crackerjack/models/config.py +21 -1
  111. crackerjack/models/config_adapter.py +49 -1
  112. crackerjack/models/protocols.py +176 -107
  113. crackerjack/models/resource_protocols.py +55 -210
  114. crackerjack/models/task.py +3 -0
  115. crackerjack/monitoring/ai_agent_watchdog.py +13 -13
  116. crackerjack/monitoring/metrics_collector.py +426 -0
  117. crackerjack/monitoring/regression_prevention.py +8 -8
  118. crackerjack/monitoring/websocket_server.py +643 -0
  119. crackerjack/orchestration/advanced_orchestrator.py +11 -6
  120. crackerjack/orchestration/coverage_improvement.py +3 -3
  121. crackerjack/orchestration/execution_strategies.py +26 -6
  122. crackerjack/orchestration/test_progress_streamer.py +8 -5
  123. crackerjack/plugins/base.py +2 -2
  124. crackerjack/plugins/hooks.py +7 -0
  125. crackerjack/plugins/managers.py +11 -8
  126. crackerjack/security/__init__.py +0 -1
  127. crackerjack/security/audit.py +90 -105
  128. crackerjack/services/anomaly_detector.py +392 -0
  129. crackerjack/services/api_extractor.py +615 -0
  130. crackerjack/services/backup_service.py +2 -2
  131. crackerjack/services/bounded_status_operations.py +15 -152
  132. crackerjack/services/cache.py +127 -1
  133. crackerjack/services/changelog_automation.py +395 -0
  134. crackerjack/services/config.py +18 -11
  135. crackerjack/services/config_merge.py +30 -85
  136. crackerjack/services/config_template.py +506 -0
  137. crackerjack/services/contextual_ai_assistant.py +48 -22
  138. crackerjack/services/coverage_badge_service.py +171 -0
  139. crackerjack/services/coverage_ratchet.py +41 -17
  140. crackerjack/services/debug.py +3 -3
  141. crackerjack/services/dependency_analyzer.py +460 -0
  142. crackerjack/services/dependency_monitor.py +14 -11
  143. crackerjack/services/documentation_generator.py +491 -0
  144. crackerjack/services/documentation_service.py +675 -0
  145. crackerjack/services/enhanced_filesystem.py +6 -5
  146. crackerjack/services/enterprise_optimizer.py +865 -0
  147. crackerjack/services/error_pattern_analyzer.py +676 -0
  148. crackerjack/services/file_hasher.py +1 -1
  149. crackerjack/services/git.py +41 -45
  150. crackerjack/services/health_metrics.py +10 -8
  151. crackerjack/services/heatmap_generator.py +735 -0
  152. crackerjack/services/initialization.py +30 -33
  153. crackerjack/services/input_validator.py +5 -97
  154. crackerjack/services/intelligent_commit.py +327 -0
  155. crackerjack/services/log_manager.py +15 -12
  156. crackerjack/services/logging.py +4 -3
  157. crackerjack/services/lsp_client.py +628 -0
  158. crackerjack/services/memory_optimizer.py +409 -0
  159. crackerjack/services/metrics.py +42 -33
  160. crackerjack/services/parallel_executor.py +416 -0
  161. crackerjack/services/pattern_cache.py +1 -1
  162. crackerjack/services/pattern_detector.py +6 -6
  163. crackerjack/services/performance_benchmarks.py +250 -576
  164. crackerjack/services/performance_cache.py +382 -0
  165. crackerjack/services/performance_monitor.py +565 -0
  166. crackerjack/services/predictive_analytics.py +510 -0
  167. crackerjack/services/quality_baseline.py +234 -0
  168. crackerjack/services/quality_baseline_enhanced.py +646 -0
  169. crackerjack/services/quality_intelligence.py +785 -0
  170. crackerjack/services/regex_patterns.py +605 -524
  171. crackerjack/services/regex_utils.py +43 -123
  172. crackerjack/services/secure_path_utils.py +5 -164
  173. crackerjack/services/secure_status_formatter.py +30 -141
  174. crackerjack/services/secure_subprocess.py +11 -92
  175. crackerjack/services/security.py +61 -30
  176. crackerjack/services/security_logger.py +18 -22
  177. crackerjack/services/server_manager.py +124 -16
  178. crackerjack/services/status_authentication.py +16 -159
  179. crackerjack/services/status_security_manager.py +4 -131
  180. crackerjack/services/terminal_utils.py +0 -0
  181. crackerjack/services/thread_safe_status_collector.py +19 -125
  182. crackerjack/services/unified_config.py +21 -13
  183. crackerjack/services/validation_rate_limiter.py +5 -54
  184. crackerjack/services/version_analyzer.py +459 -0
  185. crackerjack/services/version_checker.py +1 -1
  186. crackerjack/services/websocket_resource_limiter.py +10 -144
  187. crackerjack/services/zuban_lsp_service.py +390 -0
  188. crackerjack/slash_commands/__init__.py +2 -7
  189. crackerjack/slash_commands/run.md +2 -2
  190. crackerjack/tools/validate_input_validator_patterns.py +14 -40
  191. crackerjack/tools/validate_regex_patterns.py +19 -48
  192. {crackerjack-0.32.0.dist-info → crackerjack-0.33.1.dist-info}/METADATA +197 -26
  193. crackerjack-0.33.1.dist-info/RECORD +229 -0
  194. crackerjack/CLAUDE.md +0 -207
  195. crackerjack/RULES.md +0 -380
  196. crackerjack/py313.py +0 -234
  197. crackerjack-0.32.0.dist-info/RECORD +0 -180
  198. {crackerjack-0.32.0.dist-info → crackerjack-0.33.1.dist-info}/WHEEL +0 -0
  199. {crackerjack-0.32.0.dist-info → crackerjack-0.33.1.dist-info}/entry_points.txt +0 -0
  200. {crackerjack-0.32.0.dist-info → crackerjack-0.33.1.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,735 @@
1
+ """Heat map visualization generator for error patterns and code quality metrics."""
2
+
3
+ import json
4
+ import logging
5
+ import typing as t
6
+ from collections import defaultdict
7
+ from dataclasses import dataclass, field
8
+ from datetime import datetime, timedelta
9
+ from pathlib import Path
10
+
11
+ from .dependency_analyzer import DependencyGraph
12
+
13
+ logger = logging.getLogger(__name__)
14
+
15
+
16
@dataclass
class HeatMapCell:
    """Individual cell in a heat map.

    Attributes:
        x: Column index of the cell in the grid.
        y: Row index of the cell in the grid.
        value: Raw metric value the cell represents.
        label: Human-readable description shown in tooltips/exports.
        metadata: Arbitrary extra data attached to the cell.
        color_intensity: Normalized rendering intensity in [0.0, 1.0].
    """

    x: int
    y: int
    value: float
    label: str
    # NOTE: use the plain ``dict`` constructor as the factory; subscripting
    # (``dict[str, t.Any]``) is a typing construct, not a runtime factory.
    metadata: dict[str, t.Any] = field(default_factory=dict)
    color_intensity: float = 0.0  # 0.0 to 1.0


@dataclass
class HeatMapData:
    """Complete heat map data structure.

    Attributes:
        title: Display title of the heat map.
        cells: All populated cells of the grid.
        x_labels: Column labels, indexed by cell ``x``.
        y_labels: Row labels, indexed by cell ``y``.
        color_scale: Mapping of level name to color (hex string).
        metadata: Arbitrary extra data about the heat map.
        generated_at: Timestamp of creation (defaults to now).
    """

    title: str
    cells: list[HeatMapCell]
    x_labels: list[str]
    y_labels: list[str]
    color_scale: dict[str, t.Any]
    metadata: dict[str, t.Any] = field(default_factory=dict)
    generated_at: datetime = field(default_factory=datetime.now)

    def to_dict(self) -> dict[str, t.Any]:
        """Convert to dictionary for JSON serialization.

        Returns:
            A JSON-safe dict with cells flattened to plain dicts and the
            timestamp rendered as an ISO-8601 string.
        """
        return {
            "title": self.title,
            "cells": [
                {
                    "x": cell.x,
                    "y": cell.y,
                    "value": cell.value,
                    "label": cell.label,
                    "color_intensity": cell.color_intensity,
                    "metadata": cell.metadata,
                }
                for cell in self.cells
            ],
            "x_labels": self.x_labels,
            "y_labels": self.y_labels,
            "color_scale": self.color_scale,
            "metadata": self.metadata,
            "generated_at": self.generated_at.isoformat(),
        }
61
+
62
+
63
class HeatMapGenerator:
    """Generates heat map visualizations for various code quality metrics.

    Raw inputs are collected via ``add_error_data`` / ``add_metric_data``;
    the ``generate_*`` methods aggregate them into ``HeatMapData`` grids
    which can be exported (JSON/CSV) or rendered as standalone HTML.
    """

    def __init__(self) -> None:
        """Initialize empty data stores and the built-in color schemes."""
        # file path -> list of error records (see add_error_data for the shape)
        self.error_data: dict[str, list[dict[str, t.Any]]] = defaultdict(list)
        # entity identifier -> {"metrics": ..., "metadata": ..., "timestamp": ...}
        self.metric_data: dict[str, dict[str, t.Any]] = {}

        # One palette per heat map flavour; values are CSS hex colors.
        self.color_schemes = {
            "error_intensity": {
                "low": "#90EE90",  # Light green
                "medium": "#FFD700",  # Gold
                "high": "#FF6347",  # Tomato
                "critical": "#DC143C",  # Crimson
            },
            "quality_score": {
                "excellent": "#228B22",  # Forest green
                "good": "#32CD32",  # Lime green
                "average": "#FFD700",  # Gold
                "poor": "#FF6347",  # Tomato
                "critical": "#DC143C",  # Crimson
            },
            "complexity": {
                "simple": "#E6F3FF",  # Very light blue
                "moderate": "#B3D9FF",  # Light blue
                "complex": "#80BFFF",  # Medium blue
                "very_complex": "#4D9FFF",  # Dark blue
                "extremely_complex": "#1A5CFF",  # Very dark blue
            },
        }

    def add_error_data(
        self,
        file_path: str,
        line_number: int,
        error_type: str,
        severity: str,
        timestamp: datetime | None = None,
        metadata: dict[str, t.Any] | None = None,
    ) -> None:
        """Add one error observation for heat map generation.

        Args:
            file_path: Path of the file the error occurred in.
            line_number: Line where the error was reported.
            error_type: Category/name of the error.
            severity: Severity label (free-form, e.g. "high").
            timestamp: When the error occurred; defaults to now.
            metadata: Optional extra data to attach to the record.
        """
        if timestamp is None:
            timestamp = datetime.now()

        self.error_data[file_path].append(
            {
                "file_path": file_path,
                "line_number": line_number,
                "error_type": error_type,
                "severity": severity,
                "timestamp": timestamp,
                "metadata": metadata or {},
            }
        )

    def add_metric_data(
        self,
        identifier: str,
        metrics: dict[str, float],
        metadata: dict[str, t.Any] | None = None,
    ) -> None:
        """Add (or replace) metric data for one entity.

        Args:
            identifier: Entity name (file, module, service, ...).
            metrics: Mapping of metric name to numeric value.
            metadata: Optional extra data to attach.
        """
        self.metric_data[identifier] = {
            "metrics": metrics,
            "metadata": metadata or {},
            "timestamp": datetime.now(),
        }

    def generate_error_frequency_heatmap(
        self,
        time_window: timedelta = timedelta(days=7),
        granularity: str = "hourly",  # hourly, daily, weekly
    ) -> HeatMapData:
        """Generate heat map showing error frequency patterns over time.

        Rows are files, columns are time buckets sized per *granularity*;
        cell intensity is the bucket's error count normalized by the busiest
        bucket across all files.
        """
        now = datetime.now()
        start_time = now - time_window

        bucket_config = self._get_time_bucket_config(time_window, granularity)
        file_paths = list(self.error_data.keys())
        time_buckets = self._create_time_buckets(start_time, bucket_config)
        error_matrix = self._build_error_matrix(
            start_time, now, time_buckets, bucket_config
        )
        cells = self._create_frequency_cells(file_paths, time_buckets, error_matrix)
        labels = self._create_frequency_labels(
            file_paths, time_buckets, bucket_config["format"]
        )

        return HeatMapData(
            title=f"Error Frequency Heat Map ({granularity.title()})",
            cells=cells,
            x_labels=labels[0],
            y_labels=labels[1],
            color_scale=self.color_schemes["error_intensity"],
            metadata={
                "granularity": granularity,
                "time_window_days": time_window.days,
                "max_errors": self._calculate_max_errors(error_matrix),
                "total_files": len(file_paths),
            },
        )

    def _get_time_bucket_config(
        self, time_window: timedelta, granularity: str
    ) -> dict[str, t.Any]:
        """Get bucket count, size and label format for a granularity.

        The count is clamped to at least 1 so a window shorter than one
        bucket still produces a (single-column) heat map.
        """
        if granularity == "hourly":
            return {
                "count": max(1, int(time_window.total_seconds() / 3600)),
                "size": timedelta(hours=1),
                "format": "%H:%M",
            }
        if granularity == "daily":
            return {
                "count": max(1, time_window.days),
                "size": timedelta(days=1),
                "format": "%m/%d",
            }
        # weekly (any other granularity value falls through to weekly)
        return {
            "count": max(1, time_window.days // 7),
            "size": timedelta(weeks=1),
            "format": "Week %W",
        }

    def _create_time_buckets(
        self, start_time: datetime, bucket_config: dict[str, t.Any]
    ) -> list[datetime]:
        """Create the list of bucket start times for the heat map."""
        size = bucket_config["size"]
        return [start_time + i * size for i in range(bucket_config["count"])]

    def _build_error_matrix(
        self,
        start_time: datetime,
        end_time: datetime,
        time_buckets: list[datetime],
        bucket_config: dict[str, t.Any],
    ) -> dict[str, t.Any]:
        """Build a file -> bucket-index -> error-count matrix.

        Errors outside [start_time, end_time] are ignored.
        """
        error_matrix: dict[str, dict[int, int]] = defaultdict(lambda: defaultdict(int))

        for file_path, errors in self.error_data.items():
            for error in errors:
                error_time = error["timestamp"]
                if start_time <= error_time <= end_time:
                    bucket_index = self._find_time_bucket_index(
                        error_time, start_time, time_buckets, bucket_config["size"]
                    )
                    error_matrix[file_path][bucket_index] += 1

        return error_matrix

    def _find_time_bucket_index(
        self,
        error_time: datetime,
        start_time: datetime,
        time_buckets: list[datetime],
        bucket_size: timedelta,
    ) -> int:
        """Map an error timestamp to a bucket index (clamped to the last bucket)."""
        raw_index = int(
            (error_time - start_time).total_seconds() / bucket_size.total_seconds()
        )
        return min(len(time_buckets) - 1, raw_index)

    def _create_frequency_cells(
        self,
        file_paths: list[str],
        time_buckets: list[datetime],
        error_matrix: dict[str, t.Any],
    ) -> list[HeatMapCell]:
        """Create heat map cells from error frequency data."""
        cells = []
        max_errors = self._calculate_max_errors(error_matrix)

        for y, file_path in enumerate(file_paths):
            for x in range(len(time_buckets)):
                error_count = error_matrix[file_path][x]
                # max_errors is always >= 1, so this never divides by zero.
                intensity = error_count / max_errors

                cells.append(
                    HeatMapCell(
                        x=x,
                        y=y,
                        value=error_count,
                        label=f"{Path(file_path).name}: {error_count} errors",
                        color_intensity=intensity,
                        metadata={
                            "file_path": file_path,
                            "time_bucket": time_buckets[x].isoformat(),
                            "error_count": error_count,
                        },
                    )
                )

        return cells

    def _create_frequency_labels(
        self, file_paths: list[str], time_buckets: list[datetime], x_label_format: str
    ) -> tuple[list[str], list[str]]:
        """Create (x_labels, y_labels) for the frequency heat map."""
        x_labels = [bucket.strftime(x_label_format) for bucket in time_buckets]
        y_labels = [Path(fp).name for fp in file_paths]
        return x_labels, y_labels

    def _calculate_max_errors(self, error_matrix: dict[str, t.Any]) -> int:
        """Return the largest bucket count (>= 1) for normalization.

        ``default=0`` guards against an empty matrix, which would otherwise
        make ``max()`` raise ``ValueError``; ``or 1`` avoids division by zero.
        """
        return (
            max(
                (
                    max(bucket_counts.values()) if bucket_counts else 0
                    for bucket_counts in error_matrix.values()
                ),
                default=0,
            )
            or 1
        )

    def generate_code_complexity_heatmap(self, project_root: str | Path) -> HeatMapData:
        """Generate heat map showing code complexity across files and functions."""
        from .dependency_analyzer import analyze_project_dependencies

        project_root = Path(project_root)
        dependency_graph = analyze_project_dependencies(project_root)

        file_complexity = self._extract_file_complexity_data(
            dependency_graph, project_root
        )
        cells = self._create_complexity_cells(file_complexity)
        x_labels, y_labels = self._create_complexity_labels(file_complexity, cells)
        max_complexity = self._calculate_max_complexity(file_complexity)

        return HeatMapData(
            title="Code Complexity Heat Map",
            cells=cells,
            x_labels=x_labels,
            y_labels=y_labels,
            color_scale=self.color_schemes["complexity"],
            metadata={
                "max_complexity": max_complexity,
                "total_files": len(file_complexity),
                "complexity_threshold": 15,
            },
        )

    def _extract_file_complexity_data(
        self, dependency_graph: DependencyGraph, project_root: Path
    ) -> dict[str, t.Any]:
        """Extract complexity records from the dependency graph, grouped by file."""
        file_complexity = defaultdict(list)

        for node in dependency_graph.nodes.values():
            if node.type in ("function", "method", "class"):
                relative_path = str(Path(node.file_path).relative_to(project_root))
                file_complexity[relative_path].append(
                    {
                        "name": node.name,
                        "complexity": node.complexity,
                        "type": node.type,
                        "line": node.line_number,
                    }
                )
        return file_complexity

    def _create_complexity_cells(
        self, file_complexity: dict[str, t.Any]
    ) -> list[HeatMapCell]:
        """Create heat map cells from complexity data (max 50 functions/file)."""
        from operator import itemgetter  # hoisted out of the loop below

        cells = []
        for y, file_path in enumerate(file_complexity):
            functions = sorted(file_complexity[file_path], key=itemgetter("line"))

            for x, func_data in enumerate(functions[:50]):  # Limit to 50 functions
                cells.append(self._create_complexity_cell(x, y, func_data, file_path))

        return cells

    def _create_complexity_cell(
        self, x: int, y: int, func_data: dict[str, t.Any], file_path: str
    ) -> HeatMapCell:
        """Create a single complexity heat map cell."""
        complexity = func_data["complexity"]
        # Normalize against the project complexity threshold (15), capped at 1.0.
        intensity = min(1.0, complexity / 15)
        complexity_level = self._get_complexity_level(complexity)

        return HeatMapCell(
            x=x,
            y=y,
            value=complexity,
            label=f"{func_data['name']}: {complexity}",
            color_intensity=intensity,
            metadata={
                "file_path": file_path,
                "function_name": func_data["name"],
                "function_type": func_data["type"],
                "line_number": func_data["line"],
                "complexity": complexity,
                "complexity_level": complexity_level,
            },
        )

    def _get_complexity_level(self, complexity: int) -> str:
        """Map a cyclomatic complexity value onto a named category."""
        if complexity <= 5:
            return "simple"
        if complexity <= 10:
            return "moderate"
        if complexity <= 15:
            return "complex"
        if complexity <= 20:
            return "very_complex"
        return "extremely_complex"

    def _create_complexity_labels(
        self, file_complexity: dict[str, t.Any], cells: list[HeatMapCell]
    ) -> tuple[list[str], list[str]]:
        """Create (x_labels, y_labels) for the complexity heat map."""
        y_labels = [Path(fp).name for fp in file_complexity]

        max_x = max((cell.x for cell in cells), default=0) if cells else 0
        x_labels = [f"Func {x + 1}" for x in range(max_x + 1)]

        return x_labels, y_labels

    def _calculate_max_complexity(self, file_complexity: dict[str, t.Any]) -> int:
        """Return the largest complexity value (>= 1) for normalization.

        ``default=0`` guards the empty-project case, where the original
        ``max()`` over an empty generator would raise ``ValueError``.
        """
        return (
            max(
                (
                    max(item["complexity"] for item in items)
                    for items in file_complexity.values()
                    if items
                ),
                default=0,
            )
            or 1
        )

    def generate_quality_metrics_heatmap(self) -> HeatMapData:
        """Generate heat map showing various quality metrics per entity."""
        if not self.metric_data:
            return self._get_default_quality_heatmap()

        metric_types = self._get_quality_metric_types()
        identifiers = list(self.metric_data.keys())
        max_values = self._calculate_metric_max_values(metric_types)
        cells = self._create_quality_metric_cells(identifiers, metric_types, max_values)

        return HeatMapData(
            title="Quality Metrics Heat Map",
            cells=cells,
            x_labels=metric_types,
            y_labels=identifiers,
            color_scale=self.color_schemes["quality_score"],
            metadata={
                "metric_count": len(metric_types),
                "entity_count": len(identifiers),
            },
        )

    def _get_default_quality_heatmap(self) -> HeatMapData:
        """Return an empty quality heat map (used when no data was added)."""
        return HeatMapData(
            title="Quality Metrics Heat Map",
            cells=[],
            x_labels=[],
            y_labels=[],
            color_scale=self.color_schemes["quality_score"],
        )

    def _get_quality_metric_types(self) -> list[str]:
        """Define the fixed set of metric types to visualize."""
        return [
            "test_coverage",
            "complexity_score",
            "duplication_ratio",
            "documentation_ratio",
            "security_score",
            "performance_score",
        ]

    def _calculate_metric_max_values(self, metric_types: list[str]) -> dict[str, float]:
        """Calculate per-metric maxima (>= 1) used for normalization.

        ``or 1`` guards against an all-zero metric column, which previously
        produced a 0 maximum and a ``ZeroDivisionError`` downstream.
        """
        max_values = {}
        for metric_type in metric_types:
            values = [
                data["metrics"][metric_type]
                for data in self.metric_data.values()
                if metric_type in data["metrics"]
            ]
            max_values[metric_type] = max(values, default=0) or 1
        return max_values

    def _create_quality_metric_cells(
        self,
        identifiers: list[str],
        metric_types: list[str],
        max_values: dict[str, float],
    ) -> list[HeatMapCell]:
        """Create cells for the quality metrics heat map."""
        cells = []
        for y, identifier in enumerate(identifiers):
            metrics = self.metric_data[identifier]["metrics"]

            for x, metric_type in enumerate(metric_types):
                value = metrics.get(metric_type, 0)
                intensity = value / max_values[metric_type]
                quality_score = self._calculate_quality_score(metric_type, intensity)
                quality_level = self._determine_quality_level(quality_score)

                cells.append(
                    HeatMapCell(
                        x=x,
                        y=y,
                        value=value,
                        label=f"{identifier}: {metric_type} = {value:.2f}",
                        color_intensity=quality_score,
                        metadata={
                            "identifier": identifier,
                            "metric_type": metric_type,
                            "raw_value": value,
                            "quality_score": quality_score,
                            "quality_level": quality_level,
                        },
                    )
                )
        return cells

    def _calculate_quality_score(self, metric_type: str, intensity: float) -> float:
        """Calculate quality score for a metric (higher is better)."""
        if metric_type in ("complexity_score", "duplication_ratio"):
            # Lower is better for these metrics, so invert the intensity.
            return 1.0 - min(1.0, intensity)
        # Higher is better
        return intensity

    def _determine_quality_level(self, quality_score: float) -> str:
        """Map a quality score in [0, 1] onto a named level."""
        if quality_score >= 0.9:
            return "excellent"
        if quality_score >= 0.7:
            return "good"
        if quality_score >= 0.5:
            return "average"
        if quality_score >= 0.3:
            return "poor"
        return "critical"

    def generate_test_failure_heatmap(
        self, time_window: timedelta = timedelta(days=14)
    ) -> HeatMapData:
        """Generate heat map showing test failure patterns (file x error type)."""
        test_errors = self._filter_test_errors(time_window)
        test_matrix = self._group_test_errors_by_matrix(test_errors)
        test_files = list(test_matrix.keys())
        error_types = self._collect_error_types(test_matrix)
        max_failures = self._calculate_max_test_failures(test_matrix)
        cells = self._create_test_failure_cells(
            test_matrix, test_files, error_types, max_failures
        )
        metadata = self._build_test_failure_metadata(
            time_window, max_failures, test_files, error_types
        )

        return HeatMapData(
            title="Test Failure Heat Map",
            cells=cells,
            x_labels=error_types,
            y_labels=test_files,
            color_scale=self.color_schemes["error_intensity"],
            metadata=metadata,
        )

    def _filter_test_errors(self, time_window: timedelta) -> list[dict[str, t.Any]]:
        """Select test-related errors within the time window.

        An error is test-related if "test" appears (case-insensitively) in
        either its error type or its file path.
        """
        test_errors: list[dict[str, t.Any]] = []
        start_time = datetime.now() - time_window

        for file_path, errors in self.error_data.items():
            for error in errors:
                if error["timestamp"] >= start_time and (
                    "test" in error["error_type"].lower() or "test" in file_path.lower()
                ):
                    test_errors.append(error | {"file_path": file_path})
        return test_errors

    def _group_test_errors_by_matrix(
        self, test_errors: list[dict[str, t.Any]]
    ) -> defaultdict[str, defaultdict[str, int]]:
        """Group test errors into a file-name x error-type count matrix."""
        test_matrix: defaultdict[str, defaultdict[str, int]] = defaultdict(
            lambda: defaultdict(int)
        )

        for error in test_errors:
            file_name = Path(error["file_path"]).name
            test_matrix[file_name][error["error_type"]] += 1

        return test_matrix

    def _collect_error_types(
        self, test_matrix: defaultdict[str, defaultdict[str, int]]
    ) -> list[str]:
        """Collect all unique error types present in the matrix.

        NOTE: set iteration order is unspecified, so label order may vary
        between runs (preserved from the original implementation).
        """
        all_error_types: set[str] = set()
        for error_types in test_matrix.values():
            all_error_types.update(error_types.keys())
        return list(all_error_types)

    def _calculate_max_test_failures(
        self, test_matrix: defaultdict[str, defaultdict[str, int]]
    ) -> int:
        """Return the largest failure count (>= 1) for normalization.

        ``default=0`` guards the empty-matrix case, where the original
        ``max()`` would raise ``ValueError``.
        """
        return (
            max(
                (
                    max(error_counts.values()) if error_counts else 0
                    for error_counts in test_matrix.values()
                ),
                default=0,
            )
            or 1
        )

    def _create_test_failure_cells(
        self,
        test_matrix: defaultdict[str, defaultdict[str, int]],
        test_files: list[str],
        error_types: list[str],
        max_failures: int,
    ) -> list[HeatMapCell]:
        """Create cells for the test failure heat map."""
        cells = []
        for y, test_file in enumerate(test_files):
            for x, error_type in enumerate(error_types):
                failure_count = test_matrix[test_file][error_type]
                intensity = failure_count / max_failures

                cells.append(
                    HeatMapCell(
                        x=x,
                        y=y,
                        value=failure_count,
                        label=f"{test_file}: {error_type} ({failure_count} failures)",
                        color_intensity=intensity,
                        metadata={
                            "test_file": test_file,
                            "error_type": error_type,
                            "failure_count": failure_count,
                        },
                    )
                )
        return cells

    def _build_test_failure_metadata(
        self,
        time_window: timedelta,
        max_failures: int,
        test_files: list[str],
        error_types: list[str],
    ) -> dict[str, t.Any]:
        """Build the metadata dictionary for the test failure heat map."""
        return {
            "time_window_days": time_window.days,
            "max_failures": max_failures,
            "total_test_files": len(test_files),
            "total_error_types": len(error_types),
        }

    def export_heatmap_data(
        self, heatmap: HeatMapData, output_path: str | Path, format_type: str = "json"
    ) -> None:
        """Export heat map data to a file.

        Args:
            heatmap: The heat map to export.
            output_path: Destination file path.
            format_type: "json" or "csv" (case-insensitive).

        Raises:
            ValueError: If *format_type* is not supported.
        """
        if format_type.lower() == "json":
            with open(output_path, "w", encoding="utf-8") as f:
                json.dump(heatmap.to_dict(), f, indent=2)
        elif format_type.lower() == "csv":
            import csv

            with open(output_path, "w", newline="", encoding="utf-8") as f:
                writer = csv.writer(f)

                # Write header
                writer.writerow(["x", "y", "value", "label", "intensity"])

                # Write data
                writer.writerows(
                    [cell.x, cell.y, cell.value, cell.label, cell.color_intensity]
                    for cell in heatmap.cells
                )
        else:
            msg = f"Unsupported format: {format_type}"
            raise ValueError(msg)

    def generate_html_visualization(self, heatmap: HeatMapData) -> str:
        """Generate a self-contained HTML page visualizing the heat map.

        Cell background shades from white (intensity 0) to red (intensity 1).
        """
        html_template = """
    <!DOCTYPE html>
    <html>
    <head>
    <title>{title}</title>
    <style>
    body {{ font-family: Arial, sans-serif; margin: 20px; }}
    .heatmap {{ display: grid; gap: 1px; background: #ddd; }}
    .cell {{
    padding: 5px;
    text-align: center;
    font-size: 10px;
    min-width: 80px;
    min-height: 20px;
    }}
    .legend {{ margin-top: 20px; }}
    .legend-item {{ display: inline-block; margin: 0 10px; }}
    </style>
    </head>
    <body>
    <h1>{title}</h1>
    <div class="heatmap" style="grid-template-columns: repeat({cols}, 1fr);">
    {cells_html}
    </div>
    <div class="legend">
    {legend_html}
    </div>
    <p>Generated at: {generated_at}</p>
    </body>
    </html>
    """

        # Generate cells HTML
        cells_html = ""
        for cell in heatmap.cells:
            # Higher intensity -> less green/blue -> redder cell.
            intensity = int(255 * (1 - cell.color_intensity))
            color = f"rgb(255, {intensity}, {intensity})"

            cells_html += f"""
    <div class="cell" style="background-color: {color};"
    title="{cell.label}">
    {cell.value:.1f}
    </div>
    """

        # Generate legend HTML
        legend_html = ""
        for level, color in heatmap.color_scale.items():
            legend_html += f"""
    <div class="legend-item">
    <span style="background-color: {color}; padding: 2px 8px;">
    {level.title()}
    </span>
    </div>
    """

        max_x = max(cell.x for cell in heatmap.cells) if heatmap.cells else 1

        return html_template.format(
            title=heatmap.title,
            cols=max_x + 1,
            cells_html=cells_html,
            legend_html=legend_html,
            generated_at=heatmap.generated_at.isoformat(),
        )