crackerjack 0.31.10__py3-none-any.whl → 0.31.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crackerjack might be problematic; review the file-level changes and diff hunks below for details.

Files changed (155)
  1. crackerjack/CLAUDE.md +288 -705
  2. crackerjack/__main__.py +22 -8
  3. crackerjack/agents/__init__.py +0 -3
  4. crackerjack/agents/architect_agent.py +0 -43
  5. crackerjack/agents/base.py +1 -9
  6. crackerjack/agents/coordinator.py +2 -148
  7. crackerjack/agents/documentation_agent.py +109 -81
  8. crackerjack/agents/dry_agent.py +122 -97
  9. crackerjack/agents/formatting_agent.py +3 -16
  10. crackerjack/agents/import_optimization_agent.py +1174 -130
  11. crackerjack/agents/performance_agent.py +956 -188
  12. crackerjack/agents/performance_helpers.py +229 -0
  13. crackerjack/agents/proactive_agent.py +1 -48
  14. crackerjack/agents/refactoring_agent.py +516 -246
  15. crackerjack/agents/refactoring_helpers.py +282 -0
  16. crackerjack/agents/security_agent.py +393 -90
  17. crackerjack/agents/test_creation_agent.py +1776 -120
  18. crackerjack/agents/test_specialist_agent.py +59 -15
  19. crackerjack/agents/tracker.py +0 -102
  20. crackerjack/api.py +145 -37
  21. crackerjack/cli/handlers.py +48 -30
  22. crackerjack/cli/interactive.py +11 -11
  23. crackerjack/cli/options.py +66 -4
  24. crackerjack/code_cleaner.py +808 -148
  25. crackerjack/config/global_lock_config.py +110 -0
  26. crackerjack/config/hooks.py +43 -64
  27. crackerjack/core/async_workflow_orchestrator.py +247 -97
  28. crackerjack/core/autofix_coordinator.py +192 -109
  29. crackerjack/core/enhanced_container.py +46 -63
  30. crackerjack/core/file_lifecycle.py +549 -0
  31. crackerjack/core/performance.py +9 -8
  32. crackerjack/core/performance_monitor.py +395 -0
  33. crackerjack/core/phase_coordinator.py +281 -94
  34. crackerjack/core/proactive_workflow.py +9 -58
  35. crackerjack/core/resource_manager.py +501 -0
  36. crackerjack/core/service_watchdog.py +490 -0
  37. crackerjack/core/session_coordinator.py +4 -8
  38. crackerjack/core/timeout_manager.py +504 -0
  39. crackerjack/core/websocket_lifecycle.py +475 -0
  40. crackerjack/core/workflow_orchestrator.py +343 -209
  41. crackerjack/dynamic_config.py +47 -6
  42. crackerjack/errors.py +3 -4
  43. crackerjack/executors/async_hook_executor.py +63 -13
  44. crackerjack/executors/cached_hook_executor.py +14 -14
  45. crackerjack/executors/hook_executor.py +100 -37
  46. crackerjack/executors/hook_lock_manager.py +856 -0
  47. crackerjack/executors/individual_hook_executor.py +120 -86
  48. crackerjack/intelligence/__init__.py +0 -7
  49. crackerjack/intelligence/adaptive_learning.py +13 -86
  50. crackerjack/intelligence/agent_orchestrator.py +15 -78
  51. crackerjack/intelligence/agent_registry.py +12 -59
  52. crackerjack/intelligence/agent_selector.py +31 -92
  53. crackerjack/intelligence/integration.py +1 -41
  54. crackerjack/interactive.py +9 -9
  55. crackerjack/managers/async_hook_manager.py +25 -8
  56. crackerjack/managers/hook_manager.py +9 -9
  57. crackerjack/managers/publish_manager.py +57 -59
  58. crackerjack/managers/test_command_builder.py +6 -36
  59. crackerjack/managers/test_executor.py +9 -61
  60. crackerjack/managers/test_manager.py +17 -63
  61. crackerjack/managers/test_manager_backup.py +77 -127
  62. crackerjack/managers/test_progress.py +4 -23
  63. crackerjack/mcp/cache.py +5 -12
  64. crackerjack/mcp/client_runner.py +10 -10
  65. crackerjack/mcp/context.py +64 -6
  66. crackerjack/mcp/dashboard.py +14 -11
  67. crackerjack/mcp/enhanced_progress_monitor.py +55 -55
  68. crackerjack/mcp/file_monitor.py +72 -42
  69. crackerjack/mcp/progress_components.py +103 -84
  70. crackerjack/mcp/progress_monitor.py +122 -49
  71. crackerjack/mcp/rate_limiter.py +12 -12
  72. crackerjack/mcp/server_core.py +16 -22
  73. crackerjack/mcp/service_watchdog.py +26 -26
  74. crackerjack/mcp/state.py +15 -0
  75. crackerjack/mcp/tools/core_tools.py +95 -39
  76. crackerjack/mcp/tools/error_analyzer.py +6 -32
  77. crackerjack/mcp/tools/execution_tools.py +1 -56
  78. crackerjack/mcp/tools/execution_tools_backup.py +35 -131
  79. crackerjack/mcp/tools/intelligence_tool_registry.py +0 -36
  80. crackerjack/mcp/tools/intelligence_tools.py +2 -55
  81. crackerjack/mcp/tools/monitoring_tools.py +308 -145
  82. crackerjack/mcp/tools/proactive_tools.py +12 -42
  83. crackerjack/mcp/tools/progress_tools.py +23 -15
  84. crackerjack/mcp/tools/utility_tools.py +3 -40
  85. crackerjack/mcp/tools/workflow_executor.py +40 -60
  86. crackerjack/mcp/websocket/app.py +0 -3
  87. crackerjack/mcp/websocket/endpoints.py +206 -268
  88. crackerjack/mcp/websocket/jobs.py +213 -66
  89. crackerjack/mcp/websocket/server.py +84 -6
  90. crackerjack/mcp/websocket/websocket_handler.py +137 -29
  91. crackerjack/models/config_adapter.py +3 -16
  92. crackerjack/models/protocols.py +162 -3
  93. crackerjack/models/resource_protocols.py +454 -0
  94. crackerjack/models/task.py +3 -3
  95. crackerjack/monitoring/__init__.py +0 -0
  96. crackerjack/monitoring/ai_agent_watchdog.py +25 -71
  97. crackerjack/monitoring/regression_prevention.py +28 -87
  98. crackerjack/orchestration/advanced_orchestrator.py +44 -78
  99. crackerjack/orchestration/coverage_improvement.py +10 -60
  100. crackerjack/orchestration/execution_strategies.py +16 -16
  101. crackerjack/orchestration/test_progress_streamer.py +61 -53
  102. crackerjack/plugins/base.py +1 -1
  103. crackerjack/plugins/managers.py +22 -20
  104. crackerjack/py313.py +65 -21
  105. crackerjack/services/backup_service.py +467 -0
  106. crackerjack/services/bounded_status_operations.py +627 -0
  107. crackerjack/services/cache.py +7 -9
  108. crackerjack/services/config.py +35 -52
  109. crackerjack/services/config_integrity.py +5 -16
  110. crackerjack/services/config_merge.py +542 -0
  111. crackerjack/services/contextual_ai_assistant.py +17 -19
  112. crackerjack/services/coverage_ratchet.py +44 -73
  113. crackerjack/services/debug.py +25 -39
  114. crackerjack/services/dependency_monitor.py +52 -50
  115. crackerjack/services/enhanced_filesystem.py +14 -11
  116. crackerjack/services/file_hasher.py +1 -1
  117. crackerjack/services/filesystem.py +1 -12
  118. crackerjack/services/git.py +71 -47
  119. crackerjack/services/health_metrics.py +31 -27
  120. crackerjack/services/initialization.py +276 -428
  121. crackerjack/services/input_validator.py +760 -0
  122. crackerjack/services/log_manager.py +16 -16
  123. crackerjack/services/logging.py +7 -6
  124. crackerjack/services/metrics.py +43 -43
  125. crackerjack/services/pattern_cache.py +2 -31
  126. crackerjack/services/pattern_detector.py +26 -63
  127. crackerjack/services/performance_benchmarks.py +20 -45
  128. crackerjack/services/regex_patterns.py +2887 -0
  129. crackerjack/services/regex_utils.py +537 -0
  130. crackerjack/services/secure_path_utils.py +683 -0
  131. crackerjack/services/secure_status_formatter.py +534 -0
  132. crackerjack/services/secure_subprocess.py +605 -0
  133. crackerjack/services/security.py +47 -10
  134. crackerjack/services/security_logger.py +492 -0
  135. crackerjack/services/server_manager.py +109 -50
  136. crackerjack/services/smart_scheduling.py +8 -25
  137. crackerjack/services/status_authentication.py +603 -0
  138. crackerjack/services/status_security_manager.py +442 -0
  139. crackerjack/services/thread_safe_status_collector.py +546 -0
  140. crackerjack/services/tool_version_service.py +1 -23
  141. crackerjack/services/unified_config.py +36 -58
  142. crackerjack/services/validation_rate_limiter.py +269 -0
  143. crackerjack/services/version_checker.py +9 -40
  144. crackerjack/services/websocket_resource_limiter.py +572 -0
  145. crackerjack/slash_commands/__init__.py +52 -2
  146. crackerjack/tools/__init__.py +0 -0
  147. crackerjack/tools/validate_input_validator_patterns.py +262 -0
  148. crackerjack/tools/validate_regex_patterns.py +198 -0
  149. {crackerjack-0.31.10.dist-info → crackerjack-0.31.12.dist-info}/METADATA +197 -12
  150. crackerjack-0.31.12.dist-info/RECORD +178 -0
  151. crackerjack/cli/facade.py +0 -104
  152. crackerjack-0.31.10.dist-info/RECORD +0 -149
  153. {crackerjack-0.31.10.dist-info → crackerjack-0.31.12.dist-info}/WHEEL +0 -0
  154. {crackerjack-0.31.10.dist-info → crackerjack-0.31.12.dist-info}/entry_points.txt +0 -0
  155. {crackerjack-0.31.10.dist-info → crackerjack-0.31.12.dist-info}/licenses/LICENSE +0 -0
@@ -1,16 +1,24 @@
1
1
  import ast
2
2
  import logging
3
+ import typing as t
4
+ from collections.abc import Awaitable, Callable
3
5
  from dataclasses import dataclass
4
6
  from pathlib import Path
5
7
 
6
8
  from ..agents.base import Issue, IssueType, Priority
7
9
  from .pattern_cache import CachedPattern, PatternCache
8
10
 
11
+ DetectorMethod = Callable[[Path, str, ast.AST], Awaitable[list["AntiPattern"]]]
12
+
13
+
14
+ class AntiPatternConfig(t.TypedDict):
15
+ detector: DetectorMethod
16
+ description: str
17
+ prevention: str
18
+
9
19
 
10
20
  @dataclass
11
21
  class AntiPattern:
12
- """Detected anti-pattern that could lead to issues."""
13
-
14
22
  pattern_type: str
15
23
  severity: Priority
16
24
  file_path: str
@@ -21,19 +29,12 @@ class AntiPattern:
21
29
 
22
30
 
23
31
  class PatternDetector:
24
- """Detects anti-patterns and suggests proactive refactoring.
25
-
26
- Analyzes code to identify patterns that commonly lead to quality
27
- violations, allowing proactive fixes before issues occur.
28
- """
29
-
30
32
  def __init__(self, project_path: Path, pattern_cache: PatternCache) -> None:
31
33
  self.project_path = project_path
32
34
  self.pattern_cache = pattern_cache
33
35
  self.logger = logging.getLogger(__name__)
34
36
 
35
- # Known anti-patterns and their detection rules
36
- self._anti_patterns = {
37
+ self._anti_patterns: dict[str, AntiPatternConfig] = {
37
38
  "complexity_hotspot": {
38
39
  "detector": self._detect_complexity_hotspots,
39
40
  "description": "Functions approaching complexity limits",
@@ -62,14 +63,12 @@ class PatternDetector:
62
63
  }
63
64
 
64
65
  async def analyze_codebase(self) -> list[AntiPattern]:
65
- """Analyze the entire codebase for anti-patterns."""
66
66
  self.logger.info("Starting proactive anti-pattern analysis")
67
67
 
68
68
  anti_patterns = []
69
69
  python_files = list(self.project_path.glob("**/*.py"))
70
70
 
71
71
  for file_path in python_files:
72
- # Skip files in common ignore patterns
73
72
  if self._should_skip_file(file_path):
74
73
  continue
75
74
 
@@ -80,25 +79,21 @@ class PatternDetector:
80
79
  return anti_patterns
81
80
 
82
81
  async def _analyze_file(self, file_path: Path) -> list[AntiPattern]:
83
- """Analyze a single file for anti-patterns."""
84
82
  anti_patterns = []
85
83
 
86
84
  try:
87
85
  content = file_path.read_text(encoding="utf-8")
88
86
 
89
- # Parse AST for analysis
90
87
  try:
91
88
  tree = ast.parse(content, filename=str(file_path))
92
89
  except SyntaxError as e:
93
- # File has syntax errors, skip analysis
94
90
  self.logger.warning(f"Syntax error in {file_path}: {e}")
95
91
  return []
96
92
 
97
- # Run all anti-pattern detectors
98
93
  for pattern_name, pattern_info in self._anti_patterns.items():
99
- detector = pattern_info["detector"]
94
+ detector_method = pattern_info["detector"]
100
95
  try:
101
- detected = await detector(file_path, content, tree) # type: ignore[operator]
96
+ detected = await detector_method(file_path, content, tree)
102
97
  anti_patterns.extend(detected)
103
98
  except Exception as e:
104
99
  self.logger.warning(
@@ -113,18 +108,14 @@ class PatternDetector:
113
108
  async def _detect_complexity_hotspots(
114
109
  self, file_path: Path, content: str, tree: ast.AST
115
110
  ) -> list[AntiPattern]:
116
- """Detect functions that are approaching complexity limits."""
117
111
  anti_patterns = []
118
112
 
119
113
  class ComplexityVisitor(ast.NodeVisitor):
120
114
  def __init__(self) -> None:
121
- self.functions: list[
122
- tuple[str, int, int]
123
- ] = [] # name, line, complexity_estimate
115
+ self.functions: list[tuple[str, int, int]] = []
124
116
 
125
117
  def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
126
- # Simple complexity estimation based on control structures
127
- complexity = 1 # Base complexity
118
+ complexity = 1
128
119
 
129
120
  for child in ast.walk(node):
130
121
  if isinstance(child, ast.If | ast.For | ast.While | ast.With):
@@ -142,16 +133,15 @@ class PatternDetector:
142
133
  visitor = ComplexityVisitor()
143
134
  visitor.visit(tree)
144
135
 
145
- # Flag functions approaching the complexity limit (13)
146
136
  for func_name, line_no, complexity in visitor.functions:
147
- if complexity >= 10: # Warn before hitting the limit
137
+ if complexity >= 10:
148
138
  anti_patterns.append(
149
139
  AntiPattern(
150
140
  pattern_type="complexity_hotspot",
151
141
  severity=Priority.HIGH if complexity >= 12 else Priority.MEDIUM,
152
142
  file_path=str(file_path),
153
143
  line_number=line_no,
154
- description=f"Function '{func_name}' has complexity {complexity} (approaching limit of 13)",
144
+ description=f"Function '{func_name}' has complexity {complexity} (approaching limit of 15)",
155
145
  suggestion=f"Break down '{func_name}' into smaller helper methods",
156
146
  prevention_strategy="extract_method",
157
147
  )
@@ -162,10 +152,8 @@ class PatternDetector:
162
152
  async def _detect_code_duplication(
163
153
  self, file_path: Path, content: str, tree: ast.AST
164
154
  ) -> list[AntiPattern]:
165
- """Detect potential code duplication patterns."""
166
155
  anti_patterns = []
167
156
 
168
- # Simple heuristic: look for repeated string literals or similar patterns
169
157
  lines = content.split("\n")
170
158
  line_groups = {}
171
159
 
@@ -177,9 +165,8 @@ class PatternDetector:
177
165
  else:
178
166
  line_groups[stripped] = [i]
179
167
 
180
- # Flag lines that appear multiple times
181
168
  for line_content, line_numbers in line_groups.items():
182
- if len(line_numbers) >= 3: # Appears 3+ times
169
+ if len(line_numbers) >= 3:
183
170
  anti_patterns.append(
184
171
  AntiPattern(
185
172
  pattern_type="code_duplication",
@@ -197,29 +184,24 @@ class PatternDetector:
197
184
  async def _detect_performance_issues(
198
185
  self, file_path: Path, content: str, tree: ast.AST
199
186
  ) -> list[AntiPattern]:
200
- """Detect potential performance anti-patterns."""
201
187
  anti_patterns = []
202
188
 
203
189
  class PerformanceVisitor(ast.NodeVisitor):
204
190
  def __init__(self) -> None:
205
- self.issues: list[
206
- tuple[int, str, str]
207
- ] = [] # line, description, suggestion
191
+ self.issues: list[tuple[int, str, str]] = []
208
192
 
209
193
  def visit_For(self, node: ast.For) -> None:
210
- # Check for nested loops (potential O(n²))
211
194
  for child in ast.walk(node.body[0] if node.body else node):
212
195
  if isinstance(child, ast.For | ast.While) and child != node:
213
196
  self.issues.append(
214
197
  (
215
198
  node.lineno,
216
- "Nested loop detected - potential O(n²) complexity",
199
+ "Nested loop detected-potential O(n²) complexity",
217
200
  "Consider using dictionary lookups or set operations",
218
201
  )
219
202
  )
220
203
  break
221
204
 
222
- # Check for list concatenation in loops
223
205
  for stmt in node.body:
224
206
  if (
225
207
  isinstance(stmt, ast.AugAssign)
@@ -229,7 +211,7 @@ class PatternDetector:
229
211
  self.issues.append(
230
212
  (
231
213
  stmt.lineno,
232
- "List concatenation in loop - inefficient",
214
+ "List concatenation in loop-inefficient",
233
215
  "Use list.append() and join at the end",
234
216
  )
235
217
  )
@@ -257,14 +239,11 @@ class PatternDetector:
257
239
  async def _detect_security_risks(
258
240
  self, file_path: Path, content: str, tree: ast.AST
259
241
  ) -> list[AntiPattern]:
260
- """Detect potential security anti-patterns."""
261
242
  anti_patterns = []
262
243
 
263
- # Check for hardcoded paths
264
244
  hardcoded_path_patterns = self._check_hardcoded_paths(file_path, content)
265
245
  anti_patterns.extend(hardcoded_path_patterns)
266
246
 
267
- # Check for subprocess security issues
268
247
  subprocess_patterns = self._check_subprocess_security(file_path, tree)
269
248
  anti_patterns.extend(subprocess_patterns)
270
249
 
@@ -273,7 +252,6 @@ class PatternDetector:
273
252
  def _check_hardcoded_paths(
274
253
  self, file_path: Path, content: str
275
254
  ) -> list[AntiPattern]:
276
- """Check for hardcoded paths (common security issue)."""
277
255
  anti_patterns = []
278
256
 
279
257
  if "/tmp/" in content or "C:\\" in content: # nosec B108
@@ -286,7 +264,7 @@ class PatternDetector:
286
264
  severity=Priority.HIGH,
287
265
  file_path=str(file_path),
288
266
  line_number=i,
289
- description="Hardcoded path detected - potential security risk",
267
+ description="Hardcoded path detected-potential security risk",
290
268
  suggestion="Use tempfile module for temporary files",
291
269
  prevention_strategy="use_secure_temp_files",
292
270
  )
@@ -298,7 +276,6 @@ class PatternDetector:
298
276
  def _check_subprocess_security(
299
277
  self, file_path: Path, tree: ast.AST
300
278
  ) -> list[AntiPattern]:
301
- """Check for shell=True in subprocess calls."""
302
279
  anti_patterns = []
303
280
 
304
281
  class SecurityVisitor(ast.NodeVisitor):
@@ -306,7 +283,6 @@ class PatternDetector:
306
283
  self.issues: list[tuple[int, str, str]] = []
307
284
 
308
285
  def visit_Call(self, node: ast.Call) -> None:
309
- # Check for subprocess with shell=True
310
286
  if (
311
287
  isinstance(node.func, ast.Attribute)
312
288
  and isinstance(node.func.value, ast.Name)
@@ -321,7 +297,7 @@ class PatternDetector:
321
297
  self.issues.append(
322
298
  (
323
299
  node.lineno,
324
- "subprocess with shell=True - security risk",
300
+ "subprocess with shell=True-security risk",
325
301
  "Avoid shell=True or validate inputs carefully",
326
302
  )
327
303
  )
@@ -349,7 +325,6 @@ class PatternDetector:
349
325
  async def _detect_import_complexity(
350
326
  self, file_path: Path, content: str, tree: ast.AST
351
327
  ) -> list[AntiPattern]:
352
- """Detect complex or problematic import patterns."""
353
328
  anti_patterns = []
354
329
 
355
330
  class ImportVisitor(ast.NodeVisitor):
@@ -360,16 +335,14 @@ class PatternDetector:
360
335
  def visit_Import(self, node: ast.Import) -> None:
361
336
  self.import_count += len(node.names)
362
337
  for alias in node.names:
363
- if alias.name.count(".") > 2: # Deep import
338
+ if alias.name.count(".") > 2:
364
339
  self.imports.append((node.lineno, f"Deep import: {alias.name}"))
365
340
  self.generic_visit(node)
366
341
 
367
342
  def visit_ImportFrom(self, node: ast.ImportFrom) -> None:
368
343
  if node.module:
369
344
  self.import_count += len(node.names) if node.names else 1
370
- if (
371
- node.names and len(node.names) > 10
372
- ): # Many imports from one module
345
+ if node.names and len(node.names) > 10:
373
346
  self.imports.append(
374
347
  (node.lineno, f"Many imports from {node.module}")
375
348
  )
@@ -378,7 +351,6 @@ class PatternDetector:
378
351
  visitor = ImportVisitor()
379
352
  visitor.visit(tree)
380
353
 
381
- # Flag files with excessive imports
382
354
  if visitor.import_count > 50:
383
355
  anti_patterns.append(
384
356
  AntiPattern(
@@ -386,13 +358,12 @@ class PatternDetector:
386
358
  severity=Priority.MEDIUM,
387
359
  file_path=str(file_path),
388
360
  line_number=1,
389
- description=f"File has {visitor.import_count} imports - may indicate tight coupling",
361
+ description=f"File has {visitor.import_count} imports-may indicate tight coupling",
390
362
  suggestion="Consider breaking file into smaller modules",
391
363
  prevention_strategy="modular_design",
392
364
  )
393
365
  )
394
366
 
395
- # Flag specific problematic imports
396
367
  for line_no, description in visitor.imports:
397
368
  anti_patterns.append(
398
369
  AntiPattern(
@@ -409,7 +380,6 @@ class PatternDetector:
409
380
  return anti_patterns
410
381
 
411
382
  def _should_skip_file(self, file_path: Path) -> bool:
412
- """Check if file should be skipped in analysis."""
413
383
  skip_patterns = [
414
384
  "__pycache__",
415
385
  ".git",
@@ -428,11 +398,9 @@ class PatternDetector:
428
398
  async def suggest_proactive_refactoring(
429
399
  self, anti_patterns: list[AntiPattern]
430
400
  ) -> list[Issue]:
431
- """Convert anti-patterns to proactive refactoring issues."""
432
401
  issues = []
433
402
 
434
403
  for anti_pattern in anti_patterns:
435
- # Map anti-pattern types to issue types
436
404
  issue_type_map = {
437
405
  "complexity_hotspot": IssueType.COMPLEXITY,
438
406
  "code_duplication": IssueType.DRY_VIOLATION,
@@ -466,7 +434,6 @@ class PatternDetector:
466
434
  async def get_cached_solutions(
467
435
  self, anti_patterns: list[AntiPattern]
468
436
  ) -> dict[str, CachedPattern]:
469
- """Get cached solutions for detected anti-patterns."""
470
437
  solutions = {}
471
438
 
472
439
  for anti_pattern in anti_patterns:
@@ -479,13 +446,11 @@ class PatternDetector:
479
446
  return solutions
480
447
 
481
448
  def _generate_solution_key(self, anti_pattern: AntiPattern) -> str:
482
- """Generate unique key for anti-pattern solution."""
483
449
  return f"{anti_pattern.pattern_type}_{anti_pattern.file_path}_{anti_pattern.line_number}"
484
450
 
485
451
  def _find_cached_pattern_for_anti_pattern(
486
452
  self, anti_pattern: AntiPattern
487
453
  ) -> CachedPattern | None:
488
- """Find cached pattern for a specific anti-pattern."""
489
454
  issue_type = self._map_anti_pattern_to_issue_type(anti_pattern.pattern_type)
490
455
  if not issue_type:
491
456
  return None
@@ -494,7 +459,6 @@ class PatternDetector:
494
459
  return self.pattern_cache.get_best_pattern_for_issue(temp_issue)
495
460
 
496
461
  def _map_anti_pattern_to_issue_type(self, pattern_type: str) -> IssueType | None:
497
- """Map anti-pattern type to issue type."""
498
462
  return {
499
463
  "complexity_hotspot": IssueType.COMPLEXITY,
500
464
  "code_duplication": IssueType.DRY_VIOLATION,
@@ -505,7 +469,6 @@ class PatternDetector:
505
469
  def _create_temp_issue_for_lookup(
506
470
  self, anti_pattern: AntiPattern, issue_type: IssueType
507
471
  ) -> Issue:
508
- """Create temporary issue for pattern cache lookup."""
509
472
  return Issue(
510
473
  id="temp",
511
474
  type=issue_type,
@@ -55,9 +55,8 @@ class PerformanceBenchmarkService:
55
55
  run_hooks: bool = True,
56
56
  iterations: int = 1,
57
57
  ) -> PerformanceReport:
58
- """Run comprehensive performance benchmark across all components."""
59
58
  self.console.print(
60
- "[cyan]🚀 Starting comprehensive performance benchmark...[/cyan]",
59
+ "[cyan]🚀 Starting comprehensive performance benchmark...[/ cyan]",
61
60
  )
62
61
 
63
62
  start_time = time.time()
@@ -69,7 +68,6 @@ class PerformanceBenchmarkService:
69
68
  return report
70
69
 
71
70
  def _initialize_performance_report(self) -> PerformanceReport:
72
- """Initialize a new performance report."""
73
71
  return PerformanceReport(total_duration=0.0)
74
72
 
75
73
  def _run_requested_benchmarks(
@@ -79,7 +77,6 @@ class PerformanceBenchmarkService:
79
77
  run_hooks: bool,
80
78
  iterations: int,
81
79
  ) -> None:
82
- """Run the requested benchmark types."""
83
80
  if run_tests:
84
81
  report.test_benchmarks = self._benchmark_test_suite(iterations)
85
82
 
@@ -94,14 +91,13 @@ class PerformanceBenchmarkService:
94
91
  report: PerformanceReport,
95
92
  start_time: float,
96
93
  ) -> None:
97
- """Finalize performance report with analysis and history."""
98
94
  report.total_duration = time.time() - start_time
99
95
  report.recommendations = self._generate_performance_recommendations(report)
100
96
  report.baseline_comparison = self._compare_with_baseline(report)
101
97
  self._save_performance_history(report)
102
98
 
103
99
  def _benchmark_test_suite(self, iterations: int = 1) -> dict[str, Any]:
104
- self.console.print("[dim]📊 Benchmarking test suite...[/dim]")
100
+ self.console.print("[dim]📊 Benchmarking test suite...[/ dim]")
105
101
 
106
102
  benchmark_results = {}
107
103
 
@@ -152,7 +148,7 @@ class PerformanceBenchmarkService:
152
148
  return benchmark_results
153
149
 
154
150
  def _benchmark_hooks(self, iterations: int = 1) -> dict[str, float]:
155
- self.console.print("[dim]🔧 Benchmarking hooks performance...[/dim]")
151
+ self.console.print("[dim]🔧 Benchmarking hooks performance...[/ dim]")
156
152
 
157
153
  hook_performance = {}
158
154
 
@@ -207,7 +203,7 @@ class PerformanceBenchmarkService:
207
203
  self,
208
204
  iterations: int = 1,
209
205
  ) -> list[BenchmarkResult]:
210
- self.console.print("[dim]⚙️ Benchmarking workflow components...[/dim]")
206
+ self.console.print("[dim]⚙️ Benchmarking workflow components...[/ dim]")
211
207
 
212
208
  results = []
213
209
 
@@ -260,7 +256,6 @@ class PerformanceBenchmarkService:
260
256
  self,
261
257
  report: PerformanceReport,
262
258
  ) -> list[str]:
263
- """Generate performance recommendations based on benchmark results."""
264
259
  recommendations = []
265
260
 
266
261
  self._add_test_suite_recommendations(report, recommendations)
@@ -275,19 +270,17 @@ class PerformanceBenchmarkService:
275
270
  report: PerformanceReport,
276
271
  recommendations: list[str],
277
272
  ) -> None:
278
- """Add recommendations for test suite performance."""
279
273
  if not report.test_benchmarks:
280
274
  return
281
275
 
282
276
  for iteration_data in report.test_benchmarks.values():
283
277
  if self._is_slow_test_iteration(iteration_data):
284
278
  recommendations.append(
285
- "Consider optimizing test suite - execution time exceeds 1 minute",
279
+ "Consider optimizing test suite-execution time exceeds 1 minute",
286
280
  )
287
281
  break
288
282
 
289
283
  def _is_slow_test_iteration(self, iteration_data: Any) -> bool:
290
- """Check if test iteration is slow."""
291
284
  return (
292
285
  isinstance(iteration_data, dict)
293
286
  and iteration_data.get("total_duration", 0) > 60
@@ -298,7 +291,6 @@ class PerformanceBenchmarkService:
298
291
  report: PerformanceReport,
299
292
  recommendations: list[str],
300
293
  ) -> None:
301
- """Add recommendations for hook performance."""
302
294
  slow_hooks = self._identify_slow_hooks(report.hook_performance)
303
295
  if slow_hooks:
304
296
  recommendations.append(self._format_slow_hooks_message(slow_hooks))
@@ -307,7 +299,6 @@ class PerformanceBenchmarkService:
307
299
  self,
308
300
  hook_performance: dict[str, float],
309
301
  ) -> list[tuple[str, float]]:
310
- """Identify hooks with slow performance."""
311
302
  slow_hooks = []
312
303
  for hook_name, perf_data in hook_performance.items():
313
304
  if isinstance(perf_data, dict):
@@ -317,8 +308,7 @@ class PerformanceBenchmarkService:
317
308
  return slow_hooks
318
309
 
319
310
  def _format_slow_hooks_message(self, slow_hooks: list[tuple[str, float]]) -> str:
320
- """Format message for slow hooks recommendation."""
321
- hooks_info = ", ".join(f"{h}({d:.1f}s)" for h, d in slow_hooks[:3])
311
+ hooks_info = ", ".join(f"{h}({d: .1f}s)" for h, d in slow_hooks[:3])
322
312
  return (
323
313
  f"Slow hooks detected: {hooks_info}. "
324
314
  "Consider hook optimization or selective execution."
@@ -329,7 +319,6 @@ class PerformanceBenchmarkService:
329
319
  report: PerformanceReport,
330
320
  recommendations: list[str],
331
321
  ) -> None:
332
- """Add recommendations for component performance."""
333
322
  slow_components = self._identify_slow_components(report.workflow_benchmarks)
334
323
  if slow_components:
335
324
  components_names = ", ".join(c.name for c in slow_components)
@@ -342,7 +331,6 @@ class PerformanceBenchmarkService:
342
331
  self,
343
332
  workflow_benchmarks: list[BenchmarkResult],
344
333
  ) -> list[BenchmarkResult]:
345
- """Identify slow workflow components."""
346
334
  return [b for b in workflow_benchmarks if b.duration_seconds > 5]
347
335
 
348
336
  def _add_overall_performance_recommendations(
@@ -350,7 +338,6 @@ class PerformanceBenchmarkService:
350
338
  report: PerformanceReport,
351
339
  recommendations: list[str],
352
340
  ) -> None:
353
- """Add recommendations for overall performance."""
354
341
  if report.total_duration > 300:
355
342
  recommendations.append(
356
343
  "Overall workflow execution is slow. Consider enabling --skip-hooks "
@@ -361,7 +348,6 @@ class PerformanceBenchmarkService:
361
348
  self,
362
349
  current_report: PerformanceReport,
363
350
  ) -> dict[str, float]:
364
- """Compare current performance with historical baseline."""
365
351
  baseline_comparison = {}
366
352
 
367
353
  try:
@@ -386,7 +372,6 @@ class PerformanceBenchmarkService:
386
372
  return baseline_comparison
387
373
 
388
374
  def _load_performance_history(self) -> list[dict[str, Any]] | None:
389
- """Load performance history from file."""
390
375
  if not self.history_file.exists():
391
376
  return None
392
377
 
@@ -401,7 +386,6 @@ class PerformanceBenchmarkService:
401
386
  history: list[dict[str, Any]],
402
387
  comparison: dict[str, Any],
403
388
  ) -> None:
404
- """Add overall performance comparison to baseline."""
405
389
  recent_runs = history[-5:]
406
390
  baseline_duration = statistics.median(
407
391
  [r["total_duration"] for r in recent_runs],
@@ -418,7 +402,6 @@ class PerformanceBenchmarkService:
418
402
  history: list[dict[str, Any]],
419
403
  comparison: dict[str, Any],
420
404
  ) -> None:
421
- """Add component-level performance comparison."""
422
405
  recent_runs = history[-5:]
423
406
  if not recent_runs:
424
407
  return
@@ -439,7 +422,6 @@ class PerformanceBenchmarkService:
439
422
  current_duration: float,
440
423
  old_duration: float,
441
424
  ) -> float:
442
- """Calculate performance change percentage."""
443
425
  return ((current_duration - old_duration) / old_duration) * 100
444
426
 
445
427
  def _save_performance_history(self, report: PerformanceReport) -> None:
@@ -471,11 +453,13 @@ class PerformanceBenchmarkService:
471
453
 
472
454
  except Exception as e:
473
455
  self.console.print(
474
- f"[yellow]⚠️[/yellow] Could not save performance history: {e}",
456
+ f"[yellow]⚠️[/ yellow] Could not save performance history: {e}",
475
457
  )
476
458
 
477
459
  def display_performance_report(self, report: PerformanceReport) -> None:
478
- self.console.print("\n[bold cyan]🚀 Performance Benchmark Report[/bold cyan]\n")
460
+ self.console.print(
461
+ "\n[bold cyan]🚀 Performance Benchmark Report[/ bold cyan]\n"
462
+ )
479
463
 
480
464
  self._display_overall_stats(report)
481
465
  self._display_workflow_components(report)
@@ -484,12 +468,12 @@ class PerformanceBenchmarkService:
484
468
  self._display_recommendations(report)
485
469
 
486
470
  self.console.print(
487
- f"\n[dim]📁 Benchmark data saved to: {self.benchmarks_dir}[/dim]",
471
+ f"\n[dim]📁 Benchmark data saved to: {self.benchmarks_dir}[/ dim]",
488
472
  )
489
473
 
490
474
  def _display_overall_stats(self, report: PerformanceReport) -> None:
491
475
  self.console.print(
492
- f"[green]⏱️ Total Duration: {report.total_duration:.2f}s[/green]",
476
+ f"[green]⏱️ Total Duration: {report.total_duration: .2f}s[/ green]",
493
477
  )
494
478
 
495
479
  def _display_workflow_components(self, report: PerformanceReport) -> None:
@@ -505,7 +489,7 @@ class PerformanceBenchmarkService:
505
489
  metadata_str = ", ".join(f"{k}={v}" for k, v in benchmark.metadata.items())
506
490
  table.add_row(
507
491
  benchmark.name,
508
- f"{benchmark.duration_seconds:.3f}",
492
+ f"{benchmark.duration_seconds: .3f}",
509
493
  metadata_str,
510
494
  )
511
495
 
@@ -526,9 +510,9 @@ class PerformanceBenchmarkService:
526
510
  if isinstance(perf_data, dict):
527
511
  table.add_row(
528
512
  hook_name,
529
- f"{perf_data.get('mean_duration', 0):.2f}",
530
- f"{perf_data.get('min_duration', 0):.2f}",
531
- f"{perf_data.get('max_duration', 0):.2f}",
513
+ f"{perf_data.get('mean_duration', 0): .2f}",
514
+ f"{perf_data.get('min_duration', 0): .2f}",
515
+ f"{perf_data.get('max_duration', 0): .2f}",
532
516
  )
533
517
 
534
518
  self.console.print(table)
@@ -543,31 +527,28 @@ class PerformanceBenchmarkService:
543
527
  self.console.print()
544
528
 
545
529
  def _print_comparison_header(self) -> None:
546
- """Print performance comparison header."""
547
- self.console.print("[bold]📊 Performance Comparison[/bold]")
530
+ self.console.print("[bold]📊 Performance Comparison[/ bold]")
548
531
 
549
532
  def _print_comparison_metrics(self, baseline_comparison: dict[str, t.Any]) -> None:
550
- """Print individual comparison metrics with appropriate colors."""
551
533
  for metric, value in baseline_comparison.items():
552
534
  if isinstance(value, float | int) and "percent" in metric:
553
535
  color = "green" if value < 0 else "red" if value > 10 else "yellow"
554
536
  direction = "faster" if value < 0 else "slower"
555
537
  self.console.print(
556
- f" {metric}: [{color}]{abs(value):.1f}% {direction}[/{color}]",
538
+ f" {metric}: [{color}]{abs(value): .1f}% {direction}[/{color}]",
557
539
  )
558
540
 
559
541
  def _display_recommendations(self, report: PerformanceReport) -> None:
560
542
  if report.recommendations:
561
543
  self.console.print(
562
- "[bold yellow]💡 Performance Recommendations[/bold yellow]",
544
+ "[bold yellow]💡 Performance Recommendations[/ bold yellow]",
563
545
  )
564
546
  for i, rec in enumerate(report.recommendations, 1):
565
547
  self.console.print(f" {i}. {rec}")
566
548
  else:
567
- self.console.print("[green]✨ No performance issues detected![/green]")
549
+ self.console.print("[green]✨ No performance issues detected ![/ green]")
568
550
 
569
551
  def get_performance_trends(self, days: int = 7) -> dict[str, Any]:
570
- """Get performance trends over specified time period."""
571
552
  try:
572
553
  recent_history = self._get_recent_history(days)
573
554
  if not recent_history:
@@ -584,7 +565,6 @@ class PerformanceBenchmarkService:
584
565
  return {"error": f"Could not analyze trends: {e}"}
585
566
 
586
567
  def _get_recent_history(self, days: int) -> list[dict[str, Any]] | None:
587
- """Get recent performance history within specified days."""
588
568
  if not self.history_file.exists():
589
569
  return None
590
570
 
@@ -597,7 +577,6 @@ class PerformanceBenchmarkService:
597
577
  return recent_history if len(recent_history) >= 2 else None
598
578
 
599
579
  def _handle_insufficient_trend_data(self) -> dict[str, str]:
600
- """Handle cases where insufficient data is available for trend analysis."""
601
580
  if not self.history_file.exists():
602
581
  return {"error": "No performance history available"}
603
582
  return {"error": "Insufficient data for trend analysis"}
@@ -605,7 +584,6 @@ class PerformanceBenchmarkService:
605
584
  def _add_duration_trends(
606
585
  self, recent_history: list[dict[str, Any]], trends: dict[str, Any]
607
586
  ) -> None:
608
- """Add overall duration trends to results."""
609
587
  durations = [r["total_duration"] for r in recent_history]
610
588
  trends["duration_trend"] = {
611
589
  "current": durations[-1],
@@ -616,7 +594,6 @@ class PerformanceBenchmarkService:
616
594
  def _add_component_trends(
617
595
  self, recent_history: list[dict[str, Any]], trends: dict[str, Any]
618
596
  ) -> None:
619
- """Add component-level trends to results."""
620
597
  component_trends = {}
621
598
  latest_components = recent_history[-1].get("component_durations", {})
622
599
 
@@ -639,7 +616,6 @@ class PerformanceBenchmarkService:
639
616
  recent_history: list[dict[str, Any]],
640
617
  component: str,
641
618
  ) -> list[float]:
642
- """Extract duration data for a specific component."""
643
619
  return [
644
620
  r.get("component_durations", {}).get(component)
645
621
  for r in recent_history
@@ -647,7 +623,6 @@ class PerformanceBenchmarkService:
647
623
  ]
648
624
 
649
625
  def _determine_trend_direction(self, durations: list[float]) -> str:
650
- """Determine if trend is improving or degrading."""
651
626
  current = durations[-1]
652
627
  historical_average = statistics.mean(durations[:-1])
653
628
  return "improving" if current < historical_average else "degrading"