crackerjack 0.33.0__py3-none-any.whl → 0.33.2__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as they appear in their respective public registries, and is provided for informational purposes only.

Potentially problematic release.


This version of crackerjack might be problematic.

Files changed (198)
  1. crackerjack/__main__.py +1350 -34
  2. crackerjack/adapters/__init__.py +17 -0
  3. crackerjack/adapters/lsp_client.py +358 -0
  4. crackerjack/adapters/rust_tool_adapter.py +194 -0
  5. crackerjack/adapters/rust_tool_manager.py +193 -0
  6. crackerjack/adapters/skylos_adapter.py +231 -0
  7. crackerjack/adapters/zuban_adapter.py +560 -0
  8. crackerjack/agents/base.py +7 -3
  9. crackerjack/agents/coordinator.py +271 -33
  10. crackerjack/agents/documentation_agent.py +9 -15
  11. crackerjack/agents/dry_agent.py +3 -15
  12. crackerjack/agents/formatting_agent.py +1 -1
  13. crackerjack/agents/import_optimization_agent.py +36 -180
  14. crackerjack/agents/performance_agent.py +17 -98
  15. crackerjack/agents/performance_helpers.py +7 -31
  16. crackerjack/agents/proactive_agent.py +1 -3
  17. crackerjack/agents/refactoring_agent.py +16 -85
  18. crackerjack/agents/refactoring_helpers.py +7 -42
  19. crackerjack/agents/security_agent.py +9 -48
  20. crackerjack/agents/test_creation_agent.py +356 -513
  21. crackerjack/agents/test_specialist_agent.py +0 -4
  22. crackerjack/api.py +6 -25
  23. crackerjack/cli/cache_handlers.py +204 -0
  24. crackerjack/cli/cache_handlers_enhanced.py +683 -0
  25. crackerjack/cli/facade.py +100 -0
  26. crackerjack/cli/handlers.py +224 -9
  27. crackerjack/cli/interactive.py +6 -4
  28. crackerjack/cli/options.py +642 -55
  29. crackerjack/cli/utils.py +2 -1
  30. crackerjack/code_cleaner.py +58 -117
  31. crackerjack/config/global_lock_config.py +8 -48
  32. crackerjack/config/hooks.py +53 -62
  33. crackerjack/core/async_workflow_orchestrator.py +24 -34
  34. crackerjack/core/autofix_coordinator.py +3 -17
  35. crackerjack/core/enhanced_container.py +4 -13
  36. crackerjack/core/file_lifecycle.py +12 -89
  37. crackerjack/core/performance.py +2 -2
  38. crackerjack/core/performance_monitor.py +15 -55
  39. crackerjack/core/phase_coordinator.py +104 -204
  40. crackerjack/core/resource_manager.py +14 -90
  41. crackerjack/core/service_watchdog.py +62 -95
  42. crackerjack/core/session_coordinator.py +149 -0
  43. crackerjack/core/timeout_manager.py +14 -72
  44. crackerjack/core/websocket_lifecycle.py +13 -78
  45. crackerjack/core/workflow_orchestrator.py +171 -174
  46. crackerjack/docs/INDEX.md +11 -0
  47. crackerjack/docs/generated/api/API_REFERENCE.md +10895 -0
  48. crackerjack/docs/generated/api/CLI_REFERENCE.md +109 -0
  49. crackerjack/docs/generated/api/CROSS_REFERENCES.md +1755 -0
  50. crackerjack/docs/generated/api/PROTOCOLS.md +3 -0
  51. crackerjack/docs/generated/api/SERVICES.md +1252 -0
  52. crackerjack/documentation/__init__.py +31 -0
  53. crackerjack/documentation/ai_templates.py +756 -0
  54. crackerjack/documentation/dual_output_generator.py +765 -0
  55. crackerjack/documentation/mkdocs_integration.py +518 -0
  56. crackerjack/documentation/reference_generator.py +977 -0
  57. crackerjack/dynamic_config.py +55 -50
  58. crackerjack/executors/async_hook_executor.py +10 -15
  59. crackerjack/executors/cached_hook_executor.py +117 -43
  60. crackerjack/executors/hook_executor.py +8 -34
  61. crackerjack/executors/hook_lock_manager.py +26 -183
  62. crackerjack/executors/individual_hook_executor.py +13 -11
  63. crackerjack/executors/lsp_aware_hook_executor.py +270 -0
  64. crackerjack/executors/tool_proxy.py +417 -0
  65. crackerjack/hooks/lsp_hook.py +79 -0
  66. crackerjack/intelligence/adaptive_learning.py +25 -10
  67. crackerjack/intelligence/agent_orchestrator.py +2 -5
  68. crackerjack/intelligence/agent_registry.py +34 -24
  69. crackerjack/intelligence/agent_selector.py +5 -7
  70. crackerjack/interactive.py +17 -6
  71. crackerjack/managers/async_hook_manager.py +0 -1
  72. crackerjack/managers/hook_manager.py +79 -1
  73. crackerjack/managers/publish_manager.py +44 -8
  74. crackerjack/managers/test_command_builder.py +1 -15
  75. crackerjack/managers/test_executor.py +1 -3
  76. crackerjack/managers/test_manager.py +98 -7
  77. crackerjack/managers/test_manager_backup.py +10 -9
  78. crackerjack/mcp/cache.py +2 -2
  79. crackerjack/mcp/client_runner.py +1 -1
  80. crackerjack/mcp/context.py +191 -68
  81. crackerjack/mcp/dashboard.py +7 -5
  82. crackerjack/mcp/enhanced_progress_monitor.py +31 -28
  83. crackerjack/mcp/file_monitor.py +30 -23
  84. crackerjack/mcp/progress_components.py +31 -21
  85. crackerjack/mcp/progress_monitor.py +50 -53
  86. crackerjack/mcp/rate_limiter.py +6 -6
  87. crackerjack/mcp/server_core.py +17 -16
  88. crackerjack/mcp/service_watchdog.py +2 -1
  89. crackerjack/mcp/state.py +4 -7
  90. crackerjack/mcp/task_manager.py +11 -9
  91. crackerjack/mcp/tools/core_tools.py +173 -32
  92. crackerjack/mcp/tools/error_analyzer.py +3 -2
  93. crackerjack/mcp/tools/execution_tools.py +8 -10
  94. crackerjack/mcp/tools/execution_tools_backup.py +42 -30
  95. crackerjack/mcp/tools/intelligence_tool_registry.py +7 -5
  96. crackerjack/mcp/tools/intelligence_tools.py +5 -2
  97. crackerjack/mcp/tools/monitoring_tools.py +33 -70
  98. crackerjack/mcp/tools/proactive_tools.py +24 -11
  99. crackerjack/mcp/tools/progress_tools.py +5 -8
  100. crackerjack/mcp/tools/utility_tools.py +20 -14
  101. crackerjack/mcp/tools/workflow_executor.py +62 -40
  102. crackerjack/mcp/websocket/app.py +8 -0
  103. crackerjack/mcp/websocket/endpoints.py +352 -357
  104. crackerjack/mcp/websocket/jobs.py +40 -57
  105. crackerjack/mcp/websocket/monitoring_endpoints.py +2935 -0
  106. crackerjack/mcp/websocket/server.py +7 -25
  107. crackerjack/mcp/websocket/websocket_handler.py +6 -17
  108. crackerjack/mixins/__init__.py +0 -2
  109. crackerjack/mixins/error_handling.py +1 -70
  110. crackerjack/models/config.py +12 -1
  111. crackerjack/models/config_adapter.py +49 -1
  112. crackerjack/models/protocols.py +122 -122
  113. crackerjack/models/resource_protocols.py +55 -210
  114. crackerjack/monitoring/ai_agent_watchdog.py +13 -13
  115. crackerjack/monitoring/metrics_collector.py +426 -0
  116. crackerjack/monitoring/regression_prevention.py +8 -8
  117. crackerjack/monitoring/websocket_server.py +643 -0
  118. crackerjack/orchestration/advanced_orchestrator.py +11 -6
  119. crackerjack/orchestration/coverage_improvement.py +3 -3
  120. crackerjack/orchestration/execution_strategies.py +26 -6
  121. crackerjack/orchestration/test_progress_streamer.py +8 -5
  122. crackerjack/plugins/base.py +2 -2
  123. crackerjack/plugins/hooks.py +7 -0
  124. crackerjack/plugins/managers.py +11 -8
  125. crackerjack/security/__init__.py +0 -1
  126. crackerjack/security/audit.py +6 -35
  127. crackerjack/services/anomaly_detector.py +392 -0
  128. crackerjack/services/api_extractor.py +615 -0
  129. crackerjack/services/backup_service.py +2 -2
  130. crackerjack/services/bounded_status_operations.py +15 -152
  131. crackerjack/services/cache.py +127 -1
  132. crackerjack/services/changelog_automation.py +395 -0
  133. crackerjack/services/config.py +15 -9
  134. crackerjack/services/config_merge.py +19 -80
  135. crackerjack/services/config_template.py +506 -0
  136. crackerjack/services/contextual_ai_assistant.py +48 -22
  137. crackerjack/services/coverage_badge_service.py +171 -0
  138. crackerjack/services/coverage_ratchet.py +27 -25
  139. crackerjack/services/debug.py +3 -3
  140. crackerjack/services/dependency_analyzer.py +460 -0
  141. crackerjack/services/dependency_monitor.py +14 -11
  142. crackerjack/services/documentation_generator.py +491 -0
  143. crackerjack/services/documentation_service.py +675 -0
  144. crackerjack/services/enhanced_filesystem.py +6 -5
  145. crackerjack/services/enterprise_optimizer.py +865 -0
  146. crackerjack/services/error_pattern_analyzer.py +676 -0
  147. crackerjack/services/file_hasher.py +1 -1
  148. crackerjack/services/git.py +8 -25
  149. crackerjack/services/health_metrics.py +10 -8
  150. crackerjack/services/heatmap_generator.py +735 -0
  151. crackerjack/services/initialization.py +11 -30
  152. crackerjack/services/input_validator.py +5 -97
  153. crackerjack/services/intelligent_commit.py +327 -0
  154. crackerjack/services/log_manager.py +15 -12
  155. crackerjack/services/logging.py +4 -3
  156. crackerjack/services/lsp_client.py +628 -0
  157. crackerjack/services/memory_optimizer.py +19 -87
  158. crackerjack/services/metrics.py +42 -33
  159. crackerjack/services/parallel_executor.py +9 -67
  160. crackerjack/services/pattern_cache.py +1 -1
  161. crackerjack/services/pattern_detector.py +6 -6
  162. crackerjack/services/performance_benchmarks.py +18 -59
  163. crackerjack/services/performance_cache.py +20 -81
  164. crackerjack/services/performance_monitor.py +27 -95
  165. crackerjack/services/predictive_analytics.py +510 -0
  166. crackerjack/services/quality_baseline.py +234 -0
  167. crackerjack/services/quality_baseline_enhanced.py +646 -0
  168. crackerjack/services/quality_intelligence.py +785 -0
  169. crackerjack/services/regex_patterns.py +618 -524
  170. crackerjack/services/regex_utils.py +43 -123
  171. crackerjack/services/secure_path_utils.py +5 -164
  172. crackerjack/services/secure_status_formatter.py +30 -141
  173. crackerjack/services/secure_subprocess.py +11 -92
  174. crackerjack/services/security.py +9 -41
  175. crackerjack/services/security_logger.py +12 -24
  176. crackerjack/services/server_manager.py +124 -16
  177. crackerjack/services/status_authentication.py +16 -159
  178. crackerjack/services/status_security_manager.py +4 -131
  179. crackerjack/services/thread_safe_status_collector.py +19 -125
  180. crackerjack/services/unified_config.py +21 -13
  181. crackerjack/services/validation_rate_limiter.py +5 -54
  182. crackerjack/services/version_analyzer.py +459 -0
  183. crackerjack/services/version_checker.py +1 -1
  184. crackerjack/services/websocket_resource_limiter.py +10 -144
  185. crackerjack/services/zuban_lsp_service.py +390 -0
  186. crackerjack/slash_commands/__init__.py +2 -7
  187. crackerjack/slash_commands/run.md +2 -2
  188. crackerjack/tools/validate_input_validator_patterns.py +14 -40
  189. crackerjack/tools/validate_regex_patterns.py +19 -48
  190. {crackerjack-0.33.0.dist-info → crackerjack-0.33.2.dist-info}/METADATA +196 -25
  191. crackerjack-0.33.2.dist-info/RECORD +229 -0
  192. crackerjack/CLAUDE.md +0 -207
  193. crackerjack/RULES.md +0 -380
  194. crackerjack/py313.py +0 -234
  195. crackerjack-0.33.0.dist-info/RECORD +0 -187
  196. {crackerjack-0.33.0.dist-info → crackerjack-0.33.2.dist-info}/WHEEL +0 -0
  197. {crackerjack-0.33.0.dist-info → crackerjack-0.33.2.dist-info}/entry_points.txt +0 -0
  198. {crackerjack-0.33.0.dist-info → crackerjack-0.33.2.dist-info}/licenses/LICENSE +0 -0
crackerjack/services/parallel_executor.py

@@ -1,9 +1,3 @@
-"""Parallel execution coordinator for hooks and other independent operations.
-
-This module provides safe parallel execution of hooks while respecting
-security levels, dependencies, and resource constraints.
-"""
-
 import asyncio
 import time
 import typing as t
@@ -18,19 +12,13 @@ from crackerjack.services.performance_cache import get_performance_cache
 
 
 class ExecutionStrategy(Enum):
-    """Execution strategy for parallel operations."""
-
     SEQUENTIAL = "sequential"
-    PARALLEL_SAFE = (
-        "parallel_safe"  # Only parallel execution of safe, independent hooks
-    )
-    PARALLEL_AGGRESSIVE = "parallel_aggressive"  # More aggressive parallelization
+    PARALLEL_SAFE = "parallel_safe"
+    PARALLEL_AGGRESSIVE = "parallel_aggressive"
 
 
 @dataclass
 class ExecutionGroup:
-    """Group of operations that can be executed together."""
-
     name: str
     operations: list[t.Any]
     max_workers: int = 3
@@ -41,21 +29,17 @@ class ExecutionGroup:
 
 
 @dataclass
 class ExecutionResult:
-    """Result of executing an operation."""
-
     operation_id: str
     success: bool
     duration_seconds: float
     output: str = ""
     error: str = ""
     exit_code: int = 0
-    metadata: dict[str, t.Any] = field(default_factory=dict)
+    metadata: dict[str, t.Any] = field(default_factory=dict[str, t.Any])
 
 
 @dataclass
 class ParallelExecutionResult:
-    """Result of parallel execution batch."""
-
     group_name: str
     total_operations: int
     successful_operations: int
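Worth noting for readers of the `default_factory=dict[str, t.Any]` change above: subscripted generic aliases have been callable since Python 3.9, so the new factory still produces a plain empty dict at runtime. A quick check, not from the package:

```python
# Subscripted generic aliases are callable in Python 3.9+; the type
# arguments are ignored at runtime and a plain instance comes back.
assert dict[str, int]() == {}
assert list[str]() == []
```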
@@ -65,7 +49,6 @@ class ParallelExecutionResult:
 
     @property
     def success_rate(self) -> float:
-        """Calculate success rate."""
         return (
             self.successful_operations / self.total_operations
             if self.total_operations > 0
@@ -74,13 +57,10 @@
 
     @property
     def overall_success(self) -> bool:
-        """Check if all operations succeeded."""
         return self.failed_operations == 0
 
 
 class ParallelHookExecutor:
-    """Parallel executor for hook operations with safety constraints."""
-
     def __init__(
         self,
         max_workers: int = 3,
@@ -97,7 +77,6 @@ class ParallelHookExecutor:
         self,
         hooks: list[HookDefinition],
     ) -> dict[str, list[HookDefinition]]:
-        """Analyze hook dependencies and group hooks for parallel execution."""
         groups: dict[str, list[HookDefinition]] = {
             "formatting": [],
             "validation": [],
@@ -115,7 +94,6 @@
             else:
                 groups["comprehensive"].append(hook)
 
-        # Remove empty groups
         return {k: v for k, v in groups.items() if v}
 
     def can_execute_in_parallel(
@@ -123,25 +101,18 @@
         hook1: HookDefinition,
         hook2: HookDefinition,
     ) -> bool:
-        """Check if two hooks can be safely executed in parallel."""
-        # Never parallelize different security levels for safety
         if hook1.security_level != hook2.security_level:
             return False
 
-        # Don't parallelize hooks that modify files with hooks that read files
         if hook1.is_formatting and not hook2.is_formatting:
             return False
 
-        # Safe combinations for parallel execution
         safe_parallel_combinations = [
-            # All formatting hooks can run in parallel
             (lambda h: h.is_formatting, lambda h: h.is_formatting),
-            # Validation hooks can run in parallel
             (
                 lambda h: h.name in {"check-yaml", "check-json", "check-toml"},
                 lambda h: h.name in {"check-yaml", "check-json", "check-toml"},
             ),
-            # Same security level non-formatting hooks
            (
                lambda h: not h.is_formatting
                and h.security_level == SecurityLevel.MEDIUM,
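The rules this hunk strips the comments from are compact enough to restate in isolation. A minimal sketch of the pairwise-safety check, assuming only the three `HookDefinition` fields the diff references (name, is_formatting, security_level); illustrative, not the shipped class:

```python
from dataclasses import dataclass
from enum import Enum


class SecurityLevel(Enum):  # stand-in for crackerjack's SecurityLevel
    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"


@dataclass
class Hook:  # reduced stand-in for HookDefinition
    name: str
    is_formatting: bool
    security_level: SecurityLevel


def can_run_in_parallel(h1: Hook, h2: Hook) -> bool:
    if h1.security_level != h2.security_level:
        return False  # never mix security levels in one batch
    if h1.is_formatting and not h2.is_formatting:
        return False  # a file-modifying hook must not overlap a file-reading one
    return True


fmt = Hook("ruff-format", True, SecurityLevel.MEDIUM)
lint = Hook("check-yaml", False, SecurityLevel.MEDIUM)
print(can_run_in_parallel(fmt, lint))  # False: formatting vs. non-formatting
```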
@@ -161,13 +132,11 @@
         hooks: list[HookDefinition],
         hook_runner: t.Callable[[HookDefinition], t.Awaitable[ExecutionResult]],
     ) -> ParallelExecutionResult:
-        """Execute hooks in parallel where safe, sequential otherwise."""
         start_time = time.time()
 
         if self.strategy == ExecutionStrategy.SEQUENTIAL:
             return await self._execute_sequential(hooks, hook_runner, start_time)
 
-        # Group hooks for parallel execution
         groups = self.analyze_hook_dependencies(hooks)
         all_results: list[ExecutionResult] = []
@@ -177,12 +146,10 @@
 
         for group_name, group_hooks in groups.items():
             if len(group_hooks) == 1 or not self._can_parallelize_group(group_hooks):
-                # Execute single hook or non-parallelizable group sequentially
                 for hook in group_hooks:
                     result = await hook_runner(hook)
                     all_results.append(result)
             else:
-                # Execute group in parallel
                 group_results = await self._execute_group_parallel(
                     group_hooks,
                     hook_runner,
@@ -209,7 +176,6 @@
         hook_runner: t.Callable[[HookDefinition], t.Awaitable[ExecutionResult]],
         start_time: float,
     ) -> ParallelExecutionResult:
-        """Execute hooks sequentially (fallback)."""
         results: list[ExecutionResult] = []
 
         for hook in hooks:
@@ -230,11 +196,9 @@
         )
 
     def _can_parallelize_group(self, hooks: list[HookDefinition]) -> bool:
-        """Check if a group of hooks can be parallelized."""
         if len(hooks) < 2:
             return False
 
-        # Check pairwise compatibility
         for i, hook1 in enumerate(hooks):
             for hook2 in hooks[i + 1 :]:
                 if not self.can_execute_in_parallel(hook1, hook2):
@@ -248,28 +212,22 @@
         hook_runner: t.Callable[[HookDefinition], t.Awaitable[ExecutionResult]],
         group_name: str,
     ) -> list[ExecutionResult]:
-        """Execute a group of hooks in parallel."""
         self._logger.debug(f"Executing {len(hooks)} {group_name} hooks in parallel")
 
-        # Limit parallelism for safety
         max_workers = min(self.max_workers, len(hooks))
 
-        # Create semaphore to limit concurrent executions
         semaphore = asyncio.Semaphore(max_workers)
 
         async def run_with_semaphore(hook: HookDefinition) -> ExecutionResult:
             async with semaphore:
                 return await hook_runner(hook)
 
-        # Execute all hooks concurrently with limited parallelism
         tasks = [run_with_semaphore(hook) for hook in hooks]
         results = await asyncio.gather(*tasks, return_exceptions=True)
 
-        # Process results and handle exceptions
         processed_results: list[ExecutionResult] = []
         for i, result in enumerate(results):
             if isinstance(result, Exception):
-                # Create error result for exception
                 error_result = ExecutionResult(
                     operation_id=hooks[i].name,
                     success=False,
@@ -281,8 +239,7 @@
                     f"Hook {hooks[i].name} failed with exception: {result}"
                 )
             else:
-                # result must be ExecutionResult here due to type narrowing
-                processed_results.append(result)  # type: ignore[arg-type]
+                processed_results.append(result)
 
         successful = sum(1 for r in processed_results if r.success)
         self._logger.info(
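The concurrency pattern in `_execute_group_parallel` stands on its own: a semaphore caps in-flight work, and `asyncio.gather(..., return_exceptions=True)` keeps one failing hook from cancelling the rest, with `Exception` entries converted to error results afterwards. A self-contained sketch of the same shape (the fake hook coroutine is a stand-in for `hook_runner`):

```python
import asyncio


async def run_all_bounded(coros, max_workers: int = 3):
    # Cap concurrency with a semaphore, as _execute_group_parallel does.
    semaphore = asyncio.Semaphore(max_workers)

    async def run_one(coro):
        async with semaphore:
            return await coro

    # return_exceptions=True keeps one failure from cancelling the batch;
    # Exception entries are then turned into error results by the caller.
    return await asyncio.gather(*(run_one(c) for c in coros), return_exceptions=True)


async def main() -> None:
    async def fake_hook(i: int) -> str:  # stand-in for hook_runner(hook)
        await asyncio.sleep(0.01)
        if i == 3:
            raise RuntimeError("hook failed")
        return f"hook_{i} ok"

    results = await run_all_bounded([fake_hook(i) for i in range(6)])
    for r in results:
        print("error:" if isinstance(r, Exception) else "ok:", r)


asyncio.run(main())
```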
@@ -293,8 +250,6 @@
 
 
 class AsyncCommandExecutor:
-    """Asynchronous command executor with optimization and caching."""
-
     def __init__(
         self,
         max_workers: int = 4,
@@ -313,8 +268,6 @@
         timeout: int = 60,
         cache_ttl: int = 120,
     ) -> ExecutionResult:
-        """Execute a command asynchronously with caching."""
-        # Check cache first
         if self.cache_results:
             cached_result = await self._get_cached_result(command, cwd)
             if cached_result:
@@ -323,12 +276,10 @@
                 )
                 return cached_result
 
-        # Execute command
         start_time = time.time()
         result = await self._run_command_async(command, cwd, timeout)
         result.duration_seconds = time.time() - start_time
 
-        # Cache successful results
         if self.cache_results and result.success:
             await self._cache_result(command, result, cache_ttl, cwd)
 
@@ -339,14 +290,12 @@
         commands: list[tuple[list[str], Path | None]],
         timeout: int = 60,
     ) -> list[ExecutionResult]:
-        """Execute multiple commands concurrently."""
         self._logger.info(f"Executing {len(commands)} commands in batch")
 
         tasks = [self.execute_command(cmd, cwd, timeout) for cmd, cwd in commands]
 
         results = await asyncio.gather(*tasks, return_exceptions=True)
 
-        # Process results and handle exceptions
         processed_results: list[ExecutionResult] = []
         for i, result in enumerate(results):
             if isinstance(result, Exception):
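With the comments gone, `execute_command` is a textbook cache-aside flow: consult the cache, run on a miss, store only successful results with a TTL. A compressed sketch of that flow, with a plain dict standing in for the real command cache:

```python
import subprocess
import time

_cache: dict[str, tuple[float, str]] = {}  # command key -> (expires_at, stdout)


def run_cached(command: list[str], ttl: int = 120) -> str:
    key = " ".join(command)
    hit = _cache.get(key)
    if hit is not None and hit[0] > time.time():
        return hit[1]  # cache hit: skip the subprocess entirely

    result = subprocess.run(command, capture_output=True, text=True)
    if result.returncode == 0:  # cache successful results only
        _cache[key] = (time.time() + ttl, result.stdout)
    return result.stdout


print(run_cached(["echo", "hello"]))  # miss: executes the command
print(run_cached(["echo", "hello"]))  # hit: served from the cache
```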
@@ -358,8 +307,7 @@
                 )
                 processed_results.append(error_result)
             else:
-                # result must be ExecutionResult here due to type narrowing
-                processed_results.append(result)  # type: ignore[arg-type]
+                processed_results.append(result)
 
         successful = sum(1 for r in processed_results if r.success)
         self._logger.info(
@@ -374,7 +322,6 @@
         cwd: Path | None = None,
         timeout: int = 60,
     ) -> ExecutionResult:
-        """Run command asynchronously in thread pool."""
         loop = asyncio.get_event_loop()
 
         def run_sync_command() -> ExecutionResult:
@@ -393,7 +340,7 @@
             return ExecutionResult(
                 operation_id=" ".join(command),
                 success=result.returncode == 0,
-                duration_seconds=0.0,  # Set by caller
+                duration_seconds=0.0,
                 output=result.stdout,
                 error=result.stderr,
                 exit_code=result.returncode,
@@ -423,10 +370,10 @@
         command: list[str],
         cwd: Path | None = None,
     ) -> ExecutionResult | None:
-        """Get cached command result."""
         from crackerjack.services.performance_cache import get_command_cache
 
-        return get_command_cache().get_command_result(command, cwd)
+        cache_result = get_command_cache().get_command_result(command, cwd)
+        return t.cast(ExecutionResult | None, cache_result)
 
     async def _cache_result(
         self,
@@ -435,19 +382,16 @@
         ttl_seconds: int,
         cwd: Path | None = None,
     ) -> None:
-        """Cache command result."""
         from crackerjack.services.performance_cache import get_command_cache
 
         command_cache = get_command_cache()
         command_cache.set_command_result(command, result, cwd, ttl_seconds)
 
-    def __del__(self):
-        """Clean up thread pool."""
+    def __del__(self) -> None:
         if hasattr(self, "_thread_pool"):
             self._thread_pool.shutdown(wait=False)
 
 
-# Global executor instances
 _parallel_executor: ParallelHookExecutor | None = None
 _async_executor: AsyncCommandExecutor | None = None
 
@@ -456,7 +400,6 @@ def get_parallel_executor(
     max_workers: int = 3,
     strategy: ExecutionStrategy = ExecutionStrategy.PARALLEL_SAFE,
 ) -> ParallelHookExecutor:
-    """Get global parallel hook executor instance."""
     global _parallel_executor
     if _parallel_executor is None:
         _parallel_executor = ParallelHookExecutor(
@@ -467,7 +410,6 @@
 
 
 def get_async_executor(max_workers: int = 4) -> AsyncCommandExecutor:
-    """Get global async command executor instance."""
     global _async_executor
     if _async_executor is None:
         _async_executor = AsyncCommandExecutor(max_workers=max_workers)
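Both accessors use the same lazily initialized module-level singleton pattern, so repeated calls share one executor. A hedged usage sketch; the signatures come from the hunks above, while the concrete commands are invented for illustration:

```python
import asyncio
from pathlib import Path

# Import path taken from the file list above.
from crackerjack.services.parallel_executor import (
    ExecutionStrategy,
    get_async_executor,
    get_parallel_executor,
)


async def main() -> None:
    # Repeated calls return the same lazily created instances.
    hooks = get_parallel_executor(max_workers=3, strategy=ExecutionStrategy.PARALLEL_SAFE)
    assert hooks is get_parallel_executor()

    executor = get_async_executor(max_workers=4)
    results = await executor.execute_commands_batch(
        [(["echo", "a"], None), (["echo", "b"], Path("."))],  # example commands
        timeout=30,
    )
    print([r.success for r in results])


asyncio.run(main())
```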
crackerjack/services/pattern_cache.py

@@ -195,7 +195,7 @@ class PatternCache:
         if not self._patterns:
             return {"total_patterns": 0}
 
-        patterns_by_type = {}
+        patterns_by_type: dict[str, int] = {}
         total_usage = 0
         avg_success_rate = 0.0
 
crackerjack/services/pattern_detector.py

@@ -66,7 +66,7 @@ class PatternDetector:
         self.logger.info("Starting proactive anti-pattern analysis")
 
         anti_patterns = []
-        python_files = list(self.project_path.glob("**/*.py"))
+        python_files = list[t.Any](self.project_path.glob("**/*.py"))
 
         for file_path in python_files:
             if self._should_skip_file(file_path):
@@ -155,7 +155,7 @@ class PatternDetector:
         anti_patterns = []
 
         lines = content.split("\n")
-        line_groups = {}
+        line_groups: dict[str, list[int]] = {}
 
         for i, line in enumerate(lines, 1):
             stripped = line.strip()
@@ -197,7 +197,7 @@ class PatternDetector:
                     (
                         node.lineno,
                         "Nested loop detected-potential O(n²) complexity",
-                        "Consider using dictionary lookups or set operations",
+                        "Consider using dictionary lookups or set[t.Any] operations",
                     )
                 )
                 break
@@ -212,7 +212,7 @@
                     (
                         stmt.lineno,
                         "List concatenation in loop-inefficient",
-                        "Use list.append() and join at the end",
+                        "Use list[t.Any].append() and join at the end",
                     )
                 )
 
@@ -254,10 +254,10 @@
     ) -> list[AntiPattern]:
         anti_patterns = []
 
-        if "/tmp/" in content or "C:\\" in content:  # nosec B108
+        if "/tmp/" in content or "C: \\" in content:  # nosec B108
             lines = content.split("\n")
             for i, line in enumerate(lines, 1):
-                if "/tmp/" in line or "C:\\" in line:  # nosec B108
+                if "/tmp/" in line or "C: \\" in line:  # nosec B108
                     anti_patterns.append(
                         AntiPattern(
                             pattern_type="security_risks",
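For context, the two performance detections above point at a standard pair of fixes: replace nested scans with a dictionary lookup, and replace `+=` string building in a loop with append-then-join. A small illustrative sketch, not code from the package:

```python
users = [{"id": 1, "name": "ada"}, {"id": 2, "name": "bob"}]
orders = [{"user_id": 2, "total": 9.5}, {"user_id": 1, "total": 3.0}]

# Nested scan: every order walks the whole user list, O(n * m).
slow = [
    (u["name"], o["total"]) for o in orders for u in users if u["id"] == o["user_id"]
]

# Build a dictionary once, then do constant-time lookups, O(n + m).
by_id = {u["id"]: u for u in users}
fast = [(by_id[o["user_id"]]["name"], o["total"]) for o in orders]
assert slow == fast

# String building: collect parts and join once instead of += per iteration.
parts: list[str] = []
for i in range(5):
    parts.append(f"row_{i}")
report = "\n".join(parts)
print(report)
```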
crackerjack/services/performance_benchmarks.py

@@ -1,13 +1,8 @@
-"""Performance benchmarking service to measure Phase 3 optimization improvements.
-
-This module provides comprehensive benchmarking capabilities to measure the performance
-gains from async workflows, caching, memory optimization, and parallel execution.
-"""
-
 import asyncio
 import json
 import statistics
 import time
+import typing as t
 from dataclasses import dataclass, field
 from datetime import datetime
 from pathlib import Path
@@ -20,8 +15,6 @@ from crackerjack.services.performance_monitor import get_performance_monitor
 
 @dataclass
 class BenchmarkResult:
-    """Individual benchmark test result."""
-
     test_name: str
     baseline_time_seconds: float
     optimized_time_seconds: float
@@ -34,7 +27,6 @@
 
     @property
     def time_improvement_percentage(self) -> float:
-        """Calculate time improvement percentage."""
         if self.baseline_time_seconds == 0:
             return 0.0
         return (
@@ -45,7 +37,6 @@
 
     @property
     def memory_improvement_percentage(self) -> float:
-        """Calculate memory improvement percentage."""
         if self.memory_baseline_mb == 0:
             return 0.0
         return (
@@ -56,28 +47,23 @@
 
     @property
     def cache_hit_ratio(self) -> float:
-        """Calculate cache hit ratio."""
         total = self.cache_hits + self.cache_misses
         return self.cache_hits / total if total > 0 else 0.0
 
     @property
     def parallelization_ratio(self) -> float:
-        """Calculate parallelization ratio."""
         total = self.parallel_operations + self.sequential_operations
         return self.parallel_operations / total if total > 0 else 0.0
 
 
 @dataclass
 class BenchmarkSuite:
-    """Collection of benchmark results."""
-
     suite_name: str
     results: list[BenchmarkResult] = field(default_factory=list)
     run_timestamp: datetime = field(default_factory=datetime.now)
 
     @property
     def average_time_improvement(self) -> float:
-        """Calculate average time improvement across all tests."""
         if not self.results:
             return 0.0
         improvements = [r.time_improvement_percentage for r in self.results]
@@ -85,7 +71,6 @@ class BenchmarkSuite:
 
     @property
     def average_memory_improvement(self) -> float:
-        """Calculate average memory improvement across all tests."""
         if not self.results:
             return 0.0
         improvements = [r.memory_improvement_percentage for r in self.results]
@@ -93,81 +78,67 @@
 
     @property
     def overall_cache_hit_ratio(self) -> float:
-        """Calculate overall cache hit ratio."""
         total_hits = sum(r.cache_hits for r in self.results)
         total_misses = sum(r.cache_misses for r in self.results)
         total = total_hits + total_misses
         return total_hits / total if total > 0 else 0.0
 
     def add_result(self, result: BenchmarkResult) -> None:
-        """Add a benchmark result to the suite."""
         self.results.append(result)
 
 
 class PerformanceBenchmarker:
-    """Service for benchmarking Phase 3 performance optimizations."""
-
-    def __init__(self):
+    def __init__(self) -> None:
         self._logger = get_logger("crackerjack.benchmarker")
         self._monitor = get_performance_monitor()
         self._memory_optimizer = get_memory_optimizer()
         self._cache = get_performance_cache()
 
-        # Benchmark configurations
         self._test_iterations = 3
         self._warmup_iterations = 1
 
     async def run_comprehensive_benchmark(self) -> BenchmarkSuite:
-        """Run comprehensive benchmark suite comparing baseline vs optimized performance."""
         self._logger.info("Starting comprehensive performance benchmark")
 
         suite = BenchmarkSuite("Phase 3 Optimization Benchmark")
 
-        # Memory optimization benchmark
         suite.add_result(await self._benchmark_memory_optimization())
 
-        # Caching benchmark
         suite.add_result(await self._benchmark_caching_performance())
 
-        # Async workflow benchmark
         suite.add_result(await self._benchmark_async_workflows())
 
         self._logger.info(
             f"Benchmark complete. Average improvements: "
-            f"Time: {suite.average_time_improvement:.1f}%, "
-            f"Memory: {suite.average_memory_improvement:.1f}%, "
-            f"Cache ratio: {suite.overall_cache_hit_ratio:.2f}"
+            f"Time: {suite.average_time_improvement: .1f}%, "
+            f"Memory: {suite.average_memory_improvement: .1f}%, "
+            f"Cache ratio: {suite.overall_cache_hit_ratio: .2f}"
        )
 
         return suite
 
     async def _benchmark_memory_optimization(self) -> BenchmarkResult:
-        """Benchmark memory optimization improvements."""
         self._logger.debug("Benchmarking memory optimization")
 
-        # Baseline: Create objects without optimization
         baseline_start = time.time()
         baseline_memory_start = self._memory_optimizer.record_checkpoint(
             "baseline_start"
         )
 
-        # Simulate heavy object creation (baseline)
         heavy_objects = []
-        for i in range(50):  # Reduced for faster testing
+        for i in range(50):
             obj = {
-                "data": f"heavy_data_{i}" * 100,  # Smaller for testing
+                "data": f"heavy_data_{i}" * 100,
                 "metadata": {"created": time.time(), "index": i},
-                "payload": list(range(100)),
+                "payload": list[t.Any](range(100)),
             }
             heavy_objects.append(obj)
 
         baseline_time = time.time() - baseline_start
         baseline_memory_peak = self._memory_optimizer.record_checkpoint("baseline_peak")
 
-        # Clean up baseline objects
         del heavy_objects
 
-        # Optimized: Use lazy loading
         optimized_start = time.time()
         optimized_memory_start = self._memory_optimizer.record_checkpoint(
             "optimized_start"
@@ -178,11 +149,11 @@ class PerformanceBenchmarker:
         lazy_objects = []
         for i in range(50):
 
-            def create_heavy_object(index: int = i):
+            def create_heavy_object(index: int = i) -> dict[str, t.Any]:
                 return {
                     "data": f"heavy_data_{index}" * 100,
                     "metadata": {"created": time.time(), "index": index},
-                    "payload": list(range(100)),
+                    "payload": list[t.Any](range(100)),
                 }
 
             lazy_obj = LazyLoader(create_heavy_object, f"heavy_object_{i}")
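`LazyLoader` itself is defined outside this diff, so the following is only a plausible minimal shape inferred from the `LazyLoader(factory, name)` call site: a wrapper that runs its factory on first access and caches the result. Treat every detail here as an assumption:

```python
import typing as t


class LazyLoader:
    """Hypothetical minimal stand-in, inferred from the call site above."""

    def __init__(self, factory: t.Callable[[], t.Any], name: str) -> None:
        self._factory = factory
        self._name = name
        self._value: t.Any = None
        self._loaded = False

    def get(self) -> t.Any:
        # Run the factory only on first access, then reuse the cached value.
        if not self._loaded:
            self._value = self._factory()
            self._loaded = True
        return self._value


lazy = LazyLoader(lambda: {"payload": list(range(100))}, "heavy_object_0")
print(lazy.get()["payload"][:5])  # factory runs here, on first use
```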
@@ -204,20 +175,17 @@
         )
 
     async def _benchmark_caching_performance(self) -> BenchmarkResult:
-        """Benchmark caching performance improvements."""
         self._logger.debug("Benchmarking caching performance")
 
         self._cache.clear()
 
-        # Baseline: No caching
         baseline_start = time.time()
 
-        for i in range(10):  # Reduced for testing
+        for i in range(10):
             await self._simulate_expensive_operation(f"operation_{i % 3}")
 
         baseline_time = time.time() - baseline_start
 
-        # Optimized: With caching
         optimized_start = time.time()
         cache_stats_start = self._cache.get_stats()
@@ -241,18 +209,15 @@
         )
 
     async def _benchmark_async_workflows(self) -> BenchmarkResult:
-        """Benchmark async workflow improvements."""
         self._logger.debug("Benchmarking async workflows")
 
-        # Baseline: Sequential operations
         baseline_start = time.time()
 
-        for i in range(5):  # Reduced for testing
-            await self._simulate_io_operation(f"seq_{i}", 0.01)  # Reduced delay
+        for i in range(5):
+            await self._simulate_io_operation(f"seq_{i}", 0.01)
 
         baseline_time = time.time() - baseline_start
 
-        # Optimized: Parallel operations
         optimized_start = time.time()
 
         tasks = [self._simulate_io_operation(f"par_{i}", 0.01) for i in range(5)]
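The async-workflow benchmark reduces to timing the same awaitables run back-to-back versus through `asyncio.gather`. A self-contained sketch of that measurement:

```python
import asyncio
import time


async def io_op(name: str, delay: float = 0.01) -> str:
    await asyncio.sleep(delay)  # stand-in for real I/O
    return f"result_{name}"


async def main() -> None:
    start = time.time()
    for i in range(5):  # sequential baseline: the delays add up
        await io_op(f"seq_{i}")
    sequential = time.time() - start

    start = time.time()
    await asyncio.gather(*(io_op(f"par_{i}") for i in range(5)))
    parallel = time.time() - start  # roughly one delay instead of five

    print(f"sequential={sequential:.3f}s parallel={parallel:.3f}s")


asyncio.run(main())
```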
@@ -271,37 +236,33 @@
         )
 
     async def _simulate_expensive_operation(self, operation_id: str) -> str:
-        """Simulate an expensive operation without caching."""
-        await asyncio.sleep(0.002)  # 2ms delay for testing
+        await asyncio.sleep(0.002)
 
         result = ""
-        for i in range(100):  # Reduced computation
+        for i in range(100):
             result += f"{operation_id}_{i}"
 
         return result[:50]
 
     async def _simulate_cached_operation(self, operation_id: str) -> str:
-        """Simulate an expensive operation with caching."""
-        cached_result = await self._cache.get_async(f"expensive_op:{operation_id}")
+        cached_result = await self._cache.get_async(f"expensive_op: {operation_id}")
         if cached_result is not None:
-            return cached_result
+            return str(cached_result)
 
         result = await self._simulate_expensive_operation(operation_id)
         await self._cache.set_async(
-            f"expensive_op:{operation_id}", result, ttl_seconds=60
+            f"expensive_op: {operation_id}", result, ttl_seconds=60
         )
 
         return result
 
     async def _simulate_io_operation(self, operation_id: str, duration: float) -> str:
-        """Simulate I/O bound operation."""
         await asyncio.sleep(duration)
         return f"result_{operation_id}"
 
     def export_benchmark_results(
         self, suite: BenchmarkSuite, output_path: Path
     ) -> None:
-        """Export benchmark results to JSON file."""
         data = {
             "suite_name": suite.suite_name,
             "run_timestamp": suite.run_timestamp.isoformat(),
@@ -337,7 +298,5 @@
         self._logger.info(f"Exported benchmark results to {output_path}")
 
 
-# Global benchmarker instance
 def get_benchmarker() -> PerformanceBenchmarker:
-    """Get performance benchmarker instance."""
     return PerformanceBenchmarker()
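Tying the file's pieces together, a likely end-to-end invocation; the names come straight from the hunks above, and the output path is an example:

```python
import asyncio
from pathlib import Path

from crackerjack.services.performance_benchmarks import get_benchmarker


async def main() -> None:
    benchmarker = get_benchmarker()  # returns a fresh instance each call
    suite = await benchmarker.run_comprehensive_benchmark()
    print(f"average time improvement: {suite.average_time_improvement:.1f}%")
    benchmarker.export_benchmark_results(suite, Path("benchmark_results.json"))


asyncio.run(main())
```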