crackerjack 0.32.0__py3-none-any.whl → 0.33.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crackerjack might be problematic. Click here for more details.

Files changed (34):
  1. crackerjack/core/enhanced_container.py +67 -0
  2. crackerjack/core/phase_coordinator.py +183 -44
  3. crackerjack/core/workflow_orchestrator.py +459 -138
  4. crackerjack/managers/publish_manager.py +22 -5
  5. crackerjack/managers/test_command_builder.py +4 -2
  6. crackerjack/managers/test_manager.py +15 -4
  7. crackerjack/mcp/server_core.py +162 -34
  8. crackerjack/mcp/tools/core_tools.py +1 -1
  9. crackerjack/mcp/tools/execution_tools.py +8 -3
  10. crackerjack/mixins/__init__.py +5 -0
  11. crackerjack/mixins/error_handling.py +214 -0
  12. crackerjack/models/config.py +9 -0
  13. crackerjack/models/protocols.py +69 -0
  14. crackerjack/models/task.py +3 -0
  15. crackerjack/security/__init__.py +1 -1
  16. crackerjack/security/audit.py +92 -78
  17. crackerjack/services/config.py +3 -2
  18. crackerjack/services/config_merge.py +11 -5
  19. crackerjack/services/coverage_ratchet.py +22 -0
  20. crackerjack/services/git.py +37 -24
  21. crackerjack/services/initialization.py +25 -9
  22. crackerjack/services/memory_optimizer.py +477 -0
  23. crackerjack/services/parallel_executor.py +474 -0
  24. crackerjack/services/performance_benchmarks.py +292 -577
  25. crackerjack/services/performance_cache.py +443 -0
  26. crackerjack/services/performance_monitor.py +633 -0
  27. crackerjack/services/security.py +63 -0
  28. crackerjack/services/security_logger.py +9 -1
  29. crackerjack/services/terminal_utils.py +0 -0
  30. {crackerjack-0.32.0.dist-info → crackerjack-0.33.0.dist-info}/METADATA +2 -2
  31. {crackerjack-0.32.0.dist-info → crackerjack-0.33.0.dist-info}/RECORD +34 -27
  32. {crackerjack-0.32.0.dist-info → crackerjack-0.33.0.dist-info}/WHEEL +0 -0
  33. {crackerjack-0.32.0.dist-info → crackerjack-0.33.0.dist-info}/entry_points.txt +0 -0
  34. {crackerjack-0.32.0.dist-info → crackerjack-0.33.0.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,474 @@
1
+ """Parallel execution coordinator for hooks and other independent operations.
2
+
3
+ This module provides safe parallel execution of hooks while respecting
4
+ security levels, dependencies, and resource constraints.
5
+ """
6
+
7
+ import asyncio
8
+ import time
9
+ import typing as t
10
+ from concurrent.futures import ThreadPoolExecutor
11
+ from dataclasses import dataclass, field
12
+ from enum import Enum
13
+ from pathlib import Path
14
+
15
+ from crackerjack.config.hooks import HookDefinition, SecurityLevel
16
+ from crackerjack.services.logging import get_logger
17
+ from crackerjack.services.performance_cache import get_performance_cache
18
+
19
+
20
class ExecutionStrategy(Enum):
    """Execution strategy for parallel operations."""

    # Run every hook one at a time, in order (see _execute_sequential).
    SEQUENTIAL = "sequential"
    # Only parallel execution of safe, independent hooks.
    PARALLEL_SAFE = "parallel_safe"
    # More aggressive parallelization.
    # NOTE(review): execute_hooks_parallel only special-cases SEQUENTIAL, so
    # this currently behaves the same as PARALLEL_SAFE in this module.
    PARALLEL_AGGRESSIVE = "parallel_aggressive"
28
+
29
+
30
@dataclass
class ExecutionGroup:
    """Group of operations that can be executed together."""

    # Human-readable identifier for the group.
    name: str
    # Operations (e.g. hooks) contained in the group.
    operations: list[t.Any]
    # Upper bound on concurrent workers for this group.
    max_workers: int = 3
    # Per-group execution timeout, in seconds.
    timeout_seconds: int = 300
    # Presumably names of groups that must finish first — not consulted
    # anywhere in this module; TODO confirm semantics with callers.
    dependencies: set[str] = field(default_factory=set)
    # Security classification applied to the whole group.
    security_level: SecurityLevel = SecurityLevel.MEDIUM
40
+
41
+
42
@dataclass
class ExecutionResult:
    """Result of executing an operation."""

    # Identifier of the operation: the hook name, or the joined command string.
    operation_id: str
    # True when the operation completed successfully (exit code 0, no exception).
    success: bool
    # Wall-clock runtime in seconds; 0.0 when the run failed before measuring.
    duration_seconds: float
    # Captured stdout, if any.
    output: str = ""
    # Captured stderr, or the exception/timeout message on failure.
    error: str = ""
    # Process exit code; -1 is used for timeout and exception paths.
    exit_code: int = 0
    # Free-form extra data attached by callers.
    metadata: dict[str, t.Any] = field(default_factory=dict)
53
+
54
+
55
@dataclass
class ParallelExecutionResult:
    """Aggregate outcome of one batch of (possibly parallel) executions."""

    # Name of the executed batch (e.g. "all_hooks" or "sequential").
    group_name: str
    # How many operations were requested.
    total_operations: int
    # How many of them succeeded.
    successful_operations: int
    # How many of them failed.
    failed_operations: int
    # Wall-clock time for the whole batch, in seconds.
    total_duration_seconds: float
    # Per-operation results, in completion-collection order.
    results: list[ExecutionResult]

    @property
    def success_rate(self) -> float:
        """Fraction of operations that succeeded; 0.0 for an empty batch."""
        if self.total_operations > 0:
            return self.successful_operations / self.total_operations
        return 0.0

    @property
    def overall_success(self) -> bool:
        """True when not a single operation in the batch failed."""
        return not self.failed_operations
79
+
80
+
81
class ParallelHookExecutor:
    """Parallel executor for hook operations with safety constraints.

    Hooks are bucketed into compatibility groups; each group runs
    concurrently only when every pair inside it is provably safe to
    overlap, otherwise the group falls back to sequential execution.
    """

    def __init__(
        self,
        max_workers: int = 3,
        timeout_seconds: int = 300,
        strategy: ExecutionStrategy = ExecutionStrategy.PARALLEL_SAFE,
    ):
        self.max_workers = max_workers
        self.timeout_seconds = timeout_seconds
        self.strategy = strategy
        self._logger = get_logger("crackerjack.parallel_executor")
        self._cache = get_performance_cache()

    def analyze_hook_dependencies(
        self,
        hooks: list[HookDefinition],
    ) -> dict[str, list[HookDefinition]]:
        """Bucket hooks into named groups that are candidates for parallel runs.

        Classification priority: formatting/LOW -> "formatting",
        CRITICAL -> "security", config checks -> "validation",
        everything else -> "comprehensive". Empty buckets are dropped.
        """
        buckets: dict[str, list[HookDefinition]] = {
            "formatting": [],
            "validation": [],
            "security": [],
            "comprehensive": [],
        }

        config_checks = {"check-yaml", "check-json", "check-toml"}
        for hook in hooks:
            if hook.is_formatting or hook.security_level == SecurityLevel.LOW:
                bucket = "formatting"
            elif hook.security_level == SecurityLevel.CRITICAL:
                bucket = "security"
            elif hook.name in config_checks:
                bucket = "validation"
            else:
                bucket = "comprehensive"
            buckets[bucket].append(hook)

        return {name: members for name, members in buckets.items() if members}

    def can_execute_in_parallel(
        self,
        hook1: HookDefinition,
        hook2: HookDefinition,
    ) -> bool:
        """Check if two hooks can be safely executed in parallel."""
        # Mixing security levels is never allowed.
        if hook1.security_level != hook2.security_level:
            return False

        # A file-modifying (formatting) hook must not race a file-reading one.
        if hook1.is_formatting and not hook2.is_formatting:
            return False

        def is_config_check(hook: HookDefinition) -> bool:
            return hook.name in {"check-yaml", "check-json", "check-toml"}

        def is_plain_medium(hook: HookDefinition) -> bool:
            return (
                not hook.is_formatting
                and hook.security_level == SecurityLevel.MEDIUM
            )

        # Whitelisted pairings that are known to be safe side by side:
        # two formatters, two config-file checks, or two non-formatting
        # MEDIUM-level hooks.
        if hook1.is_formatting and hook2.is_formatting:
            return True
        if is_config_check(hook1) and is_config_check(hook2):
            return True
        if is_plain_medium(hook1) and is_plain_medium(hook2):
            return True
        return False

    async def execute_hooks_parallel(
        self,
        hooks: list[HookDefinition],
        hook_runner: t.Callable[[HookDefinition], t.Awaitable[ExecutionResult]],
    ) -> ParallelExecutionResult:
        """Execute hooks in parallel where safe, sequential otherwise."""
        started = time.time()

        if self.strategy == ExecutionStrategy.SEQUENTIAL:
            return await self._execute_sequential(hooks, hook_runner, started)

        groups = self.analyze_hook_dependencies(hooks)
        collected: list[ExecutionResult] = []

        self._logger.info(
            f"Executing {len(hooks)} hooks in {len(groups)} parallel groups"
        )

        for group_name, group_hooks in groups.items():
            parallel_ok = (
                len(group_hooks) > 1 and self._can_parallelize_group(group_hooks)
            )
            if parallel_ok:
                collected.extend(
                    await self._execute_group_parallel(
                        group_hooks,
                        hook_runner,
                        group_name,
                    )
                )
            else:
                # Singletons and incompatible groups run one at a time.
                for hook in group_hooks:
                    collected.append(await hook_runner(hook))

        ok_count = sum(1 for r in collected if r.success)
        return ParallelExecutionResult(
            group_name="all_hooks",
            total_operations=len(hooks),
            successful_operations=ok_count,
            failed_operations=len(collected) - ok_count,
            total_duration_seconds=time.time() - started,
            results=collected,
        )

    async def _execute_sequential(
        self,
        hooks: list[HookDefinition],
        hook_runner: t.Callable[[HookDefinition], t.Awaitable[ExecutionResult]],
        start_time: float,
    ) -> ParallelExecutionResult:
        """Run every hook one after another (fallback path)."""
        outcomes = [await hook_runner(hook) for hook in hooks]

        ok_count = sum(1 for r in outcomes if r.success)
        return ParallelExecutionResult(
            group_name="sequential",
            total_operations=len(hooks),
            successful_operations=ok_count,
            failed_operations=len(outcomes) - ok_count,
            total_duration_seconds=time.time() - start_time,
            results=outcomes,
        )

    def _can_parallelize_group(self, hooks: list[HookDefinition]) -> bool:
        """True when every pair of hooks in the group is mutually compatible."""
        if len(hooks) < 2:
            return False
        return all(
            self.can_execute_in_parallel(first, second)
            for index, first in enumerate(hooks)
            for second in hooks[index + 1 :]
        )

    async def _execute_group_parallel(
        self,
        hooks: list[HookDefinition],
        hook_runner: t.Callable[[HookDefinition], t.Awaitable[ExecutionResult]],
        group_name: str,
    ) -> list[ExecutionResult]:
        """Run one compatible group concurrently, bounded by a semaphore."""
        self._logger.debug(f"Executing {len(hooks)} {group_name} hooks in parallel")

        # Never spawn more concurrent runs than hooks (or the configured cap).
        gate = asyncio.Semaphore(min(self.max_workers, len(hooks)))

        async def bounded_run(hook: HookDefinition) -> ExecutionResult:
            async with gate:
                return await hook_runner(hook)

        raw = await asyncio.gather(
            *(bounded_run(hook) for hook in hooks),
            return_exceptions=True,
        )

        outcomes: list[ExecutionResult] = []
        for hook, item in zip(hooks, raw):
            if isinstance(item, Exception):
                # Surface the exception as a failed result instead of raising.
                outcomes.append(
                    ExecutionResult(
                        operation_id=hook.name,
                        success=False,
                        duration_seconds=0.0,
                        error=str(item),
                    )
                )
                self._logger.error(
                    f"Hook {hook.name} failed with exception: {item}"
                )
            else:
                # Non-exception entries are ExecutionResult by construction.
                outcomes.append(item)  # type: ignore[arg-type]

        successful = sum(1 for r in outcomes if r.success)
        self._logger.info(
            f"Parallel {group_name} execution: {successful}/{len(hooks)} succeeded"
        )

        return outcomes
293
+
294
+
295
class AsyncCommandExecutor:
    """Asynchronous command executor with optimization and caching.

    Commands run via ``subprocess.run`` inside a thread pool so the event
    loop is never blocked; successful results may be cached through the
    shared command cache.
    """

    def __init__(
        self,
        max_workers: int = 4,
        cache_results: bool = True,
    ):
        self.max_workers = max_workers
        self.cache_results = cache_results
        self._logger = get_logger("crackerjack.async_executor")
        self._cache = get_performance_cache()
        self._thread_pool = ThreadPoolExecutor(max_workers=max_workers)

    async def execute_command(
        self,
        command: list[str],
        cwd: Path | None = None,
        timeout: int = 60,
        cache_ttl: int = 120,
    ) -> ExecutionResult:
        """Execute a command asynchronously with caching.

        Args:
            command: Argv-style command list (no shell interpretation).
            cwd: Working directory for the subprocess, if any.
            timeout: Per-command timeout in seconds.
            cache_ttl: TTL in seconds when caching a successful result.

        Returns:
            ExecutionResult describing the run (cached or fresh).
        """
        # Check cache first.
        if self.cache_results:
            cached_result = await self._get_cached_result(command, cwd)
            if cached_result:
                self._logger.debug(
                    f"Using cached result for command: {' '.join(command)}"
                )
                return cached_result

        # Execute command; overwrite the placeholder duration with wall time.
        start_time = time.time()
        result = await self._run_command_async(command, cwd, timeout)
        result.duration_seconds = time.time() - start_time

        # Only successful runs are cached; failures are always retried.
        if self.cache_results and result.success:
            await self._cache_result(command, result, cache_ttl, cwd)

        return result

    async def execute_commands_batch(
        self,
        commands: list[tuple[list[str], Path | None]],
        timeout: int = 60,
    ) -> list[ExecutionResult]:
        """Execute multiple commands concurrently.

        Exceptions raised by individual commands are converted into failed
        ExecutionResult entries instead of propagating.
        """
        self._logger.info(f"Executing {len(commands)} commands in batch")

        tasks = [self.execute_command(cmd, cwd, timeout) for cmd, cwd in commands]

        results = await asyncio.gather(*tasks, return_exceptions=True)

        # Process results and handle exceptions.
        processed_results: list[ExecutionResult] = []
        for i, result in enumerate(results):
            if isinstance(result, Exception):
                error_result = ExecutionResult(
                    operation_id=f"command_{i}",
                    success=False,
                    duration_seconds=0.0,
                    error=str(result),
                )
                processed_results.append(error_result)
            else:
                # Non-exception entries are ExecutionResult by construction.
                processed_results.append(result)  # type: ignore[arg-type]

        successful = sum(1 for r in processed_results if r.success)
        self._logger.info(
            f"Batch execution: {successful}/{len(commands)} commands succeeded"
        )

        return processed_results

    async def _run_command_async(
        self,
        command: list[str],
        cwd: Path | None = None,
        timeout: int = 60,
    ) -> ExecutionResult:
        """Run the command in the thread pool and normalize the outcome."""
        # get_running_loop() is the correct API inside a coroutine;
        # get_event_loop() is deprecated for this use since Python 3.10.
        loop = asyncio.get_running_loop()

        def run_sync_command() -> ExecutionResult:
            import subprocess

            try:
                result = subprocess.run(
                    command,
                    cwd=cwd,
                    capture_output=True,
                    text=True,
                    timeout=timeout,
                    check=False,
                )

                return ExecutionResult(
                    operation_id=" ".join(command),
                    success=result.returncode == 0,
                    duration_seconds=0.0,  # Overwritten by execute_command().
                    output=result.stdout,
                    error=result.stderr,
                    exit_code=result.returncode,
                )

            except subprocess.TimeoutExpired:
                return ExecutionResult(
                    operation_id=" ".join(command),
                    success=False,
                    duration_seconds=timeout,
                    error=f"Command timeout after {timeout}s",
                    exit_code=-1,
                )
            except Exception as e:
                # Boundary conversion: any launch failure becomes a result.
                return ExecutionResult(
                    operation_id=" ".join(command),
                    success=False,
                    duration_seconds=0.0,
                    error=str(e),
                    exit_code=-1,
                )

        return await loop.run_in_executor(self._thread_pool, run_sync_command)

    async def _get_cached_result(
        self,
        command: list[str],
        cwd: Path | None = None,
    ) -> ExecutionResult | None:
        """Get cached command result, or None on a cache miss."""
        from crackerjack.services.performance_cache import get_command_cache

        return get_command_cache().get_command_result(command, cwd)

    async def _cache_result(
        self,
        command: list[str],
        result: ExecutionResult,
        ttl_seconds: int,
        cwd: Path | None = None,
    ) -> None:
        """Cache command result with the given TTL."""
        from crackerjack.services.performance_cache import get_command_cache

        command_cache = get_command_cache()
        command_cache.set_command_result(command, result, cwd, ttl_seconds)

    def shutdown(self, wait: bool = False) -> None:
        """Explicitly shut down the worker thread pool (safe to call twice)."""
        if hasattr(self, "_thread_pool"):
            self._thread_pool.shutdown(wait=wait)

    def __del__(self):
        """Best-effort cleanup when shutdown() was never called explicitly."""
        self.shutdown(wait=False)
448
+
449
+
450
# Global executor instances — lazily created singletons, see getters below.
_parallel_executor: ParallelHookExecutor | None = None
_async_executor: AsyncCommandExecutor | None = None


def get_parallel_executor(
    max_workers: int = 3,
    strategy: ExecutionStrategy = ExecutionStrategy.PARALLEL_SAFE,
) -> ParallelHookExecutor:
    """Get global parallel hook executor instance.

    NOTE: the arguments are honored only on the first call; subsequent
    calls return the already-constructed singleton unchanged.
    """
    global _parallel_executor
    if _parallel_executor is None:
        _parallel_executor = ParallelHookExecutor(
            max_workers=max_workers,
            strategy=strategy,
        )
    return _parallel_executor


def get_async_executor(max_workers: int = 4) -> AsyncCommandExecutor:
    """Get global async command executor instance.

    NOTE: max_workers is honored only when the singleton is first created.
    """
    global _async_executor
    if _async_executor is None:
        _async_executor = AsyncCommandExecutor(max_workers=max_workers)
    return _async_executor