crackerjack 0.31.18__py3-none-any.whl → 0.32.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crackerjack might be problematic. Click here for more details.

@@ -13,8 +13,24 @@ async def execute_crackerjack_workflow(
13
13
  ) -> dict[str, t.Any]:
14
14
  job_id = str(uuid.uuid4())[:8]
15
15
 
16
+ # Configure extended timeout for long-running test operations
17
+ execution_timeout = kwargs.get("execution_timeout", 900) # 15 minutes default
18
+ if kwargs.get("test", False) or kwargs.get("testing", False):
19
+ execution_timeout = max(execution_timeout, 1200) # 20 minutes for test runs
20
+
16
21
  try:
17
- return await _execute_crackerjack_sync(job_id, args, kwargs, get_context())
22
+ # Add overall execution timeout with keep-alive
23
+ return await asyncio.wait_for(
24
+ _execute_crackerjack_sync(job_id, args, kwargs, get_context()),
25
+ timeout=execution_timeout,
26
+ )
27
+ except TimeoutError:
28
+ return {
29
+ "job_id": job_id,
30
+ "status": "timeout",
31
+ "error": f"Execution timed out after {execution_timeout} seconds",
32
+ "timestamp": time.time(),
33
+ }
18
34
  except Exception as e:
19
35
  import traceback
20
36
 
@@ -143,9 +159,9 @@ async def _create_advanced_orchestrator(
143
159
  from pathlib import Path
144
160
 
145
161
  from crackerjack.core.async_workflow_orchestrator import AsyncWorkflowOrchestrator
146
- from crackerjack.core.enhanced_container import EnhancedContainer
162
+ from crackerjack.core.enhanced_container import EnhancedDependencyContainer
147
163
 
148
- container = EnhancedContainer()
164
+ container = EnhancedDependencyContainer()
149
165
 
150
166
  await _register_core_services(container, Path(working_dir))
151
167
 
@@ -171,41 +187,37 @@ def _create_standard_orchestrator(
171
187
  async def _register_core_services(container: t.Any, working_dir: t.Any) -> None:
172
188
  from rich.console import Console
173
189
 
174
- from crackerjack.core.enhanced_container import ServiceLifetime
175
190
  from crackerjack.managers.hook_manager import AsyncHookManager
176
- from crackerjack.managers.publish_manager import PublishManager
177
- from crackerjack.managers.test_manager import TestManager
191
+ from crackerjack.managers.publish_manager import PublishManagerImpl
192
+ from crackerjack.managers.test_manager import TestManagementImpl
178
193
  from crackerjack.models.protocols import (
179
- HookManagerProtocol,
180
- PublishManagerProtocol,
194
+ FileSystemInterface,
195
+ HookManager,
196
+ PublishManager,
181
197
  TestManagerProtocol,
182
198
  )
183
199
  from crackerjack.services.enhanced_filesystem import EnhancedFileSystemService
184
200
 
185
201
  console = Console()
186
202
 
187
- container.register_service(
188
- HookManagerProtocol,
189
- AsyncHookManager(console, working_dir),
190
- ServiceLifetime.SINGLETON,
203
+ container.register_singleton(
204
+ HookManager,
205
+ factory=lambda: AsyncHookManager(console, working_dir),
191
206
  )
192
207
 
193
- container.register_service(
208
+ container.register_singleton(
194
209
  TestManagerProtocol,
195
- TestManager(console, working_dir),
196
- ServiceLifetime.SINGLETON,
210
+ factory=lambda: TestManagementImpl(console, working_dir),
197
211
  )
198
212
 
199
- container.register_service(
200
- PublishManagerProtocol,
201
- PublishManager(console, working_dir),
202
- ServiceLifetime.SINGLETON,
213
+ container.register_singleton(
214
+ PublishManager,
215
+ factory=lambda: PublishManagerImpl(console, working_dir),
203
216
  )
204
217
 
205
- container.register_service(
206
- EnhancedFileSystemService,
207
- EnhancedFileSystemService(),
208
- ServiceLifetime.SINGLETON,
218
+ container.register_singleton(
219
+ FileSystemInterface,
220
+ factory=EnhancedFileSystemService,
209
221
  )
210
222
 
211
223
 
@@ -218,17 +230,29 @@ async def _run_workflow_iterations(
218
230
  options = _create_workflow_options(kwargs)
219
231
  max_iterations = kwargs.get("max_iterations", 10)
220
232
 
221
- for iteration in range(max_iterations):
222
- _update_progress(
223
- job_id,
224
- {
225
- "type": "iteration",
226
- "iteration": iteration + 1,
227
- "max_iterations": max_iterations,
228
- "status": "running",
229
- },
230
- context,
233
+ # Start keep-alive task to prevent TCP timeouts
234
+ keep_alive_task = asyncio.create_task(_keep_alive_heartbeat(job_id, context))
235
+
236
+ try:
237
+ result = await _execute_iterations_loop(
238
+ job_id, orchestrator, options, kwargs, max_iterations, context
231
239
  )
240
+ return result
241
+ finally:
242
+ await _cleanup_keep_alive_task(keep_alive_task)
243
+
244
+
245
+ async def _execute_iterations_loop(
246
+ job_id: str,
247
+ orchestrator: t.Any,
248
+ options: t.Any,
249
+ kwargs: dict[str, t.Any],
250
+ max_iterations: int,
251
+ context: t.Any,
252
+ ) -> dict[str, t.Any]:
253
+ """Execute the main iterations loop."""
254
+ for iteration in range(max_iterations):
255
+ _update_iteration_progress(job_id, iteration, max_iterations, context)
232
256
 
233
257
  try:
234
258
  success = await _execute_single_iteration(
@@ -236,13 +260,8 @@ async def _run_workflow_iterations(
236
260
  )
237
261
 
238
262
  if success:
239
- coverage_result = None
240
- if kwargs.get("boost_coverage", False):
241
- coverage_result = await _attempt_coverage_improvement(
242
- job_id, orchestrator, context
243
- )
244
- return _create_success_result(
245
- job_id, iteration + 1, context, coverage_result
263
+ return await _handle_iteration_success(
264
+ job_id, iteration, orchestrator, kwargs, context
246
265
  )
247
266
 
248
267
  if iteration < max_iterations - 1:
@@ -254,6 +273,77 @@ async def _run_workflow_iterations(
254
273
  return _create_failure_result(job_id, max_iterations, context)
255
274
 
256
275
 
276
+ def _update_iteration_progress(
277
+ job_id: str, iteration: int, max_iterations: int, context: t.Any
278
+ ) -> None:
279
+ """Update progress for current iteration."""
280
+ _update_progress(
281
+ job_id,
282
+ {
283
+ "type": "iteration",
284
+ "iteration": iteration + 1,
285
+ "max_iterations": max_iterations,
286
+ "status": "running",
287
+ },
288
+ context,
289
+ )
290
+
291
+
292
+ async def _handle_iteration_success(
293
+ job_id: str,
294
+ iteration: int,
295
+ orchestrator: t.Any,
296
+ kwargs: dict[str, t.Any],
297
+ context: t.Any,
298
+ ) -> dict[str, t.Any]:
299
+ """Handle successful iteration."""
300
+ coverage_result = None
301
+ if kwargs.get("boost_coverage", False):
302
+ coverage_result = await _attempt_coverage_improvement(
303
+ job_id, orchestrator, context
304
+ )
305
+ return _create_success_result(job_id, iteration + 1, context, coverage_result)
306
+
307
+
308
+ async def _cleanup_keep_alive_task(keep_alive_task: asyncio.Task[t.Any]) -> None:
309
+ """Clean up the keep-alive task."""
310
+ if not keep_alive_task.cancelled():
311
+ keep_alive_task.cancel()
312
+ try:
313
+ await keep_alive_task
314
+ except asyncio.CancelledError:
315
+ pass
316
+
317
+
318
+ async def _keep_alive_heartbeat(job_id: str, context: t.Any) -> None:
319
+ """Send periodic keep-alive messages to prevent TCP timeouts."""
320
+ try:
321
+ while True:
322
+ # Send heartbeat every 60 seconds (well under 2-minute TCP timeout)
323
+ await asyncio.sleep(60)
324
+ _update_progress(
325
+ job_id,
326
+ {
327
+ "type": "keep_alive",
328
+ "status": "heartbeat",
329
+ "timestamp": time.time(),
330
+ "message": "Keep-alive heartbeat to prevent connection timeout",
331
+ },
332
+ context,
333
+ )
334
+ except asyncio.CancelledError:
335
+ # Task was cancelled, cleanup
336
+ _update_progress(
337
+ job_id,
338
+ {
339
+ "type": "keep_alive",
340
+ "status": "cancelled",
341
+ "timestamp": time.time(),
342
+ },
343
+ context,
344
+ )
345
+
346
+
257
347
  def _create_workflow_options(kwargs: dict[str, t.Any]) -> t.Any:
258
348
  from types import SimpleNamespace
259
349
 
@@ -83,8 +83,12 @@ class GitInterface(t.Protocol):
83
83
 
84
84
  def add_files(self, files: list[str]) -> bool: ...
85
85
 
86
+ def add_all_files(self) -> bool: ...
87
+
86
88
  def get_commit_message_suggestions(self, changed_files: list[str]) -> list[str]: ...
87
89
 
90
+ def get_unpushed_commit_count(self) -> int: ...
91
+
88
92
 
89
93
  @t.runtime_checkable
90
94
  class HookManager(t.Protocol):
@@ -99,6 +103,47 @@ class HookManager(t.Protocol):
99
103
  def get_hook_summary(self, results: t.Any) -> t.Any: ...
100
104
 
101
105
 
106
+ @t.runtime_checkable
107
+ class SecurityAwareHookManager(HookManager, t.Protocol):
108
+ """Security-aware hook manager that tracks security-critical failures."""
109
+
110
+ def get_security_critical_failures(self, results: list[t.Any]) -> list[t.Any]:
111
+ """Extract security-critical failures from hook results.
112
+
113
+ Args:
114
+ results: List of hook results from run_fast_hooks or run_comprehensive_hooks
115
+
116
+ Returns:
117
+ List of results that are security-critical and failed
118
+ """
119
+ ...
120
+
121
+ def has_security_critical_failures(self, results: list[t.Any]) -> bool:
122
+ """Check if any security-critical hooks failed.
123
+
124
+ Args:
125
+ results: List of hook results
126
+
127
+ Returns:
128
+ True if any CRITICAL security level hooks failed
129
+ """
130
+ ...
131
+
132
+ def get_security_audit_report(
133
+ self, fast_results: list[t.Any], comprehensive_results: list[t.Any]
134
+ ) -> dict[str, t.Any]:
135
+ """Generate security audit report for publishing decisions.
136
+
137
+ Args:
138
+ fast_results: Results from fast hooks
139
+ comprehensive_results: Results from comprehensive hooks
140
+
141
+ Returns:
142
+ Dict containing security status, failed critical checks, and recommendations
143
+ """
144
+ ...
145
+
146
+
102
147
  @t.runtime_checkable
103
148
  class TestManagerProtocol(t.Protocol):
104
149
  def run_tests(self, options: OptionsProtocol) -> bool: ...
@@ -0,0 +1 @@
1
+ """Security utilities for Crackerjack."""
@@ -0,0 +1,212 @@
1
+ """Security audit utilities for secure SDLC practices."""
2
+
3
+ import typing as t
4
+ from dataclasses import dataclass
5
+ from enum import Enum
6
+
7
+ from crackerjack.config.hooks import SecurityLevel
8
+
9
+
10
+ @dataclass
11
+ class SecurityCheckResult:
12
+ """Result of a security check."""
13
+
14
+ hook_name: str
15
+ security_level: SecurityLevel
16
+ passed: bool
17
+ error_message: str | None = None
18
+ details: dict[str, t.Any] | None = None
19
+
20
+
21
+ @dataclass
22
+ class SecurityAuditReport:
23
+ """Comprehensive security audit report for publishing decisions."""
24
+
25
+ critical_failures: list[SecurityCheckResult]
26
+ high_failures: list[SecurityCheckResult]
27
+ medium_failures: list[SecurityCheckResult]
28
+ low_failures: list[SecurityCheckResult]
29
+
30
+ allows_publishing: bool
31
+ security_warnings: list[str]
32
+ recommendations: list[str]
33
+
34
+ @property
35
+ def has_critical_failures(self) -> bool:
36
+ """Check if there are any critical security failures."""
37
+ return len(self.critical_failures) > 0
38
+
39
+ @property
40
+ def total_failures(self) -> int:
41
+ """Get total number of failed checks."""
42
+ return (
43
+ len(self.critical_failures) +
44
+ len(self.high_failures) +
45
+ len(self.medium_failures) +
46
+ len(self.low_failures)
47
+ )
48
+
49
+
50
+ class SecurityAuditor:
51
+ """Security auditor for hook results following OWASP secure SDLC practices."""
52
+
53
+ # Security-critical hooks that CANNOT be bypassed for publishing
54
+ CRITICAL_HOOKS = {
55
+ 'bandit': 'Security vulnerability detection (OWASP A09)',
56
+ 'pyright': 'Type safety prevents runtime security holes (OWASP A04)',
57
+ 'gitleaks': 'Secret/credential detection (OWASP A07)',
58
+ }
59
+
60
+ # High-importance security hooks that can be bypassed with warnings
61
+ HIGH_SECURITY_HOOKS = {
62
+ 'validate-regex-patterns': 'Regex vulnerability detection',
63
+ 'creosote': 'Dependency vulnerability analysis',
64
+ 'check-added-large-files': 'Large file security analysis',
65
+ 'uv-lock': 'Dependency lock security',
66
+ }
67
+
68
+ def audit_hook_results(
69
+ self,
70
+ fast_results: list[t.Any],
71
+ comprehensive_results: list[t.Any]
72
+ ) -> SecurityAuditReport:
73
+ """Audit hook results and generate security report.
74
+
75
+ Args:
76
+ fast_results: Results from fast hooks
77
+ comprehensive_results: Results from comprehensive hooks
78
+
79
+ Returns:
80
+ SecurityAuditReport with security analysis
81
+ """
82
+ all_results = fast_results + comprehensive_results
83
+
84
+ critical_failures = []
85
+ high_failures = []
86
+ medium_failures = []
87
+ low_failures = []
88
+
89
+ for result in all_results:
90
+ check_result = self._analyze_hook_result(result)
91
+ if not check_result.passed:
92
+ if check_result.security_level == SecurityLevel.CRITICAL:
93
+ critical_failures.append(check_result)
94
+ elif check_result.security_level == SecurityLevel.HIGH:
95
+ high_failures.append(check_result)
96
+ elif check_result.security_level == SecurityLevel.MEDIUM:
97
+ medium_failures.append(check_result)
98
+ else:
99
+ low_failures.append(check_result)
100
+
101
+ # Publishing is allowed only if no critical failures exist
102
+ allows_publishing = len(critical_failures) == 0
103
+
104
+ security_warnings = self._generate_security_warnings(
105
+ critical_failures, high_failures, medium_failures
106
+ )
107
+
108
+ recommendations = self._generate_security_recommendations(
109
+ critical_failures, high_failures, medium_failures
110
+ )
111
+
112
+ return SecurityAuditReport(
113
+ critical_failures=critical_failures,
114
+ high_failures=high_failures,
115
+ medium_failures=medium_failures,
116
+ low_failures=low_failures,
117
+ allows_publishing=allows_publishing,
118
+ security_warnings=security_warnings,
119
+ recommendations=recommendations,
120
+ )
121
+
122
+ def _analyze_hook_result(self, result: t.Any) -> SecurityCheckResult:
123
+ """Analyze a single hook result for security implications."""
124
+ hook_name = getattr(result, 'name', 'unknown')
125
+ is_failed = getattr(result, 'status', 'unknown') in ('failed', 'error', 'timeout')
126
+ error_message = getattr(result, 'output', None) or getattr(result, 'error', None)
127
+
128
+ # Determine security level
129
+ security_level = self._get_hook_security_level(hook_name)
130
+
131
+ return SecurityCheckResult(
132
+ hook_name=hook_name,
133
+ security_level=security_level,
134
+ passed=not is_failed,
135
+ error_message=error_message,
136
+ details={'status': getattr(result, 'status', 'unknown')},
137
+ )
138
+
139
+ def _get_hook_security_level(self, hook_name: str) -> SecurityLevel:
140
+ """Get security level for a hook name."""
141
+ hook_name_lower = hook_name.lower()
142
+
143
+ if hook_name_lower in [name.lower() for name in self.CRITICAL_HOOKS]:
144
+ return SecurityLevel.CRITICAL
145
+ elif hook_name_lower in [name.lower() for name in self.HIGH_SECURITY_HOOKS]:
146
+ return SecurityLevel.HIGH
147
+ elif hook_name_lower in ['ruff-check', 'vulture', 'refurb', 'complexipy']:
148
+ return SecurityLevel.MEDIUM
149
+ else:
150
+ return SecurityLevel.LOW
151
+
152
+ def _generate_security_warnings(
153
+ self,
154
+ critical: list[SecurityCheckResult],
155
+ high: list[SecurityCheckResult],
156
+ medium: list[SecurityCheckResult]
157
+ ) -> list[str]:
158
+ """Generate security warnings based on failed checks."""
159
+ warnings = []
160
+
161
+ if critical:
162
+ warnings.append(
163
+ f"🔒 CRITICAL: {len(critical)} security-critical checks failed - publishing BLOCKED"
164
+ )
165
+ for failure in critical:
166
+ reason = self.CRITICAL_HOOKS.get(failure.hook_name.lower(), "Security-critical check")
167
+ warnings.append(f" • {failure.hook_name}: {reason}")
168
+
169
+ if high:
170
+ warnings.append(
171
+ f"⚠️ HIGH: {len(high)} high-security checks failed - review recommended"
172
+ )
173
+
174
+ if medium:
175
+ warnings.append(
176
+ f"ℹ️ MEDIUM: {len(medium)} standard quality checks failed"
177
+ )
178
+
179
+ return warnings
180
+
181
+ def _generate_security_recommendations(
182
+ self,
183
+ critical: list[SecurityCheckResult],
184
+ high: list[SecurityCheckResult],
185
+ medium: list[SecurityCheckResult]
186
+ ) -> list[str]:
187
+ """Generate security recommendations based on OWASP best practices."""
188
+ recommendations = []
189
+
190
+ if critical:
191
+ recommendations.append("🔧 Fix all CRITICAL security issues before publishing")
192
+
193
+ # Specific recommendations based on failed checks
194
+ critical_names = [f.hook_name.lower() for f in critical]
195
+
196
+ if 'bandit' in critical_names:
197
+ recommendations.append(" • Review bandit security findings - may indicate vulnerabilities")
198
+ if 'pyright' in critical_names:
199
+ recommendations.append(" • Fix type errors - type safety prevents runtime security holes")
200
+ if 'gitleaks' in critical_names:
201
+ recommendations.append(" • Remove secrets/credentials from code - use environment variables")
202
+
203
+ if high:
204
+ recommendations.append("🔍 Review HIGH-security findings before production deployment")
205
+
206
+ if len(critical) == 0 and len(high) == 0:
207
+ recommendations.append("✅ Security posture is acceptable for publishing")
208
+
209
+ # Add OWASP best practices reference
210
+ recommendations.append("📖 Follow OWASP Secure Coding Practices for comprehensive security")
211
+
212
+ return recommendations
@@ -113,6 +113,21 @@ class GitService:
113
113
  self.console.print(f"[red]❌[/red] Error adding files: {e}")
114
114
  return False
115
115
 
116
+ def add_all_files(self) -> bool:
117
+ """Stage all changes including new, modified, and deleted files."""
118
+ try:
119
+ result = self._run_git_command(["add", "-A", "."])
120
+ if result.returncode == 0:
121
+ self.console.print("[green]✅[/green] Staged all changes")
122
+ return True
123
+ self.console.print(
124
+ f"[red]❌[/red] Failed to stage changes: {result.stderr}"
125
+ )
126
+ return False
127
+ except Exception as e:
128
+ self.console.print(f"[red]❌[/red] Error staging files: {e}")
129
+ return False
130
+
116
131
  def commit(self, message: str) -> bool:
117
132
  try:
118
133
  result = self._run_git_command(["commit", "-m", message])
@@ -172,9 +187,10 @@ class GitService:
172
187
 
173
188
  def push(self) -> bool:
174
189
  try:
175
- result = self._run_git_command(["push"])
190
+ # Get detailed push information
191
+ result = self._run_git_command(["push", "--porcelain"])
176
192
  if result.returncode == 0:
177
- self.console.print("[green]✅[/green] Pushed to remote")
193
+ self._display_push_success(result.stdout)
178
194
  return True
179
195
  self.console.print(f"[red]❌[/red] Push failed: {result.stderr}")
180
196
  return False
@@ -182,6 +198,66 @@ class GitService:
182
198
  self.console.print(f"[red]❌[/red] Error pushing: {e}")
183
199
  return False
184
200
 
201
+ def _display_push_success(self, push_output: str) -> None:
202
+ """Display detailed push success information."""
203
+ lines = push_output.strip().split("\n") if push_output.strip() else []
204
+
205
+ if not lines:
206
+ self._display_no_commits_message()
207
+ return
208
+
209
+ pushed_refs = self._parse_pushed_refs(lines)
210
+ self._display_push_results(pushed_refs)
211
+
212
+ def _display_no_commits_message(self) -> None:
213
+ """Display message for no new commits."""
214
+ self.console.print("[green]✅[/green] Pushed to remote (no new commits)")
215
+
216
+ def _parse_pushed_refs(self, lines: list[str]) -> list[str]:
217
+ """Parse pushed references from git output."""
218
+ pushed_refs = []
219
+ for line in lines:
220
+ if line.startswith(("*", "+", "=")):
221
+ # Parse porcelain output: flag:from:to summary
222
+ parts = line.split("\t")
223
+ if len(parts) >= 2:
224
+ summary = parts[1] if len(parts) > 1 else ""
225
+ pushed_refs.append(summary)
226
+ return pushed_refs
227
+
228
+ def _display_push_results(self, pushed_refs: list[str]) -> None:
229
+ """Display the push results to console."""
230
+ if pushed_refs:
231
+ self.console.print(
232
+ f"[green]✅[/green] Successfully pushed {len(pushed_refs)} ref(s) to remote:"
233
+ )
234
+ for ref in pushed_refs:
235
+ self.console.print(f" [dim]→ {ref}[/dim]")
236
+ else:
237
+ # Get commit count as fallback
238
+ self._display_commit_count_push()
239
+
240
+ def _display_commit_count_push(self) -> None:
241
+ """Fallback method to show commit count information."""
242
+ try:
243
+ # Get commits ahead of remote
244
+ result = self._run_git_command(["rev-list", "--count", "@{u}..HEAD"])
245
+ if result.returncode == 0 and result.stdout.strip().isdigit():
246
+ commit_count = int(result.stdout.strip())
247
+ if commit_count > 0:
248
+ self.console.print(
249
+ f"[green]✅[/green] Pushed {commit_count} commit(s) to remote"
250
+ )
251
+ else:
252
+ self.console.print(
253
+ "[green]✅[/green] Pushed to remote (up to date)"
254
+ )
255
+ else:
256
+ # Even more basic fallback
257
+ self.console.print("[green]✅[/green] Successfully pushed to remote")
258
+ except (ValueError, Exception):
259
+ self.console.print("[green]✅[/green] Successfully pushed to remote")
260
+
185
261
  def get_current_branch(self) -> str | None:
186
262
  try:
187
263
  result = self._run_git_command(["branch", "--show-current"])
@@ -244,3 +320,13 @@ class GitService:
244
320
  messages.append("Update README documentation")
245
321
 
246
322
  return messages
323
+
324
+ def get_unpushed_commit_count(self) -> int:
325
+ """Get the number of unpushed commits."""
326
+ from contextlib import suppress
327
+
328
+ with suppress(ValueError, Exception):
329
+ result = self._run_git_command(["rev-list", "--count", "@{u}..HEAD"])
330
+ if result.returncode == 0 and result.stdout.strip().isdigit():
331
+ return int(result.stdout.strip())
332
+ return 0
@@ -181,12 +181,12 @@ class PerformanceBenchmarkService:
181
181
  check=False,
182
182
  capture_output=True,
183
183
  text=True,
184
- timeout=120,
184
+ timeout=300, # Fixed: Use 300s to match pytest config
185
185
  )
186
186
  duration = time.time() - start_time
187
187
  durations.append(duration)
188
188
  except subprocess.TimeoutExpired:
189
- durations.append(120.0)
189
+ durations.append(300.0) # Fixed: Use 300s to match timeout
190
190
  except Exception:
191
191
  durations.append(float("inf"))
192
192
 
@@ -44,6 +44,20 @@ ALLOWED_PATTERNS = {
44
44
  r"# REGEX OK:", # Comment-based exemption
45
45
  # Validation in regex_patterns.py itself
46
46
  r"crackerjack/services/regex_patterns\.py$",
47
+ # Regex validation tools themselves
48
+ r"tools/validate_regex_patterns_standalone\.py$",
49
+ r"crackerjack/tools/validate_regex_patterns\.py$",
50
+ # Test files that legitimately test regex patterns
51
+ r"tests/test_.*\.py$",
52
+ # Core security and subprocess files that need regex for parsing
53
+ r"crackerjack/services/secure_subprocess\.py$",
54
+ r"crackerjack/mcp/tools/core_tools\.py$",
55
+ # Intelligence and workflow files with legitimate parsing needs
56
+ r"crackerjack/intelligence/agent_selector\.py$",
57
+ r"crackerjack/managers/test_.*\.py$",
58
+ r"crackerjack/core/async_workflow_orchestrator\.py$",
59
+ # Agent files that use validated patterns with dynamic escaping
60
+ r"crackerjack/agents/.*\.py$",
47
61
  }
48
62
 
49
63
  FORBIDDEN_REPLACEMENT_PATTERNS = [
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: crackerjack
3
- Version: 0.31.18
3
+ Version: 0.32.0
4
4
  Summary: Opinionated Python project management tool
5
5
  Project-URL: documentation, https://github.com/lesleslie/crackerjack
6
6
  Project-URL: homepage, https://github.com/lesleslie/crackerjack