crackerjack-0.31.17-py3-none-any.whl → crackerjack-0.32.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


@@ -40,6 +40,7 @@ class WorkflowPipeline:
         self.session = session
         self.phases = phases
         self._mcp_state_manager: t.Any = None
+        self._last_security_audit: t.Any = None  # Store security audit report

         self.logger = get_logger("crackerjack.pipeline")
         self._debugger = None
@@ -166,9 +167,15 @@ class WorkflowPipeline:

         # Code cleaning is now integrated into the quality phase
         # to run after fast hooks but before comprehensive hooks
-        if not await self._execute_quality_phase(options):
+        quality_success = await self._execute_quality_phase(options)
+        if not quality_success:
             success = False
-            return False
+            # Don't return early - continue to publishing/commit phases if requested
+            # This allows -p and -c flags to work even when quality checks fail
+            if not (options.publish or options.all or options.commit):
+                # Only exit early if no publishing/commit operations are requested
+                return False
+
         if not self.phases.run_publishing_phase(options):
             success = False
             self.session.fail_task("workflow", "Publishing failed")
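
The reworked quality phase above no longer aborts when quality checks fail but a publish or commit was requested; it only exits early when neither is on the table. A minimal standalone sketch of that gating condition follows (the Options dataclass is a stand-in for crackerjack's options protocol, not its real type):

from dataclasses import dataclass

@dataclass
class Options:
    publish: bool = False
    all: bool = False
    commit: bool = False

def should_stop_after_quality_failure(options: Options) -> bool:
    """Return True only when no publish/commit work was requested.

    Mirrors the new early-exit condition: a quality failure still marks the
    run as unsuccessful, but publishing/commit phases may still execute.
    """
    return not (options.publish or options.all or options.commit)

# A run with -p (publish) keeps going despite a failed quality phase:
assert should_stop_after_quality_failure(Options(publish=True)) is False
# A plain quality run still exits early:
assert should_stop_after_quality_failure(Options()) is True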
@@ -239,15 +246,50 @@ class WorkflowPipeline:
         testing_passed: bool,
         comprehensive_passed: bool,
     ) -> bool:
-        if not testing_passed or not comprehensive_passed:
+        # Check security gates for publishing operations
+        publishing_requested, security_blocks = (
+            self._check_security_gates_for_publishing(options)
+        )
+
+        if publishing_requested and security_blocks:
+            # Try AI fixing for security issues, then re-check
+            security_fix_result = await self._handle_security_gate_failure(
+                options, allow_ai_fixing=True
+            )
+            if not security_fix_result:
+                return False
+            # If AI fixing resolved security issues, continue with normal flow
+
+        # Determine if we need AI fixing based on publishing requirements
+        needs_ai_fixing = self._determine_ai_fixing_needed(
+            testing_passed, comprehensive_passed, publishing_requested
+        )
+
+        if needs_ai_fixing:
             success = await self._run_ai_agent_fixing_phase(options)
             if self._should_debug():
                 self.debugger.log_iteration_end(iteration, success)
             return success

+        # Determine final success based on publishing requirements
+        final_success = self._determine_workflow_success(
+            testing_passed,
+            comprehensive_passed,
+            publishing_requested,
+            workflow_type="ai",
+        )
+
+        # Show security audit warning for partial success in publishing workflows
+        if (
+            publishing_requested
+            and final_success
+            and not (testing_passed and comprehensive_passed)
+        ):
+            self._show_security_audit_warning()
+
         if self._should_debug():
-            self.debugger.log_iteration_end(iteration, True)
-        return True
+            self.debugger.log_iteration_end(iteration, final_success)
+        return final_success

     def _handle_standard_workflow(
         self,
@@ -256,20 +298,38 @@ class WorkflowPipeline:
         testing_passed: bool,
         comprehensive_passed: bool,
     ) -> bool:
-        success = testing_passed and comprehensive_passed
+        # Check security gates for publishing operations
+        publishing_requested, security_blocks = (
+            self._check_security_gates_for_publishing(options)
+        )

-        if not success and getattr(options, "verbose", False):
+        if publishing_requested and security_blocks:
+            # Standard workflow cannot bypass security gates
+            return self._handle_security_gate_failure(options, allow_ai_fixing=False)
+
+        # Determine success based on publishing requirements
+        success = self._determine_workflow_success(
+            testing_passed,
+            comprehensive_passed,
+            publishing_requested,
+            workflow_type="standard",
+        )
+
+        # Show security audit warning for partial success in publishing workflows
+        if (
+            publishing_requested
+            and success
+            and not (testing_passed and comprehensive_passed)
+        ):
+            self._show_security_audit_warning()
+        elif publishing_requested and not success:
             self.console.print(
-                f"[yellow]⚠️ Workflow stopped-testing_passed: {testing_passed}, comprehensive_passed: {comprehensive_passed}[/ yellow]"
+                "[red] Both tests and comprehensive hooks failed - cannot proceed to publishing[/red]"
             )
-            if not testing_passed:
-                self.console.print(
-                    "[yellow] Tests reported failure despite appearing successful[/ yellow]"
-                )
-            if not comprehensive_passed:
-                self.console.print(
-                    "[yellow] → Comprehensive hooks reported failure despite appearing successful[/ yellow]"
-                )
+
+        # Show verbose failure details if requested
+        if not success and getattr(options, "verbose", False):
+            self._show_verbose_failure_details(testing_passed, comprehensive_passed)

         if options.ai_agent and self._should_debug():
             self.debugger.log_iteration_end(iteration, success)
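
Both workflow handlers now route their pass/fail decision through _determine_workflow_success and _determine_ai_fixing_needed, which are added later in this diff. The rules reduce to a small truth table; the sketch below reproduces only that boolean logic as free functions for illustration:

def determine_workflow_success(
    testing_passed: bool, comprehensive_passed: bool, publishing_requested: bool
) -> bool:
    # Publishing workflows accept partial success; regular runs require both.
    if publishing_requested:
        return testing_passed or comprehensive_passed
    return testing_passed and comprehensive_passed

def determine_ai_fixing_needed(
    testing_passed: bool, comprehensive_passed: bool, publishing_requested: bool
) -> bool:
    # Publishing workflows only trigger AI fixing when both checks fail;
    # regular runs trigger it when either fails.
    if publishing_requested:
        return not testing_passed and not comprehensive_passed
    return not testing_passed or not comprehensive_passed

# Partial success with publishing requested: proceed, no AI fixing round.
assert determine_workflow_success(True, False, True) is True
assert determine_ai_fixing_needed(True, False, True) is False
# Same partial failure without publishing: not successful, AI fixing kicks in.
assert determine_workflow_success(True, False, False) is False
assert determine_ai_fixing_needed(True, False, False) is True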
@@ -995,6 +1055,276 @@ class WorkflowPipeline:
         self.debugger.log_test_failures(test_count)
         self.debugger.log_hook_failures(hook_count)

+    def _check_security_gates_for_publishing(
+        self, options: OptionsProtocol
+    ) -> tuple[bool, bool]:
+        """Check if publishing is requested and if security gates block it.
+
+        Returns:
+            tuple[bool, bool]: (publishing_requested, security_blocks_publishing)
+        """
+        publishing_requested = bool(options.publish or options.all or options.commit)
+
+        if not publishing_requested:
+            return False, False
+
+        # Check security gates for publishing operations
+        try:
+            security_blocks_publishing = self._check_security_critical_failures()
+            return publishing_requested, security_blocks_publishing
+        except Exception as e:
+            # Fail securely if security check fails
+            self.logger.warning(f"Security check failed: {e} - blocking publishing")
+            self.console.print(
+                "[red]🔒 SECURITY CHECK FAILED: Unable to verify security status - publishing BLOCKED[/red]"
+            )
+            # Return True for security_blocks to fail securely
+            return publishing_requested, True
+
+    async def _handle_security_gate_failure(
+        self, options: OptionsProtocol, allow_ai_fixing: bool = False
+    ) -> bool:
+        """Handle security gate failures with optional AI fixing.
+
+        Args:
+            options: Workflow options
+            allow_ai_fixing: Whether AI fixing is allowed for security issues
+
+        Returns:
+            bool: True if security issues resolved, False if still blocked
+        """
+        self.console.print(
+            "[red]🔒 SECURITY GATE: Critical security checks failed[/red]"
+        )
+
+        if allow_ai_fixing:
+            self.console.print(
+                "[red]Security-critical hooks (bandit, pyright, gitleaks) must pass before publishing[/red]"
+            )
+            self.console.print(
+                "[yellow]🤖 Attempting AI-assisted security issue resolution...[/yellow]"
+            )
+
+            # Try AI fixing for security issues
+            ai_fix_success = await self._run_ai_agent_fixing_phase(options)
+            if ai_fix_success:
+                # Re-check security after AI fixing
+                try:
+                    security_still_blocks = self._check_security_critical_failures()
+                    if not security_still_blocks:
+                        self.console.print(
+                            "[green]✅ AI agents resolved security issues - publishing allowed[/green]"
+                        )
+                        return True
+                    else:
+                        self.console.print(
+                            "[red]🔒 Security issues persist after AI fixing - publishing still BLOCKED[/red]"
+                        )
+                        return False
+                except Exception as e:
+                    self.logger.warning(
+                        f"Security re-check failed: {e} - blocking publishing"
+                    )
+                    return False
+            return False
+        else:
+            # Standard workflow cannot bypass security gates
+            self.console.print(
+                "[red]Security-critical hooks (bandit, pyright, gitleaks) must pass before publishing[/red]"
+            )
+            return False
+
+    def _determine_ai_fixing_needed(
+        self,
+        testing_passed: bool,
+        comprehensive_passed: bool,
+        publishing_requested: bool,
+    ) -> bool:
+        """Determine if AI fixing is needed based on test results and publishing requirements."""
+        if publishing_requested:
+            # For publish/commit workflows, only trigger AI fixing if both fail
+            return not testing_passed and not comprehensive_passed
+        else:
+            # For regular workflows, trigger AI fixing if either fails
+            return not testing_passed or not comprehensive_passed
+
+    def _determine_workflow_success(
+        self,
+        testing_passed: bool,
+        comprehensive_passed: bool,
+        publishing_requested: bool,
+        workflow_type: str,
+    ) -> bool:
+        """Determine workflow success based on test results and workflow type."""
+        if publishing_requested:
+            # For publishing workflows, either test or comprehensive passing is sufficient
+            return testing_passed or comprehensive_passed
+        else:
+            # For regular workflows, both must pass
+            return testing_passed and comprehensive_passed
+
+    def _show_verbose_failure_details(
+        self, testing_passed: bool, comprehensive_passed: bool
+    ) -> None:
+        """Show detailed failure information in verbose mode."""
+        self.console.print(
+            f"[yellow]⚠️ Quality phase results - testing_passed: {testing_passed}, comprehensive_passed: {comprehensive_passed}[/yellow]"
+        )
+        if not testing_passed:
+            self.console.print("[yellow] → Tests reported failure[/yellow]")
+        if not comprehensive_passed:
+            self.console.print(
+                "[yellow] → Comprehensive hooks reported failure[/yellow]"
+            )
+
+    def _check_security_critical_failures(self) -> bool:
+        """Check if any security-critical hooks have failed.
+
+        Returns:
+            True if security-critical hooks failed and block publishing
+        """
+        try:
+            from crackerjack.security.audit import SecurityAuditor
+
+            auditor = SecurityAuditor()
+
+            # Get hook results - we need to be careful not to re-run hooks
+            # Instead, check the session tracker for recent failures
+            fast_results = self._get_recent_fast_hook_results()
+            comprehensive_results = self._get_recent_comprehensive_hook_results()
+
+            # Generate security audit report
+            audit_report = auditor.audit_hook_results(
+                fast_results, comprehensive_results
+            )
+
+            # Store audit report for later use
+            self._last_security_audit = audit_report
+
+            # Block publishing if critical failures exist
+            return audit_report.has_critical_failures
+
+        except Exception as e:
+            # Fail securely - if we can't determine security status, block publishing
+            self.logger.warning(f"Security audit failed: {e} - failing securely")
+            # Re-raise the exception so it can be caught by the calling method
+            raise
+
+    def _get_recent_fast_hook_results(self) -> list[t.Any]:
+        """Get recent fast hook results from session tracker."""
+        results = []
+
+        # Try to get results from session tracker
+        if hasattr(self.session, "session_tracker") and self.session.session_tracker:
+            for task_id, task_data in self.session.session_tracker.tasks.items():
+                if task_id == "fast_hooks" and hasattr(task_data, "hook_results"):
+                    results.extend(task_data.hook_results)
+
+        # If no results from session, create mock failed results for critical hooks
+        # This ensures we fail securely when we can't determine actual status
+        if not results:
+            critical_fast_hooks = ["gitleaks"]
+            for hook_name in critical_fast_hooks:
+                # Create a mock result that appears to have failed
+                # This will trigger security blocking if we can't determine actual status
+                mock_result = type(
+                    "MockResult",
+                    (),
+                    {
+                        "name": hook_name,
+                        "status": "unknown",  # Unknown status = fail securely
+                        "output": "Unable to determine hook status",
+                    },
+                )()
+                results.append(mock_result)
+
+        return results
+
+    def _get_recent_comprehensive_hook_results(self) -> list[t.Any]:
+        """Get recent comprehensive hook results from session tracker."""
+        results = []
+
+        # Try to get results from session tracker
+        if hasattr(self.session, "session_tracker") and self.session.session_tracker:
+            for task_id, task_data in self.session.session_tracker.tasks.items():
+                if task_id == "comprehensive_hooks" and hasattr(
+                    task_data, "hook_results"
+                ):
+                    results.extend(task_data.hook_results)
+
+        # If no results from session, create mock failed results for critical hooks
+        if not results:
+            critical_comprehensive_hooks = ["bandit", "pyright"]
+            for hook_name in critical_comprehensive_hooks:
+                mock_result = type(
+                    "MockResult",
+                    (),
+                    {
+                        "name": hook_name,
+                        "status": "unknown",  # Unknown status = fail securely
+                        "output": "Unable to determine hook status",
+                    },
+                )()
+                results.append(mock_result)
+
+        return results
+
+    def _is_security_critical_failure(self, result: t.Any) -> bool:
+        """Check if a hook result represents a security-critical failure."""
+
+        # List of security-critical hook names (fail-safe approach)
+        security_critical_hooks = {
+            "bandit",  # Security vulnerability scanning
+            "pyright",  # Type safety prevents security holes
+            "gitleaks",  # Secret detection
+        }
+
+        hook_name = getattr(result, "name", "").lower()
+        is_failed = getattr(result, "status", "unknown") in (
+            "failed",
+            "error",
+            "timeout",
+        )
+
+        return hook_name in security_critical_hooks and is_failed
+
+    def _show_security_audit_warning(self) -> None:
+        """Show security audit warning when proceeding with partial success."""
+        # Use stored audit report if available
+        audit_report = getattr(self, "_last_security_audit", None)
+
+        if audit_report:
+            self.console.print(
+                "[yellow]⚠️ SECURITY AUDIT: Proceeding with partial quality success[/yellow]"
+            )
+
+            # Show security status
+            for warning in audit_report.security_warnings:
+                if "CRITICAL" in warning:
+                    # This shouldn't happen if we're showing warnings, but fail-safe
+                    self.console.print(f"[red]{warning}[/red]")
+                elif "HIGH" in warning:
+                    self.console.print(f"[yellow]{warning}[/yellow]")
+                else:
+                    self.console.print(f"[blue]{warning}[/blue]")
+
+            # Show recommendations
+            if audit_report.recommendations:
+                self.console.print("[bold]Security Recommendations:[/bold]")
+                for rec in audit_report.recommendations[:3]:  # Show top 3
+                    self.console.print(f"[dim]{rec}[/dim]")
+        else:
+            # Fallback if no audit report available
+            self.console.print(
+                "[yellow]⚠️ SECURITY AUDIT: Proceeding with partial quality success[/yellow]"
+            )
+            self.console.print(
+                "[yellow]✅ Security-critical checks (bandit, pyright, gitleaks) have passed[/yellow]"
+            )
+            self.console.print(
+                "[yellow]⚠️ Some non-critical quality checks failed - consider reviewing before production deployment[/yellow]"
+            )
+

 class WorkflowOrchestrator:
     def __init__(
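
When the session tracker holds no hook results, the fail-secure fallback above fabricates placeholder results via type("MockResult", (), {...})(), and _is_security_critical_failure classifies results by hook name and status. The standalone sketch below mirrors that classification rule; the SecurityAuditor class and its audit_hook_results / has_critical_failures interface are internal to crackerjack and are only referenced here, not reproduced:

import typing as t

SECURITY_CRITICAL_HOOKS = {"bandit", "pyright", "gitleaks"}

def is_security_critical_failure(result: t.Any) -> bool:
    """Mirror of the rule above: a security-critical hook counts as blocking
    when its status is failed/error/timeout."""
    hook_name = getattr(result, "name", "").lower()
    is_failed = getattr(result, "status", "unknown") in ("failed", "error", "timeout")
    return hook_name in SECURITY_CRITICAL_HOOKS and is_failed

# Placeholder result built the same way as the fail-secure fallback above.
mock = type("MockResult", (), {"name": "gitleaks", "status": "unknown", "output": ""})()
print(is_security_critical_failure(mock))  # False: "unknown" is not in the failed set
failed = type("MockResult", (), {"name": "bandit", "status": "failed", "output": ""})()
print(is_security_critical_failure(failed))  # True: would block publishing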
@@ -1063,8 +1393,8 @@ class WorkflowOrchestrator:
         session_id = getattr(self, "web_job_id", None) or str(int(time.time()))[:8]
         debug_log_file = log_manager.create_debug_log_file(session_id)

-        # Set log level based on verbosity - DEBUG only in verbose or debug mode
-        log_level = "DEBUG" if (self.verbose or self.debug) else "INFO"
+        # Set log level based on debug flag only - verbose should not enable DEBUG logs
+        log_level = "DEBUG" if self.debug else "INFO"
         setup_structured_logging(
             level=log_level, json_output=False, log_file=debug_log_file
         )
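
The orchestrator now keys the structured-logging level off the debug flag alone, so verbose runs no longer write DEBUG-level log files. In isolation (a standalone sketch, not the orchestrator method itself):

def select_log_level(verbose: bool, debug: bool) -> str:
    # Previously: "DEBUG" if (verbose or debug) else "INFO"
    return "DEBUG" if debug else "INFO"

print(select_log_level(verbose=True, debug=False))  # INFO - verbose alone stays at INFO
print(select_log_level(verbose=False, debug=True))  # DEBUG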
@@ -5,30 +5,6 @@ from pathlib import Path
 import jinja2


-def _get_project_root() -> Path:
-    """Get the absolute path to the crackerjack project root."""
-    # Start from this file and go up to find the real project root
-    current_path = Path(__file__).resolve()
-    for parent in current_path.parents:
-        if (parent / "pyproject.toml").exists() and (parent / "tools").exists():
-            # Verify it's the crackerjack project by checking for unique markers and tools dir
-            try:
-                pyproject_content = (parent / "pyproject.toml").read_text()
-                if (
-                    'name = "crackerjack"' in pyproject_content
-                    and (
-                        parent / "tools" / "validate_regex_patterns_standalone.py"
-                    ).exists()
-                ):
-                    return parent
-            except Exception:
-                # If we can't read the file, continue searching
-                continue
-
-    # Fallback to the parent of the crackerjack package directory
-    return Path(__file__).resolve().parent.parent
-
-
 class HookMetadata(t.TypedDict):
     id: str
     name: str | None
@@ -70,7 +46,7 @@ HOOKS_REGISTRY: dict[str, list[HookMetadata]] = {
             "additional_dependencies": None,
             "types_or": None,
             "language": "system",
-            "entry": f"uv run python {_get_project_root() / 'tools' / 'validate_regex_patterns_standalone.py'}",
+            "entry": "uv run python -m crackerjack.tools.validate_regex_patterns",
             "experimental": False,
         },
         {
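
The hook entry now invokes a module shipped inside the package instead of an absolute path computed by the removed _get_project_root, so the generated hook works from any checkout or installed wheel. A quick way to confirm the target module resolves in a given environment (a verification sketch, not part of crackerjack):

import importlib.util

try:
    spec = importlib.util.find_spec("crackerjack.tools.validate_regex_patterns")
except ModuleNotFoundError:
    spec = None

if spec is None:
    print("hook target is not importable here; the entry would fail in this environment")
else:
    print(f"hook target resolves to {spec.origin}")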
@@ -23,17 +23,21 @@ class TestCommandBuilder:
         if hasattr(options, "test_workers") and options.test_workers:
             return options.test_workers

-        import multiprocessing
-
-        cpu_count = multiprocessing.cpu_count()
-
-        if cpu_count <= 2:
-            return 1
-        elif cpu_count <= 4:
-            return 2
-        elif cpu_count <= 8:
-            return 3
-        return 4
+        # Temporarily disable multi-worker execution due to pytest-xdist
+        # hanging issues with async tests. See GitHub issue for details.
+        # TODO: Re-enable after fixing async test timeout issues
+        return 1
+
+        # Original multi-worker logic (commented out):
+        # import multiprocessing
+        # cpu_count = multiprocessing.cpu_count()
+        # if cpu_count <= 2:
+        #     return 1
+        # elif cpu_count <= 4:
+        #     return 2
+        # elif cpu_count <= 8:
+        #     return 3
+        # return 4

     def get_test_timeout(self, options: OptionsProtocol) -> int:
         if hasattr(options, "test_timeout") and options.test_timeout:
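
With this change the builder always falls back to a single worker, while an explicit options.test_workers value is still honored by the early return kept above. A minimal sketch of the resulting behavior (the Opts stand-in is illustrative only):

import typing as t

def get_test_workers(options: t.Any) -> int:
    # Explicit override wins, exactly as in the early return above.
    if hasattr(options, "test_workers") and options.test_workers:
        return options.test_workers
    # Otherwise: single worker while the pytest-xdist async hangs are investigated.
    return 1

class Opts:
    test_workers = 4

print(get_test_workers(Opts()))    # 4 - explicit override
print(get_test_workers(object()))  # 1 - temporary default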
@@ -29,6 +29,14 @@ def _register_execute_crackerjack_tool(mcp_app: t.Any) -> None:

         extra_kwargs = kwargs_result["kwargs"]

+        # Add extended timeout for long-running operations
+        if "execution_timeout" not in extra_kwargs:
+            # Default to 15 minutes, extend to 20 minutes for test operations
+            if extra_kwargs.get("test", False) or extra_kwargs.get("testing", False):
+                extra_kwargs["execution_timeout"] = 1200  # 20 minutes for tests
+            else:
+                extra_kwargs["execution_timeout"] = 900  # 15 minutes default
+
         try:
             result = await execute_crackerjack_workflow(args, extra_kwargs)
             return json.dumps(result, indent=2)
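
The MCP tool now injects a default execution_timeout when the caller did not pass one: 1200 seconds for test runs, 900 seconds otherwise. The same defaulting logic in isolation, on plain dicts with no MCP machinery:

def apply_default_timeout(extra_kwargs: dict) -> dict:
    if "execution_timeout" not in extra_kwargs:
        is_test_run = extra_kwargs.get("test", False) or extra_kwargs.get("testing", False)
        extra_kwargs["execution_timeout"] = 1200 if is_test_run else 900
    return extra_kwargs

print(apply_default_timeout({"test": True}))              # {'test': True, 'execution_timeout': 1200}
print(apply_default_timeout({}))                          # {'execution_timeout': 900}
print(apply_default_timeout({"execution_timeout": 300}))  # caller-supplied value preserved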