crackerjack 0.37.9__py3-none-any.whl → 0.38.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crackerjack has been flagged as potentially problematic; consult the package registry's advisory page for details.

@@ -66,6 +66,7 @@ class WorkflowPipeline:
66
66
  self._cache = get_performance_cache()
67
67
 
68
68
  # Initialize quality intelligence for advanced decision making
69
+ self._quality_intelligence: QualityIntelligenceService | None
69
70
  try:
70
71
  quality_baseline = EnhancedQualityBaselineService()
71
72
  self._quality_intelligence = QualityIntelligenceService(quality_baseline)
@@ -74,6 +75,7 @@ class WorkflowPipeline:
74
75
  self._quality_intelligence = None
75
76
 
76
77
  # Initialize performance benchmarking for workflow analysis
78
+ self._performance_benchmarks: PerformanceBenchmarkService | None
77
79
  try:
78
80
  self._performance_benchmarks = PerformanceBenchmarkService(
79
81
  console, pkg_path
@@ -169,22 +171,33 @@ class WorkflowPipeline:
169
171
 
170
172
  def _initialize_zuban_lsp(self, options: OptionsProtocol) -> None:
171
173
  """Initialize Zuban LSP server if not disabled."""
172
- # Check if LSP is disabled via CLI flag or configuration
174
+ if self._should_skip_zuban_lsp(options):
175
+ return
176
+
177
+ if self._is_zuban_lsp_already_running():
178
+ return
179
+
180
+ self._start_zuban_lsp_server(options)
181
+
182
+ def _should_skip_zuban_lsp(self, options: OptionsProtocol) -> bool:
183
+ """Check if Zuban LSP server should be skipped."""
173
184
  if getattr(options, "no_zuban_lsp", False):
174
185
  self.logger.debug("Zuban LSP server disabled by --no-zuban-lsp flag")
175
- return
186
+ return True
176
187
 
177
- # Get configuration from options (will use config system if available)
178
188
  config = getattr(options, "zuban_lsp", None)
179
189
  if config and not config.enabled:
180
190
  self.logger.debug("Zuban LSP server disabled in configuration")
181
- return
191
+ return True
182
192
 
183
193
  if config and not config.auto_start:
184
194
  self.logger.debug("Zuban LSP server auto-start disabled in configuration")
185
- return
195
+ return True
186
196
 
187
- # Check if LSP server is already running to avoid duplicates
197
+ return False
198
+
199
+ def _is_zuban_lsp_already_running(self) -> bool:
200
+ """Check if LSP server is already running to avoid duplicates."""
188
201
  from crackerjack.services.server_manager import find_zuban_lsp_processes
189
202
 
190
203
  existing_processes = find_zuban_lsp_processes()
@@ -192,20 +205,17 @@ class WorkflowPipeline:
192
205
  self.logger.debug(
193
206
  f"Zuban LSP server already running (PID: {existing_processes[0]['pid']})"
194
207
  )
195
- return
208
+ return True
209
+ return False
196
210
 
197
- # Auto-start LSP server in background
211
+ def _start_zuban_lsp_server(self, options: OptionsProtocol) -> None:
212
+ """Start the Zuban LSP server in background."""
198
213
  try:
199
214
  import subprocess
200
215
  import sys
201
216
 
202
- # Use configuration values if available, otherwise fallback to CLI options
203
- if config:
204
- zuban_lsp_port = config.port
205
- zuban_lsp_mode = config.mode
206
- else:
207
- zuban_lsp_port = getattr(options, "zuban_lsp_port", 8677)
208
- zuban_lsp_mode = getattr(options, "zuban_lsp_mode", "stdio")
217
+ config = getattr(options, "zuban_lsp", None)
218
+ zuban_lsp_port, zuban_lsp_mode = self._get_zuban_lsp_config(options, config)
209
219
 
210
220
  cmd = [
211
221
  sys.executable,
@@ -232,6 +242,17 @@ class WorkflowPipeline:
232
242
  except Exception as e:
233
243
  self.logger.warning(f"Failed to auto-start Zuban LSP server: {e}")
234
244
 
245
+ def _get_zuban_lsp_config(
246
+ self, options: OptionsProtocol, config: t.Any
247
+ ) -> tuple[int, str]:
248
+ """Get Zuban LSP configuration values."""
249
+ if config:
250
+ return config.port, config.mode
251
+ return (
252
+ getattr(options, "zuban_lsp_port", 8677),
253
+ getattr(options, "zuban_lsp_mode", "stdio"),
254
+ )
255
+
235
256
  def _log_zuban_lsp_status(self) -> None:
236
257
  """Display current Zuban LSP server status during workflow startup."""
237
258
  from crackerjack.services.server_manager import find_zuban_lsp_processes
@@ -351,36 +372,9 @@ class WorkflowPipeline:
351
372
  return
352
373
 
353
374
  try:
354
- # Gather performance metrics from the workflow execution
355
- {
356
- "workflow_id": workflow_id,
357
- "total_duration": duration,
358
- "success": success,
359
- "cache_metrics": self._cache.get_stats() if self._cache else {},
360
- "memory_metrics": self._memory_optimizer.get_stats()
361
- if hasattr(self._memory_optimizer, "get_stats")
362
- else {},
363
- }
364
-
365
- # Generate benchmark comparison
375
+ self._gather_performance_metrics(workflow_id, duration, success)
366
376
  benchmark_results = await self._performance_benchmarks.run_benchmark_suite()
367
-
368
- # Display compact performance summary
369
- if benchmark_results:
370
- self.console.print("\n[cyan]📊 Performance Benchmark Summary[/cyan]")
371
- self.console.print(f"Workflow Duration: [bold]{duration:.2f}s[/bold]")
372
-
373
- # Show key performance improvements if available
374
- for result in benchmark_results.results[:3]: # Top 3 results
375
- if result.time_improvement_percentage > 0:
376
- self.console.print(
377
- f"[green]⚡[/green] {result.test_name}: {result.time_improvement_percentage:.1f}% faster"
378
- )
379
-
380
- if result.cache_hit_ratio > 0:
381
- self.console.print(
382
- f"[blue]🎯[/blue] Cache efficiency: {result.cache_hit_ratio:.0%}"
383
- )
377
+ self._display_benchmark_results(benchmark_results, duration)
384
378
 
385
379
  except Exception as e:
386
380
  self.console.print(
@@ -390,6 +384,52 @@ class WorkflowPipeline:
390
384
  if self.debugger.enabled:
391
385
  self.debugger.print_debug_summary()
392
386
 
387
+ def _gather_performance_metrics(
388
+ self, workflow_id: str, duration: float, success: bool
389
+ ) -> dict[str, t.Any]:
390
+ """Gather performance metrics from workflow execution."""
391
+ return {
392
+ "workflow_id": workflow_id,
393
+ "total_duration": duration,
394
+ "success": success,
395
+ "cache_metrics": self._cache.get_stats() if self._cache else {},
396
+ "memory_metrics": self._memory_optimizer.get_stats()
397
+ if hasattr(self._memory_optimizer, "get_stats")
398
+ else {},
399
+ }
400
+
401
+ def _display_benchmark_results(
402
+ self, benchmark_results: t.Any, duration: float
403
+ ) -> None:
404
+ """Display compact performance summary."""
405
+ if not benchmark_results:
406
+ return
407
+
408
+ self.console.print("\n[cyan]📊 Performance Benchmark Summary[/cyan]")
409
+ self.console.print(f"Workflow Duration: [bold]{duration:.2f}s[/bold]")
410
+
411
+ self._show_performance_improvements(benchmark_results)
412
+
413
+ def _show_performance_improvements(self, benchmark_results: t.Any) -> None:
414
+ """Show key performance improvements from benchmark results."""
415
+ for result in benchmark_results.results[:3]: # Top 3 results
416
+ self._display_time_improvement(result)
417
+ self._display_cache_efficiency(result)
418
+
419
+ def _display_time_improvement(self, result: t.Any) -> None:
420
+ """Display time improvement percentage if available."""
421
+ if result.time_improvement_percentage > 0:
422
+ self.console.print(
423
+ f"[green]⚡[/green] {result.test_name}: {result.time_improvement_percentage:.1f}% faster"
424
+ )
425
+
426
+ def _display_cache_efficiency(self, result: t.Any) -> None:
427
+ """Display cache hit ratio if available."""
428
+ if result.cache_hit_ratio > 0:
429
+ self.console.print(
430
+ f"[blue]🎯[/blue] Cache efficiency: {result.cache_hit_ratio:.0%}"
431
+ )
432
+
393
433
  def _handle_user_interruption(self) -> bool:
394
434
  self.console.print("Interrupted by user")
395
435
  self.session.fail_task("workflow", "Interrupted by user")
@@ -525,40 +565,52 @@ class WorkflowPipeline:
525
565
  if not self._quality_intelligence:
526
566
  return "Quality intelligence not available"
527
567
 
528
- # Analyze recent quality trends and anomalies
529
568
  anomalies = self._quality_intelligence.detect_anomalies()
530
569
  patterns = self._quality_intelligence.identify_patterns()
531
570
 
532
- # Make intelligent recommendations based on current state
533
- recommendations = []
534
- if anomalies:
535
- high_severity_anomalies = [
536
- a for a in anomalies if a.severity.name in ("CRITICAL", "HIGH")
537
- ]
538
- if high_severity_anomalies:
539
- recommendations.append(
540
- "comprehensive analysis recommended due to quality anomalies"
541
- )
542
- else:
543
- recommendations.append("standard quality checks sufficient")
544
-
545
- if patterns:
546
- improving_patterns = [
547
- p for p in patterns if p.trend_direction.name == "IMPROVING"
548
- ]
549
- if improving_patterns:
550
- recommendations.append("quality trending upward")
551
- else:
552
- recommendations.append("quality monitoring active")
553
-
554
- if not recommendations:
555
- recommendations.append("baseline quality analysis active")
556
-
571
+ recommendations = self._build_quality_recommendations(anomalies, patterns)
557
572
  return "; ".join(recommendations)
558
573
 
559
574
  except Exception as e:
560
575
  return f"Quality intelligence analysis failed: {str(e)[:50]}..."
561
576
 
577
+ def _build_quality_recommendations(
578
+ self, anomalies: t.Any, patterns: t.Any
579
+ ) -> list[str]:
580
+ """Build quality recommendations based on anomalies and patterns."""
581
+ recommendations = []
582
+
583
+ if anomalies:
584
+ recommendations.extend(self._analyze_anomalies(anomalies))
585
+
586
+ if patterns:
587
+ recommendations.extend(self._analyze_patterns(patterns))
588
+
589
+ if not recommendations:
590
+ recommendations.append("baseline quality analysis active")
591
+
592
+ return recommendations
593
+
594
+ def _analyze_anomalies(self, anomalies: t.Any) -> list[str]:
595
+ """Analyze anomalies and return recommendations."""
596
+ high_severity_anomalies = [
597
+ a for a in anomalies if a.severity.name in ("CRITICAL", "HIGH")
598
+ ]
599
+
600
+ if high_severity_anomalies:
601
+ return ["comprehensive analysis recommended due to quality anomalies"]
602
+ return ["standard quality checks sufficient"]
603
+
604
+ def _analyze_patterns(self, patterns: t.Any) -> list[str]:
605
+ """Analyze patterns and return recommendations."""
606
+ improving_patterns = [
607
+ p for p in patterns if p.trend_direction.name == "IMPROVING"
608
+ ]
609
+
610
+ if improving_patterns:
611
+ return ["quality trending upward"]
612
+ return ["quality monitoring active"]
613
+
562
614
  async def _execute_test_workflow(
563
615
  self, options: OptionsProtocol, workflow_id: str
564
616
  ) -> bool:
@@ -572,7 +624,7 @@ class WorkflowPipeline:
572
624
  comprehensive_passed,
573
625
  ) = await self._run_main_quality_phases_async(options, workflow_id)
574
626
 
575
- return await self._handle_workflow_completion(
627
+ return await self._handle_ai_workflow_completion(
576
628
  options, iteration, testing_passed, comprehensive_passed, workflow_id
577
629
  )
578
630
 
@@ -600,7 +652,7 @@ class WorkflowPipeline:
600
652
  self._mark_code_cleaning_complete()
601
653
  return True
602
654
 
603
- async def _handle_workflow_completion(
655
+ async def _handle_ai_workflow_completion(
604
656
  self,
605
657
  options: OptionsProtocol,
606
658
  iteration: int,
@@ -886,27 +938,36 @@ class WorkflowPipeline:
886
938
  def _execute_standard_hooks_workflow(self, options: OptionsProtocol) -> bool:
887
939
  self._update_hooks_status_running()
888
940
 
889
- fast_hooks_success = self._run_fast_hooks_phase(options)
890
- if not fast_hooks_success:
941
+ if not self._execute_fast_hooks_workflow(options):
891
942
  self._handle_hooks_completion(False)
892
943
  return False
893
944
 
894
- if getattr(options, "clean", False):
895
- if not self._run_code_cleaning_phase(options):
896
- self._handle_hooks_completion(False)
897
- return False
898
-
899
- if not self._run_post_cleaning_fast_hooks(options):
900
- self._handle_hooks_completion(False)
901
- return False
902
- self._mark_code_cleaning_complete()
945
+ if not self._execute_cleaning_workflow_if_needed(options):
946
+ self._handle_hooks_completion(False)
947
+ return False
903
948
 
904
949
  comprehensive_success = self._run_comprehensive_hooks_phase(options)
950
+ self._handle_hooks_completion(comprehensive_success)
951
+
952
+ return comprehensive_success
905
953
 
906
- hooks_success = fast_hooks_success and comprehensive_success
907
- self._handle_hooks_completion(hooks_success)
954
+ def _execute_fast_hooks_workflow(self, options: OptionsProtocol) -> bool:
955
+ """Execute fast hooks phase."""
956
+ return self._run_fast_hooks_phase(options)
908
957
 
909
- return hooks_success
958
+ def _execute_cleaning_workflow_if_needed(self, options: OptionsProtocol) -> bool:
959
+ """Execute cleaning workflow if requested."""
960
+ if not getattr(options, "clean", False):
961
+ return True
962
+
963
+ if not self._run_code_cleaning_phase(options):
964
+ return False
965
+
966
+ if not self._run_post_cleaning_fast_hooks(options):
967
+ return False
968
+
969
+ self._mark_code_cleaning_complete()
970
+ return True
910
971
 
911
972
  def _update_hooks_status_running(self) -> None:
912
973
  if self._has_mcp_state_manager():
@@ -1116,15 +1177,16 @@ class WorkflowPipeline:
1116
1177
  return test_success
1117
1178
 
1118
1179
  def _should_verify_hook_fixes(self, fixes_applied: list[str]) -> bool:
1119
- hook_fixes = [
1120
- f
1121
- for f in fixes_applied
1122
- if "hook" not in f.lower()
1123
- or "complexity" in f.lower()
1124
- or "type" in f.lower()
1125
- ]
1180
+ hook_fixes = [fix for fix in fixes_applied if self._is_hook_related_fix(fix)]
1126
1181
  return bool(hook_fixes)
1127
1182
 
1183
+ def _is_hook_related_fix(self, fix: str) -> bool:
1184
+ """Check if a fix is related to hooks and should trigger hook verification."""
1185
+ fix_lower = fix.lower()
1186
+ return (
1187
+ "hook" not in fix_lower or "complexity" in fix_lower or "type" in fix_lower
1188
+ )
1189
+
1128
1190
  async def _verify_hook_fixes(self, options: OptionsProtocol) -> bool:
1129
1191
  self.logger.info("Re-running comprehensive hooks to verify hook fixes")
1130
1192
  hook_success = self.phases.run_comprehensive_hooks_only(options)
@@ -1299,35 +1361,30 @@ class WorkflowPipeline:
1299
1361
  return issues
1300
1362
 
1301
1363
  def _parse_comprehensive_hook_errors(self, error_msg: str) -> list[Issue]:
1302
- issues: list[Issue] = []
1303
1364
  error_lower = error_msg.lower()
1365
+ error_checkers = self._get_comprehensive_error_checkers()
1304
1366
 
1305
- complexity_issue = self._check_complexity_error(error_lower)
1306
- if complexity_issue:
1307
- issues.append(complexity_issue)
1308
-
1309
- type_error_issue = self._check_type_error(error_lower)
1310
- if type_error_issue:
1311
- issues.append(type_error_issue)
1312
-
1313
- security_issue = self._check_security_error(error_lower)
1314
- if security_issue:
1315
- issues.append(security_issue)
1316
-
1317
- performance_issue = self._check_performance_error(error_lower)
1318
- if performance_issue:
1319
- issues.append(performance_issue)
1320
-
1321
- dead_code_issue = self._check_dead_code_error(error_lower)
1322
- if dead_code_issue:
1323
- issues.append(dead_code_issue)
1324
-
1325
- regex_issue = self._check_regex_validation_error(error_lower)
1326
- if regex_issue:
1327
- issues.append(regex_issue)
1367
+ issues = []
1368
+ for check_func in error_checkers:
1369
+ issue = check_func(error_lower)
1370
+ if issue:
1371
+ issues.append(issue)
1328
1372
 
1329
1373
  return issues
1330
1374
 
1375
+ def _get_comprehensive_error_checkers(
1376
+ self,
1377
+ ) -> list[t.Callable[[str], Issue | None]]:
1378
+ """Get list of error checking functions for comprehensive hooks."""
1379
+ return [
1380
+ self._check_complexity_error,
1381
+ self._check_type_error,
1382
+ self._check_security_error,
1383
+ self._check_performance_error,
1384
+ self._check_dead_code_error,
1385
+ self._check_regex_validation_error,
1386
+ ]
1387
+
1331
1388
  def _check_complexity_error(self, error_lower: str) -> Issue | None:
1332
1389
  if "complexipy" in error_lower or "c901" in error_lower:
1333
1390
  return Issue(
@@ -1426,24 +1483,66 @@ class WorkflowPipeline:
1426
1483
  def _classify_issue(self, issue_str: str) -> tuple[IssueType, Priority]:
1427
1484
  issue_lower = issue_str.lower()
1428
1485
 
1429
- if self._is_type_error(issue_lower):
1430
- return IssueType.TYPE_ERROR, Priority.HIGH
1431
- if self._is_security_issue(issue_lower):
1432
- return IssueType.SECURITY, Priority.HIGH
1433
- if self._is_complexity_issue(issue_lower):
1434
- return IssueType.COMPLEXITY, Priority.HIGH
1435
- if self._is_regex_validation_issue(issue_lower):
1436
- return IssueType.REGEX_VALIDATION, Priority.HIGH
1437
-
1438
- if self._is_dead_code_issue(issue_lower):
1439
- return IssueType.DEAD_CODE, Priority.MEDIUM
1440
- if self._is_performance_issue(issue_lower):
1441
- return IssueType.PERFORMANCE, Priority.MEDIUM
1442
- if self._is_import_error(issue_lower):
1443
- return IssueType.IMPORT_ERROR, Priority.MEDIUM
1486
+ # Check high priority issues first
1487
+ high_priority_result = self._check_high_priority_issues(issue_lower)
1488
+ if high_priority_result:
1489
+ return high_priority_result
1444
1490
 
1491
+ # Check medium priority issues
1492
+ medium_priority_result = self._check_medium_priority_issues(issue_lower)
1493
+ if medium_priority_result:
1494
+ return medium_priority_result
1495
+
1496
+ # Default to formatting issue
1445
1497
  return IssueType.FORMATTING, Priority.MEDIUM
1446
1498
 
1499
+ def _check_high_priority_issues(
1500
+ self, issue_lower: str
1501
+ ) -> tuple[IssueType, Priority] | None:
1502
+ """Check for high priority issue types.
1503
+
1504
+ Args:
1505
+ issue_lower: Lowercase issue string
1506
+
1507
+ Returns:
1508
+ Tuple of issue type and priority if found, None otherwise
1509
+ """
1510
+ high_priority_checks = [
1511
+ (self._is_type_error, IssueType.TYPE_ERROR),
1512
+ (self._is_security_issue, IssueType.SECURITY),
1513
+ (self._is_complexity_issue, IssueType.COMPLEXITY),
1514
+ (self._is_regex_validation_issue, IssueType.REGEX_VALIDATION),
1515
+ ]
1516
+
1517
+ for check_func, issue_type in high_priority_checks:
1518
+ if check_func(issue_lower):
1519
+ return issue_type, Priority.HIGH
1520
+
1521
+ return None
1522
+
1523
+ def _check_medium_priority_issues(
1524
+ self, issue_lower: str
1525
+ ) -> tuple[IssueType, Priority] | None:
1526
+ """Check for medium priority issue types.
1527
+
1528
+ Args:
1529
+ issue_lower: Lowercase issue string
1530
+
1531
+ Returns:
1532
+ Tuple of issue type and priority if found, None otherwise
1533
+ """
1534
+ medium_priority_checks = [
1535
+ (self._is_dead_code_issue, IssueType.DEAD_CODE),
1536
+ (self._is_performance_issue, IssueType.PERFORMANCE),
1537
+ (self._is_import_error, IssueType.IMPORT_ERROR),
1538
+ ]
1539
+
1540
+ for check_func, issue_type in medium_priority_checks:
1541
+ if check_func(issue_lower):
1542
+ return issue_type, Priority.MEDIUM
1543
+
1544
+ return None
1545
+
1447
1546
  def _is_type_error(self, issue_lower: str) -> bool:
1448
1547
  return any(
1449
1548
  keyword in issue_lower for keyword in ("type", "annotation", "pyright")
@@ -1512,44 +1611,77 @@ class WorkflowPipeline:
1512
1611
  async def _handle_security_gate_failure(
1513
1612
  self, options: OptionsProtocol, allow_ai_fixing: bool = False
1514
1613
  ) -> bool:
1614
+ self._display_security_gate_failure_message()
1615
+
1616
+ if allow_ai_fixing:
1617
+ return await self._attempt_ai_assisted_security_fix(options)
1618
+ return self._handle_manual_security_fix()
1619
+
1620
+ def _display_security_gate_failure_message(self) -> None:
1621
+ """Display initial security gate failure message."""
1515
1622
  self.console.print(
1516
1623
  "[red]🔒 SECURITY GATE: Critical security checks failed[/red]"
1517
1624
  )
1518
1625
 
1519
- if allow_ai_fixing:
1520
- self.console.print(
1521
- "[red]Security-critical hooks (bandit, pyright, gitleaks) must pass before publishing[/red]"
1522
- )
1523
- self.console.print(
1524
- "[yellow]🤖 Attempting AI-assisted security issue resolution...[/yellow]"
1525
- )
1626
+ async def _attempt_ai_assisted_security_fix(self, options: OptionsProtocol) -> bool:
1627
+ """Attempt to fix security issues using AI assistance.
1526
1628
 
1527
- ai_fix_success = await self._run_ai_agent_fixing_phase(options)
1528
- if ai_fix_success:
1529
- try:
1530
- security_still_blocks = self._check_security_critical_failures()
1531
- if not security_still_blocks:
1532
- self.console.print(
1533
- "[green]✅ AI agents resolved security issues - publishing allowed[/green]"
1534
- )
1535
- return True
1536
- else:
1537
- self.console.print(
1538
- "[red]🔒 Security issues persist after AI fixing - publishing still BLOCKED[/red]"
1539
- )
1540
- return False
1541
- except Exception as e:
1542
- self.logger.warning(
1543
- f"Security re-check failed: {e} - blocking publishing"
1544
- )
1545
- return False
1546
- return False
1547
- else:
1548
- self.console.print(
1549
- "[red]Security-critical hooks (bandit, pyright, gitleaks) must pass before publishing[/red]"
1550
- )
1629
+ Args:
1630
+ options: Configuration options
1631
+
1632
+ Returns:
1633
+ True if security issues were resolved, False otherwise
1634
+ """
1635
+ self._display_ai_fixing_messages()
1636
+
1637
+ ai_fix_success = await self._run_ai_agent_fixing_phase(options)
1638
+ if ai_fix_success:
1639
+ return self._verify_security_fix_success()
1640
+
1641
+ return False
1642
+
1643
+ def _display_ai_fixing_messages(self) -> None:
1644
+ """Display messages about AI-assisted security fixing."""
1645
+ self.console.print(
1646
+ "[red]Security-critical hooks (bandit, pyright, gitleaks) must pass before publishing[/red]"
1647
+ )
1648
+ self.console.print(
1649
+ "[yellow]🤖 Attempting AI-assisted security issue resolution...[/yellow]"
1650
+ )
1651
+
1652
+ def _verify_security_fix_success(self) -> bool:
1653
+ """Verify that AI fixes resolved the security issues.
1654
+
1655
+ Returns:
1656
+ True if security issues were resolved, False otherwise
1657
+ """
1658
+ try:
1659
+ security_still_blocks = self._check_security_critical_failures()
1660
+ if not security_still_blocks:
1661
+ self.console.print(
1662
+ "[green]✅ AI agents resolved security issues - publishing allowed[/green]"
1663
+ )
1664
+ return True
1665
+ else:
1666
+ self.console.print(
1667
+ "[red]🔒 Security issues persist after AI fixing - publishing still BLOCKED[/red]"
1668
+ )
1669
+ return False
1670
+ except Exception as e:
1671
+ self.logger.warning(f"Security re-check failed: {e} - blocking publishing")
1551
1672
  return False
1552
1673
 
1674
+ def _handle_manual_security_fix(self) -> bool:
1675
+ """Handle security fix when AI assistance is not allowed.
1676
+
1677
+ Returns:
1678
+ Always False since manual intervention is required
1679
+ """
1680
+ self.console.print(
1681
+ "[red]Security-critical hooks (bandit, pyright, gitleaks) must pass before publishing[/red]"
1682
+ )
1683
+ return False
1684
+
1553
1685
  def _determine_ai_fixing_needed(
1554
1686
  self,
1555
1687
  testing_passed: bool,