crackerjack 0.19.8__py3-none-any.whl → 0.20.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,8 +1,10 @@
 import io
 import os
 import platform
+import queue
 import re
 import subprocess
+import threading
 import time
 import tokenize
 import typing as t
@@ -42,6 +44,8 @@ class OptionsProtocol(t.Protocol):
     benchmark: bool
     benchmark_regression: bool
     benchmark_regression_threshold: float
+    test_workers: int = 0
+    test_timeout: int = 0
     publish: t.Any | None
     bump: t.Any | None
     all: t.Any | None
@@ -62,22 +66,77 @@ class CodeCleaner:
             self.clean_file(file_path)
 
     def clean_file(self, file_path: Path) -> None:
+        from .errors import CleaningError, ErrorCode, FileError, handle_error
+
         try:
             if file_path.resolve() == Path(__file__).resolve():
                 self.console.print(f"Skipping cleaning of {file_path} (self file).")
                 return
         except Exception as e:
-            self.console.print(f"Error comparing file paths: {e}")
+            error = FileError(
+                message="Error comparing file paths",
+                error_code=ErrorCode.FILE_READ_ERROR,
+                details=f"Failed to compare {file_path} with the current file: {e}",
+                recovery="This is likely a file system permission issue. Check file permissions.",
+                exit_code=0,  # Non-fatal error
+            )
+            handle_error(error, self.console, verbose=True, exit_on_error=False)
+            return
+
         try:
-            code = file_path.read_text()
+            # Check if file exists and is readable
+            if not file_path.exists():
+                error = FileError(
+                    message="File not found",
+                    error_code=ErrorCode.FILE_NOT_FOUND,
+                    details=f"The file {file_path} does not exist.",
+                    recovery="Check the file path and ensure the file exists.",
+                    exit_code=0,  # Non-fatal error
+                )
+                handle_error(error, self.console, verbose=True, exit_on_error=False)
+                return
+
+            try:
+                code = file_path.read_text()
+            except Exception as e:
+                error = FileError(
+                    message="Error reading file",
+                    error_code=ErrorCode.FILE_READ_ERROR,
+                    details=f"Failed to read {file_path}: {e}",
+                    recovery="Check file permissions and ensure the file is not locked by another process.",
+                    exit_code=0,  # Non-fatal error
+                )
+                handle_error(error, self.console, verbose=True, exit_on_error=False)
+                return
+
+            # Process the file content
             code = self.remove_docstrings(code)
             code = self.remove_line_comments(code)
             code = self.remove_extra_whitespace(code)
             code = self.reformat_code(code)
-            file_path.write_text(code)  # type: ignore
-            self.console.print(f"Cleaned: {file_path}")
+
+            try:
+                file_path.write_text(code)  # type: ignore
+                self.console.print(f"Cleaned: {file_path}")
+            except Exception as e:
+                error = FileError(
+                    message="Error writing file",
+                    error_code=ErrorCode.FILE_WRITE_ERROR,
+                    details=f"Failed to write to {file_path}: {e}",
+                    recovery="Check file permissions and ensure the file is not locked by another process.",
+                    exit_code=0,  # Non-fatal error
+                )
+                handle_error(error, self.console, verbose=True, exit_on_error=False)
+
         except Exception as e:
-            self.console.print(f"Error cleaning {file_path}: {e}")
+            error = CleaningError(
+                message="Error cleaning file",
+                error_code=ErrorCode.CODE_CLEANING_ERROR,
+                details=f"Failed to clean {file_path}: {e}",
+                recovery="This could be due to syntax errors in the file. Try manually checking the file for syntax errors.",
+                exit_code=0,  # Non-fatal error
+            )
+            handle_error(error, self.console, verbose=True, exit_on_error=False)
 
     def remove_line_comments(self, code: str) -> str:
         new_lines = []
@@ -199,14 +258,30 @@ class CodeCleaner:
         return "\n".join(cleaned_lines)
 
     def reformat_code(self, code: str) -> str | None:
+        from .errors import CleaningError, ErrorCode, handle_error
+
         try:
             import tempfile
 
-            with tempfile.NamedTemporaryFile(
-                suffix=".py", mode="w+", delete=False
-            ) as temp:
-                temp_path = Path(temp.name)
-                temp_path.write_text(code)
+            # Create a temporary file for formatting
+            try:
+                with tempfile.NamedTemporaryFile(
+                    suffix=".py", mode="w+", delete=False
+                ) as temp:
+                    temp_path = Path(temp.name)
+                    temp_path.write_text(code)
+            except Exception as e:
+                error = CleaningError(
+                    message="Failed to create temporary file for formatting",
+                    error_code=ErrorCode.FORMATTING_ERROR,
+                    details=f"Error: {e}",
+                    recovery="Check disk space and permissions for the temp directory.",
+                    exit_code=0,  # Non-fatal
+                )
+                handle_error(error, self.console, verbose=True, exit_on_error=False)
+                return code
+
+            # Run Ruff to format the code
             try:
                 result = subprocess.run(
                     ["ruff", "format", str(temp_path)],
@@ -214,20 +289,58 @@ class CodeCleaner:
                     capture_output=True,
                     text=True,
                 )
+
                 if result.returncode == 0:
-                    formatted_code = temp_path.read_text()
+                    try:
+                        formatted_code = temp_path.read_text()
+                    except Exception as e:
+                        error = CleaningError(
+                            message="Failed to read formatted code",
+                            error_code=ErrorCode.FORMATTING_ERROR,
+                            details=f"Error reading temporary file after formatting: {e}",
+                            recovery="This might be a permissions issue. Check if Ruff is installed properly.",
+                            exit_code=0,  # Non-fatal
+                        )
+                        handle_error(
+                            error, self.console, verbose=True, exit_on_error=False
+                        )
+                        formatted_code = code
                 else:
-                    self.console.print(f"Ruff formatting failed: {result.stderr}")
+                    error = CleaningError(
+                        message="Ruff formatting failed",
+                        error_code=ErrorCode.FORMATTING_ERROR,
+                        details=f"Ruff output: {result.stderr}",
+                        recovery="The file might contain syntax errors. Check the file manually.",
+                        exit_code=0,  # Non-fatal
+                    )
+                    handle_error(error, self.console, exit_on_error=False)
                     formatted_code = code
             except Exception as e:
-                self.console.print(f"Error running Ruff: {e}")
+                error = CleaningError(
+                    message="Error running Ruff formatter",
+                    error_code=ErrorCode.FORMATTING_ERROR,
+                    details=f"Error: {e}",
+                    recovery="Ensure Ruff is installed correctly. Run 'pip install ruff' to install it.",
+                    exit_code=0,  # Non-fatal
+                )
+                handle_error(error, self.console, verbose=True, exit_on_error=False)
                 formatted_code = code
             finally:
+                # Clean up temporary file
                 with suppress(FileNotFoundError):
                     temp_path.unlink()
+
             return formatted_code
+
         except Exception as e:
-            self.console.print(f"Error during reformatting: {e}")
+            error = CleaningError(
+                message="Unexpected error during code formatting",
+                error_code=ErrorCode.FORMATTING_ERROR,
+                details=f"Error: {e}",
+                recovery="This is an unexpected error. Please report this issue.",
+                exit_code=0,  # Non-fatal
+            )
+            handle_error(error, self.console, verbose=True, exit_on_error=False)
             return code
 
 
@@ -368,18 +481,52 @@ class ProjectManager:
     dry_run: bool = False
 
     def run_interactive(self, hook: str) -> None:
+        from .errors import ErrorCode, ExecutionError, handle_error
+
         success: bool = False
-        while not success:
-            fail = self.execute_command(
+        attempts = 0
+        max_attempts = 3
+
+        while not success and attempts < max_attempts:
+            attempts += 1
+            result = self.execute_command(
                 ["pre-commit", "run", hook.lower(), "--all-files"]
             )
-            if fail.returncode > 0:
-                retry = input(f"\n\n{hook.title()} failed. Retry? (y/N): ")
+
+            if result.returncode > 0:
+                self.console.print(
+                    f"\n\n[yellow]Hook '{hook}' failed (attempt {attempts}/{max_attempts})[/yellow]"
+                )
+
+                # Give more detailed information about the failure
+                if result.stderr:
+                    self.console.print(f"[red]Error details:[/red]\n{result.stderr}")
+
+                retry = input(f"Retry running {hook.title()}? (y/N): ")
                 self.console.print()
-                if retry.strip().lower() == "y":
-                    continue
-                raise SystemExit(1)
-            success = True
+
+                if retry.strip().lower() != "y":
+                    error = ExecutionError(
+                        message=f"Interactive hook '{hook}' failed",
+                        error_code=ErrorCode.PRE_COMMIT_ERROR,
+                        details=f"Hook execution output:\n{result.stderr or result.stdout}",
+                        recovery=f"Try running the hook manually: pre-commit run {hook.lower()} --all-files",
+                        exit_code=1,
+                    )
+                    handle_error(error=error, console=self.console)
+            else:
+                self.console.print(f"[green]✅ Hook '{hook}' succeeded![/green]")
+                success = True
+
+        if not success:
+            error = ExecutionError(
+                message=f"Interactive hook '{hook}' failed after {max_attempts} attempts",
+                error_code=ErrorCode.PRE_COMMIT_ERROR,
+                details="The hook continued to fail after multiple attempts.",
+                recovery=f"Fix the issues manually and run: pre-commit run {hook.lower()} --all-files",
+                exit_code=1,
+            )
+            handle_error(error=error, console=self.console)
 
     def update_pkg_configs(self) -> None:
         self.config_manager.copy_configs()
@@ -399,13 +546,25 @@ class ProjectManager:
         self.config_manager.update_pyproject_configs()
 
     def run_pre_commit(self) -> None:
+        from .errors import ErrorCode, ExecutionError, handle_error
+
         self.console.print("\nRunning pre-commit hooks...\n")
         check_all = self.execute_command(["pre-commit", "run", "--all-files"])
+
         if check_all.returncode > 0:
+            # First retry
+            self.console.print("\nSome pre-commit hooks failed. Retrying once...\n")
             check_all = self.execute_command(["pre-commit", "run", "--all-files"])
+
             if check_all.returncode > 0:
-                self.console.print("\n\nPre-commit failed. Please fix errors.\n")
-                raise SystemExit(1)
+                error = ExecutionError(
+                    message="Pre-commit hooks failed",
+                    error_code=ErrorCode.PRE_COMMIT_ERROR,
+                    details="Pre-commit hooks failed even after a retry. Check the output above for specific hook failures.",
+                    recovery="Review the error messages above. Manually fix the issues or run specific hooks interactively with 'pre-commit run <hook-id>'.",
+                    exit_code=1,
+                )
+                handle_error(error=error, console=self.console, verbose=True)
 
     def execute_command(
         self, cmd: list[str], **kwargs: t.Any
@@ -463,6 +622,8 @@ class Crackerjack:
         self.project_manager.pkg_dir = self.pkg_dir
 
     def _update_project(self, options: OptionsProtocol) -> None:
+        from .errors import ErrorCode, ExecutionError, handle_error
+
         if not options.no_config_updates:
             self.project_manager.update_pkg_configs()
             result: CompletedProcess[str] = self.execute_command(
@@ -471,8 +632,21 @@
             if result.returncode == 0:
                 self.console.print("PDM installed: ✅\n")
             else:
-                self.console.print(
-                    "\n\n❌ PDM installation failed. Is PDM is installed? Run `pipx install pdm` and try again.\n\n"
+                error = ExecutionError(
+                    message="PDM installation failed",
+                    error_code=ErrorCode.PDM_INSTALL_ERROR,
+                    details=f"Command output:\n{result.stderr}",
+                    recovery="Ensure PDM is installed. Run `pipx install pdm` and try again. Check for network issues or package conflicts.",
+                    exit_code=1,
+                )
+
+                # Don't exit immediately - this isn't always fatal
+                handle_error(
+                    error=error,
+                    console=self.console,
+                    verbose=options.verbose,
+                    ai_agent=options.ai_agent,
+                    exit_on_error=False,
                 )
 
     def _update_precommit(self, options: OptionsProtocol) -> None:
@@ -488,10 +662,8 @@ class Crackerjack:
         if options.clean:
             if self.pkg_dir:
                 self.code_cleaner.clean_files(self.pkg_dir)
-            tests_dir = self.pkg_path / "tests"
-            if tests_dir.exists() and tests_dir.is_dir():
-                self.console.print("\nCleaning tests directory...\n")
-                self.code_cleaner.clean_files(tests_dir)
+            # Skip cleaning test files as they may contain test data in docstrings and comments
+            # that are necessary for the tests to function properly
 
     def _prepare_pytest_command(self, options: OptionsProtocol) -> list[str]:
         """Prepare pytest command with appropriate options.
@@ -515,6 +687,22 @@ class Crackerjack:
         if options.verbose:
             test.append("-v")
 
+        # Detect project size to adjust timeouts and parallelization
+        project_size = self._detect_project_size()
+
+        # User can override the timeout, otherwise use project size to determine
+        if options.test_timeout > 0:
+            test_timeout = options.test_timeout
+        else:
+            # Use a longer timeout for larger projects
+            test_timeout = (
+                300
+                if project_size == "large"
+                else 120
+                if project_size == "medium"
+                else 60
+            )
+
         test.extend(
             [
                 "--capture=fd",  # Capture stdout/stderr at file descriptor level
@@ -522,7 +710,7 @@ class Crackerjack:
                 "--no-header",  # Reduce output noise
                 "--disable-warnings",  # Disable warning capture
                 "--durations=0",  # Show slowest tests to identify potential hanging tests
-                "--timeout=60",  # 1-minute timeout for tests
+                f"--timeout={test_timeout}",  # Dynamic timeout based on project size or user override
             ]
         )
 
@@ -543,11 +731,60 @@ class Crackerjack:
                 ]
             )
         else:
-            # No benchmarks - use parallel execution for speed
-            test.append("-xvs")
+            # Use user-specified number of workers if provided
+            if options.test_workers > 0:
+                # User explicitly set number of workers
+                if options.test_workers == 1:
+                    # Single worker means no parallelism, just use normal pytest mode
+                    test.append("-vs")
+                else:
+                    # Use specified number of workers
+                    test.extend(["-xvs", "-n", str(options.test_workers)])
+            else:
+                # Auto-detect based on project size
+                if project_size == "large":
+                    # For large projects, use a fixed number of workers to avoid overwhelming the system
+                    test.extend(
+                        ["-xvs", "-n", "2"]
+                    )  # Only 2 parallel processes for large projects
+                elif project_size == "medium":
+                    test.extend(
+                        ["-xvs", "-n", "auto"]
+                    )  # Auto-detect number of processes but limit it
+                else:
+                    test.append("-xvs")  # Default behavior for small projects
 
         return test
 
+    def _detect_project_size(self) -> str:
+        """Detect the approximate size of the project to adjust test parameters.
+
+        Returns:
+            "small", "medium", or "large" based on codebase size
+        """
+        # Check for known large projects by name
+        if self.pkg_name in ("acb", "fastblocks"):
+            return "large"
+
+        # Count Python files to estimate project size
+        try:
+            py_files = list(self.pkg_path.rglob("*.py"))
+            test_files = list(self.pkg_path.rglob("test_*.py"))
+
+            total_files = len(py_files)
+            num_test_files = len(test_files)
+
+            # Rough heuristics for project size
+            if total_files > 100 or num_test_files > 50:
+                return "large"
+            elif total_files > 50 or num_test_files > 20:
+                return "medium"
+            else:
+                return "small"
+        except Exception:
+            # Default to medium in case of error
+            return "medium"
+
     def _setup_test_environment(self) -> None:
         os.environ["PYTHONASYNCIO_DEBUG"] = "0"  # Disable asyncio debug mode
         os.environ["RUNNING_UNDER_CRACKERJACK"] = "1"  # Signal to conftest.py
@@ -557,7 +794,29 @@ class Crackerjack:
     def _run_pytest_process(
         self, test_command: list[str]
     ) -> subprocess.CompletedProcess[str]:
+        import queue
+
+        from .errors import ErrorCode, ExecutionError, handle_error
+
         try:
+            # Detect project size to determine appropriate timeout
+            project_size = self._detect_project_size()
+            # Longer timeouts for larger projects
+            global_timeout = (
+                1200
+                if project_size == "large"
+                else 600
+                if project_size == "medium"
+                else 300
+            )
+
+            # Show timeout information
+            self.console.print(f"[blue]Project size detected as: {project_size}[/blue]")
+            self.console.print(
+                f"[blue]Using global timeout of {global_timeout} seconds[/blue]"
+            )
+
+            # Use non-blocking IO to avoid deadlocks
             process = subprocess.Popen(
                 test_command,
                 stdout=subprocess.PIPE,
@@ -566,57 +825,152 @@ class Crackerjack:
                 bufsize=1,
                 universal_newlines=True,
             )
-            timeout = 300
-            start_time = time.time()
+
             stdout_data = []
             stderr_data = []
+
+            # Output collection queues
+            stdout_queue = queue.Queue()
+            stderr_queue = queue.Queue()
+
+            # Use separate threads to read from stdout and stderr to prevent deadlocks
+            def read_output(
+                pipe: t.TextIO,
+                output_queue: "queue.Queue[str]",
+                data_collector: list[str],
+            ) -> None:
+                try:
+                    for line in iter(pipe.readline, ""):
+                        output_queue.put(line)
+                        data_collector.append(line)
+                except (OSError, ValueError):
+                    # Pipe has been closed
+                    pass
+                finally:
+                    pipe.close()
+
+            # Start output reader threads
+            stdout_thread = threading.Thread(
+                target=read_output,
+                args=(process.stdout, stdout_queue, stdout_data),
+                daemon=True,
+            )
+            stderr_thread = threading.Thread(
+                target=read_output,
+                args=(process.stderr, stderr_queue, stderr_data),
+                daemon=True,
+            )
+
+            stdout_thread.start()
+            stderr_thread.start()
+
+            # Start time for timeout tracking
+            start_time = time.time()
+
+            # Process is running, monitor and display output until completion or timeout
             while process.poll() is None:
-                if time.time() - start_time > timeout:
+                # Check for timeout
+                elapsed = time.time() - start_time
+                if elapsed > global_timeout:
+                    error = ExecutionError(
+                        message=f"Test execution timed out after {global_timeout // 60} minutes.",
+                        error_code=ErrorCode.COMMAND_TIMEOUT,
+                        details=f"Command: {' '.join(test_command)}\nTimeout: {global_timeout} seconds",
+                        recovery="Check for infinite loops or deadlocks in your tests. Consider increasing the timeout or optimizing your tests.",
+                    )
+
                     self.console.print(
-                        "[red]Test execution timed out after 5 minutes. Terminating...[/red]"
+                        f"[red]Test execution timed out after {global_timeout // 60} minutes. Terminating...[/red]"
                     )
                     process.terminate()
                     try:
                         process.wait(timeout=5)
                     except subprocess.TimeoutExpired:
                         process.kill()
+                        stderr_data.append(
+                            "Process had to be forcefully terminated after timeout."
+                        )
                     break
-                if process.stdout:
-                    line = process.stdout.readline()
-                    if line:
-                        stdout_data.append(line)
-                        self.console.print(line, end="")
-                if process.stderr:
-                    line = process.stderr.readline()
-                    if line:
-                        stderr_data.append(line)
-                        self.console.print(f"[red]{line}[/red]", end="")
-                time.sleep(0.1)
-            if process.stdout:
-                for line in process.stdout:
-                    stdout_data.append(line)
-                    self.console.print(line, end="")
-            if process.stderr:
-                for line in process.stderr:
-                    stderr_data.append(line)
-                    self.console.print(f"[red]{line}[/red]", end="")
+
+                # Print any available output
+                self._process_output_queue(stdout_queue, stderr_queue)
+
+                # Small sleep to avoid CPU spinning but still be responsive
+                time.sleep(0.05)
+
+                # Periodically output a heartbeat for very long-running tests
+                if elapsed > 60 and elapsed % 60 < 0.1:  # Roughly every minute
+                    self.console.print(
+                        f"[blue]Tests still running, elapsed time: {int(elapsed)} seconds...[/blue]"
+                    )
+
+            # Process has exited, get remaining output
+            time.sleep(0.1)  # Allow threads to flush final output
+            self._process_output_queue(stdout_queue, stderr_queue)
+
+            # Ensure threads are done
+            if stdout_thread.is_alive():
+                stdout_thread.join(1.0)
+            if stderr_thread.is_alive():
+                stderr_thread.join(1.0)
+
             returncode = process.returncode or 0
             stdout = "".join(stdout_data)
             stderr = "".join(stderr_data)
+
             return subprocess.CompletedProcess(
                 args=test_command, returncode=returncode, stdout=stdout, stderr=stderr
             )
 
         except Exception as e:
-            self.console.print(f"[red]Error running tests: {e}[/red]")
+            error = ExecutionError(
+                message=f"Error running tests: {e}",
+                error_code=ErrorCode.TEST_EXECUTION_ERROR,
+                details=f"Command: {' '.join(test_command)}\nError: {e}",
+                recovery="Check if pytest is installed and that your test files are properly formatted.",
+                exit_code=1,
+            )
+
+            # Don't exit here, let the caller handle it
+            handle_error(
+                error=error, console=self.console, verbose=True, exit_on_error=False
+            )
+
             return subprocess.CompletedProcess(test_command, 1, "", str(e))
 
+    def _process_output_queue(
+        self, stdout_queue: "queue.Queue[str]", stderr_queue: "queue.Queue[str]"
+    ) -> None:
+        """Process and display output from the queues without blocking."""
+        # Process stdout
+        while not stdout_queue.empty():
+            try:
+                line = stdout_queue.get_nowait()
+                if line:
+                    self.console.print(line, end="")
+            except queue.Empty:
+                break
+
+        # Process stderr
+        while not stderr_queue.empty():
+            try:
+                line = stderr_queue.get_nowait()
+                if line:
+                    self.console.print(f"[red]{line}[/red]", end="")
+            except queue.Empty:
+                break
+
     def _report_test_results(
         self, result: subprocess.CompletedProcess[str], ai_agent: str
     ) -> None:
+        from .errors import ErrorCode, TestError, handle_error
+
         if result.returncode > 0:
+            error_details = None
             if result.stderr:
                 self.console.print(result.stderr)
+                error_details = result.stderr
+
             if ai_agent:
                 self.console.print(
                     '[json]{"status": "failed", "action": "tests", "returncode": '
@@ -624,8 +978,19 @@ class Crackerjack:
                     + "}[/json]"
                 )
             else:
-                self.console.print("\n\n❌ Tests failed. Please fix errors.\n")
-                raise SystemExit(1)
+                # Use the structured error handler
+                error = TestError(
+                    message="Tests failed. Please fix the errors.",
+                    error_code=ErrorCode.TEST_FAILURE,
+                    details=error_details,
+                    recovery="Review the test output above for specific failures. Fix the issues in your code and run tests again.",
+                    exit_code=1,
+                )
+                handle_error(
+                    error=error,
+                    console=self.console,
+                    ai_agent=(ai_agent != ""),
+                )
 
         if ai_agent:
             self.console.print('[json]{"status": "success", "action": "tests"}[/json]')
@@ -653,25 +1018,68 @@ class Crackerjack:
                 break
 
     def _publish_project(self, options: OptionsProtocol) -> None:
+        from .errors import ErrorCode, PublishError, handle_error
+
         if options.publish:
             if platform.system() == "Darwin":
                 authorize = self.execute_command(
                     ["pdm", "self", "add", "keyring"], capture_output=True, text=True
                 )
                 if authorize.returncode > 0:
-                    self.console.print(
-                        "\n\nAuthorization failed. Please add your keyring credentials to PDM. Run `pdm self add keyring` and try again.\n\n"
+                    error = PublishError(
+                        message="Authentication setup failed",
+                        error_code=ErrorCode.AUTHENTICATION_ERROR,
+                        details=f"Failed to add keyring support to PDM.\nCommand output:\n{authorize.stderr}",
+                        recovery="Please manually add your keyring credentials to PDM. Run `pdm self add keyring` and try again.",
+                        exit_code=1,
                     )
-                    raise SystemExit(1)
+                    handle_error(
+                        error=error,
+                        console=self.console,
+                        verbose=options.verbose,
+                        ai_agent=options.ai_agent,
+                    )
+
             build = self.execute_command(
                 ["pdm", "build"], capture_output=True, text=True
             )
             self.console.print(build.stdout)
+
             if build.returncode > 0:
-                self.console.print(build.stderr)
-                self.console.print("\n\nBuild failed. Please fix errors.\n")
-                raise SystemExit(1)
-            self.execute_command(["pdm", "publish", "--no-build"])
+                error = PublishError(
+                    message="Package build failed",
+                    error_code=ErrorCode.BUILD_ERROR,
+                    details=f"Command output:\n{build.stderr}",
+                    recovery="Review the error message above for details. Common issues include missing dependencies, invalid project structure, or incorrect metadata in pyproject.toml.",
+                    exit_code=1,
+                )
+                handle_error(
+                    error=error,
+                    console=self.console,
+                    verbose=options.verbose,
+                    ai_agent=options.ai_agent,
+                )
+
+            publish_result = self.execute_command(
+                ["pdm", "publish", "--no-build"], capture_output=True, text=True
+            )
+
+            if publish_result.returncode > 0:
+                error = PublishError(
+                    message="Package publication failed",
+                    error_code=ErrorCode.PUBLISH_ERROR,
+                    details=f"Command output:\n{publish_result.stderr}",
+                    recovery="Ensure you have the correct PyPI credentials configured. Check your internet connection and that the package name is available on PyPI.",
+                    exit_code=1,
+                )
+                handle_error(
+                    error=error,
+                    console=self.console,
+                    verbose=options.verbose,
+                    ai_agent=options.ai_agent,
+                )
+            else:
+                self.console.print("[green]✅ Package published successfully![/green]")
 
     def _commit_and_push(self, options: OptionsProtocol) -> None:
         if options.commit:
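
Taken together, the hunks above replace ad-hoc console prints and bare SystemExit(1) exits with structured errors routed through handle_error from crackerjack/errors.py. That module is not part of this diff, so the following is only a sketch of the calling pattern, assuming the names behave the way the call sites above use them:

    from rich.console import Console

    from crackerjack.errors import ErrorCode, ExecutionError, handle_error

    console = Console()

    # Sketch only: mirrors the call sites in the diff above; errors.py itself is not shown here.
    error = ExecutionError(
        message="Pre-commit hooks failed",
        error_code=ErrorCode.PRE_COMMIT_ERROR,
        details="pre-commit run --all-files exited with a non-zero status.",
        recovery="Run the failing hook directly: pre-commit run <hook-id> --all-files",
        exit_code=1,
    )

    # exit_on_error=False reports the error and its recovery hint without raising SystemExit,
    # matching the non-fatal call sites in clean_file and reformat_code.
    handle_error(error, console, verbose=True, exit_on_error=False)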