crackerjack 0.31.8__py3-none-any.whl → 0.31.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crackerjack might be problematic. Click here for more details.

@@ -6,6 +6,7 @@ from pathlib import Path
6
6
 
7
7
  from rich.console import Console
8
8
 
9
+ from crackerjack.agents.base import FixResult, Issue, IssueType, Priority
9
10
  from crackerjack.models.protocols import OptionsProtocol
10
11
 
11
12
  from .phase_coordinator import PhaseCoordinator
@@ -233,6 +234,113 @@ class AsyncWorkflowPipeline:
233
234
 
234
235
  return "continue"
235
236
 
237
def _parse_issues_for_agents(
    self, test_issues: list[str], hook_issues: list[str]
) -> list[Issue]:
    """Convert raw issue strings into structured Issue objects for AI agents.

    Hook issues are parsed first, then test issues, preserving the relative
    order within each group.
    """
    parsed: list[Issue] = [
        self._parse_single_hook_issue(text) for text in hook_issues
    ]
    parsed.extend(self._parse_single_test_issue(text) for text in test_issues)
    return parsed
254
+
255
def _parse_single_hook_issue(self, issue: str) -> Issue:
    """Classify one hook-issue string and build a structured Issue from it."""
    from crackerjack.agents.base import IssueType, Priority

    # Refurb output has its own dedicated parser.
    if "refurb:" in issue and "[FURB" in issue:
        return self._parse_refurb_issue(issue)

    # Ordered tool markers mapped to (type, priority, stage); first hit wins.
    classifiers = (
        ("pyright:", IssueType.TYPE_ERROR, Priority.HIGH, "pyright"),
        ("Type error", IssueType.TYPE_ERROR, Priority.HIGH, "pyright"),
        ("bandit:", IssueType.SECURITY, Priority.HIGH, "bandit"),
        ("vulture:", IssueType.DEAD_CODE, Priority.MEDIUM, "vulture"),
        ("complexipy:", IssueType.COMPLEXITY, Priority.MEDIUM, "complexipy"),
    )
    for marker, issue_type, priority, stage in classifiers:
        if marker in issue:
            return self._create_generic_issue(issue, issue_type, priority, stage)

    # Unknown tool output: record it as a generic formatting-stage issue.
    return self._create_generic_issue(
        issue, IssueType.FORMATTING, Priority.MEDIUM, "hook"
    )
280
+
281
def _parse_refurb_issue(self, issue: str) -> Issue:
    """Parse a refurb line of the form ``refurb: path:line:col [CODE]: msg``."""
    import re
    import uuid

    from crackerjack.agents.base import Issue, IssueType, Priority

    match = re.search(r"refurb:\s*(.+?):(\d+):(\d+)\s+\[(\w+)\]:\s*(.+)", issue)
    if match is None:
        # Unparseable refurb output falls back to a generic issue.
        return self._create_generic_issue(
            issue, IssueType.FORMATTING, Priority.MEDIUM, "refurb"
        )

    file_path, line_num, _col, error_code, message = match.groups()
    return Issue(
        id=str(uuid.uuid4()),
        type=IssueType.FORMATTING,
        severity=Priority.MEDIUM,
        message=f"[{error_code}] {message}",
        file_path=file_path,
        line_number=int(line_num),
        details=[issue],
        stage="refurb",
    )
306
+
307
def _parse_single_test_issue(self, issue: str) -> Issue:
    """Build a TEST_FAILURE Issue; FAILED/ERROR lines rank as high priority."""
    import uuid

    from crackerjack.agents.base import Issue, IssueType, Priority

    is_hard_failure = "FAILED" in issue or "ERROR" in issue
    return Issue(
        id=str(uuid.uuid4()),
        type=IssueType.TEST_FAILURE,
        severity=Priority.HIGH if is_hard_failure else Priority.MEDIUM,
        message=issue,
        details=[issue],
        stage="test",
    )
326
+
327
def _create_generic_issue(
    self, issue: str, issue_type: IssueType, priority: Priority, stage: str
) -> Issue:
    """Build an Issue with a fresh UUID, keeping the raw text in ``details``."""
    import uuid

    from crackerjack.agents.base import Issue

    issue_id = str(uuid.uuid4())
    return Issue(
        id=issue_id,
        type=issue_type,
        severity=priority,
        message=issue,
        details=[issue],
        stage=stage,
    )
343
+
236
344
  async def _run_final_workflow_phases(self, options: OptionsProtocol) -> bool:
237
345
  """Run the final publishing and commit phases."""
238
346
  if not self.phases.run_publishing_phase(options):
@@ -263,7 +371,13 @@ class AsyncWorkflowPipeline:
263
371
  if success:
264
372
  return []
265
373
  else:
266
- return ["Test failures detected - see logs for details"]
374
+ # Get specific test failure details from test manager
375
+ test_failures = self.phases.test_manager.get_test_failures()
376
+ if test_failures:
377
+ return [f"Test failure: {failure}" for failure in test_failures]
378
+ else:
379
+ # Fallback if no specific failures captured
380
+ return ["Test failures detected - see logs for details"]
267
381
  except Exception as e:
268
382
  return [f"Test execution error: {e}"]
269
383
 
@@ -272,11 +386,25 @@ class AsyncWorkflowPipeline:
272
386
  ) -> list[str]:
273
387
  """Collect all comprehensive hook issues without stopping on first failure."""
274
388
  try:
275
- success = await self._run_comprehensive_hooks_async(options)
276
- if success:
277
- return []
278
- else:
279
- return ["Comprehensive hook failures detected - see logs for details"]
389
+ # Run hooks and capture detailed results
390
+ hook_results = await asyncio.to_thread(
391
+ self.phases.hook_manager.run_comprehensive_hooks,
392
+ )
393
+
394
+ # Extract specific issues from failed hooks
395
+ all_issues = []
396
+ for result in hook_results:
397
+ if (
398
+ result.status in ("failed", "error", "timeout")
399
+ and result.issues_found
400
+ ):
401
+ # Add hook context to each issue for better AI agent understanding
402
+ hook_context = f"{result.name}: "
403
+ for issue in result.issues_found:
404
+ all_issues.append(hook_context + issue)
405
+
406
+ return all_issues
407
+
280
408
  except Exception as e:
281
409
  return [f"Comprehensive hooks error: {e}"]
282
410
 
@@ -287,7 +415,7 @@ class AsyncWorkflowPipeline:
287
415
  hook_issues: list[str],
288
416
  iteration: int,
289
417
  ) -> bool:
290
- """Apply AI fixes for all collected issues in batch."""
418
+ """Apply AI fixes for all collected issues in batch using AgentCoordinator."""
291
419
  all_issues = test_issues + hook_issues
292
420
  if not all_issues:
293
421
  return True
@@ -296,24 +424,62 @@ class AsyncWorkflowPipeline:
296
424
  f"🔧 Applying AI fixes for {len(all_issues)} issues in iteration {iteration}"
297
425
  )
298
426
 
299
- # This would integrate with the AI agent system to actually apply fixes
300
- # For now, we'll simulate the fixing process
301
427
  try:
302
- # In a real implementation, this would:
303
- # 1. Analyze all collected issues
304
- # 2. Generate fixes using AI agents
305
- # 3. Apply the fixes to source code
306
- # 4. Return success/failure status
428
+ return await self._execute_ai_fix_workflow(
429
+ test_issues, hook_issues, iteration
430
+ )
431
+ except Exception as e:
432
+ return self._handle_ai_fix_error(e)
307
433
 
308
- # Placeholder for actual AI fixing logic
309
- await asyncio.sleep(0.1) # Simulate processing time
434
async def _execute_ai_fix_workflow(
    self, test_issues: list[str], hook_issues: list[str], iteration: int
) -> bool:
    """Run the agent coordinator over parsed issues and report the outcome.

    Returns True when there was nothing actionable or when the coordinator
    reports success.
    """
    structured_issues = self._parse_issues_for_agents(test_issues, hook_issues)

    # Nothing to hand to the agents counts as success for this iteration.
    if not structured_issues:
        self.console.print("⚠️ No actionable issues found for AI agents")
        return True

    coordinator = self._create_agent_coordinator()
    fix_result = await coordinator.handle_issues(structured_issues)
    self._report_fix_results(fix_result, iteration)
    return fix_result.success
449
+
450
def _create_agent_coordinator(self):
    """Build an AgentCoordinator bound to this pipeline's project path."""
    from crackerjack.agents.base import AgentContext
    from crackerjack.agents.coordinator import AgentCoordinator

    return AgentCoordinator(AgentContext(project_path=self.pkg_path))
457
+
458
def _report_fix_results(self, fix_result: FixResult, iteration: int) -> None:
    """Route fix results to the success or failure console reporter."""
    reporter = (
        self._report_successful_fixes
        if fix_result.success
        else self._report_failed_fixes
    )
    reporter(fix_result, iteration)
464
+
465
def _report_successful_fixes(self, fix_result: FixResult, iteration: int) -> None:
    """Print a success summary, including the applied-fix count when present."""
    self.console.print(f"✅ AI fixes applied successfully in iteration {iteration}")
    applied = fix_result.fixes_applied
    if applied:
        self.console.print(f" Applied {len(applied)} fixes")
470
+
471
def _report_failed_fixes(self, fix_result: FixResult, iteration: int) -> None:
    """Print a failure summary plus at most the first three remaining issues."""
    self.console.print(f"⚠️ Some AI fixes failed in iteration {iteration}")
    remaining = fix_result.remaining_issues or []
    for error in remaining[:3]:
        self.console.print(f" Error: {error}")
477
+
478
def _handle_ai_fix_error(self, error: Exception) -> bool:
    """Log an AI-fix failure, surface it on the console, and signal failure.

    Always returns False so the caller treats the iteration as unfixed.
    """
    self.logger.error(f"AI fixing failed: {error}")
    self.console.print(f"❌ AI agent system error: {error}")
    return False
317
483
 
318
484
 
319
485
  class AsyncWorkflowOrchestrator:
@@ -62,74 +62,113 @@ class WorkflowPipeline:
62
62
  skip_hooks=getattr(options, "skip_hooks", False),
63
63
  ):
64
64
  start_time = time.time()
65
- self.session.initialize_session_tracking(options)
66
- self.session.track_task("workflow", "Complete crackerjack workflow")
67
-
68
- if self._should_debug():
69
- self.debugger.log_workflow_phase(
70
- "workflow_execution",
71
- "started",
72
- details={
73
- "testing": getattr(options, "testing", False),
74
- "skip_hooks": getattr(options, "skip_hooks", False),
75
- "ai_agent": getattr(options, "ai_agent", False),
76
- },
77
- )
78
-
79
- if hasattr(options, "cleanup"):
80
- self.session.set_cleanup_config(options.cleanup)
81
-
82
- self.logger.info(
83
- "Starting complete workflow execution",
84
- testing=getattr(options, "testing", False),
85
- skip_hooks=getattr(options, "skip_hooks", False),
86
- package_path=str(self.pkg_path),
87
- )
65
+ self._initialize_workflow_session(options)
88
66
 
89
67
  try:
90
- success = await self._execute_workflow_phases(options)
91
- self.session.finalize_session(start_time, success)
92
-
93
- duration = time.time() - start_time
94
- self.logger.info(
95
- "Workflow execution completed",
96
- success=success,
97
- duration_seconds=round(duration, 2),
98
- )
99
-
100
- if self._should_debug():
101
- # Set final workflow success status
102
- self.debugger.set_workflow_success(success)
103
-
104
- self.debugger.log_workflow_phase(
105
- "workflow_execution",
106
- "completed" if success else "failed",
107
- duration=duration,
108
- )
109
- if self.debugger.enabled:
110
- self.debugger.print_debug_summary()
111
-
68
+ success = await self._execute_workflow_with_timing(options, start_time)
112
69
  return success
113
70
 
114
71
  except KeyboardInterrupt:
115
- self.console.print("Interrupted by user")
116
- self.session.fail_task("workflow", "Interrupted by user")
117
- self.logger.warning("Workflow interrupted by user")
118
- return False
72
+ return self._handle_user_interruption()
119
73
 
120
74
  except Exception as e:
121
- self.console.print(f"Error: {e}")
122
- self.session.fail_task("workflow", f"Unexpected error: {e}")
123
- self.logger.exception(
124
- "Workflow execution failed",
125
- error=str(e),
126
- error_type=type(e).__name__,
127
- )
128
- return False
75
+ return self._handle_workflow_exception(e)
129
76
 
130
77
  finally:
131
78
  self.session.cleanup_resources()
132
79
 
80
def _initialize_workflow_session(self, options: OptionsProtocol) -> None:
    """Start session tracking and emit all startup logging for a workflow run."""
    self.session.initialize_session_tracking(options)
    self.session.track_task("workflow", "Complete crackerjack workflow")

    # Debug phase log, optional cleanup config, then the info-level banner.
    self._log_workflow_startup_debug(options)
    self._configure_session_cleanup(options)
    self._log_workflow_startup_info(options)
88
+
89
def _log_workflow_startup_debug(self, options: OptionsProtocol) -> None:
    """Record a 'started' workflow phase in the debugger when debugging is on."""
    if self._should_debug():
        self.debugger.log_workflow_phase(
            "workflow_execution",
            "started",
            details={
                "testing": getattr(options, "testing", False),
                "skip_hooks": getattr(options, "skip_hooks", False),
                "ai_agent": getattr(options, "ai_agent", False),
            },
        )
103
+
104
def _configure_session_cleanup(self, options: OptionsProtocol) -> None:
    """Forward the optional ``cleanup`` option to the session when it exists."""
    # Sentinel distinguishes a missing attribute from an explicit None value.
    sentinel = object()
    cleanup = getattr(options, "cleanup", sentinel)
    if cleanup is not sentinel:
        self.session.set_cleanup_config(cleanup)
108
+
109
def _log_workflow_startup_info(self, options: OptionsProtocol) -> None:
    """Emit the structured info-level 'workflow starting' log entry."""
    self.logger.info(
        "Starting complete workflow execution",
        testing=getattr(options, "testing", False),
        skip_hooks=getattr(options, "skip_hooks", False),
        package_path=str(self.pkg_path),
    )
117
+
118
async def _execute_workflow_with_timing(
    self, options: OptionsProtocol, start_time: float
) -> bool:
    """Run all workflow phases, finalize the session, and log timing/outcome."""
    success = await self._execute_workflow_phases(options)
    self.session.finalize_session(start_time, success)

    elapsed = time.time() - start_time
    self._log_workflow_completion(success, elapsed)
    self._log_workflow_completion_debug(success, elapsed)
    return success
130
+
131
def _log_workflow_completion(self, success: bool, duration: float) -> None:
    """Emit the structured 'workflow completed' log entry with duration."""
    self.logger.info(
        "Workflow execution completed",
        success=success,
        duration_seconds=round(duration, 2),
    )
138
+
139
def _log_workflow_completion_debug(self, success: bool, duration: float) -> None:
    """Record workflow completion in the debugger when debugging is enabled."""
    if not self._should_debug():
        return

    self.debugger.set_workflow_success(success)
    outcome = "completed" if success else "failed"
    self.debugger.log_workflow_phase(
        "workflow_execution",
        outcome,
        duration=duration,
    )

    # Summary output is gated on the debugger being fully enabled.
    if self.debugger.enabled:
        self.debugger.print_debug_summary()
153
+
154
def _handle_user_interruption(self) -> bool:
    """Record a Ctrl-C interruption on all channels and report failure."""
    self.console.print("Interrupted by user")
    self.session.fail_task("workflow", "Interrupted by user")
    self.logger.warning("Workflow interrupted by user")
    return False
160
+
161
def _handle_workflow_exception(self, error: Exception) -> bool:
    """Report an unexpected workflow error on every channel and return False."""
    self.console.print(f"Error: {error}")
    self.session.fail_task("workflow", f"Unexpected error: {error}")
    # logger.exception captures the active traceback alongside the fields.
    self.logger.exception(
        "Workflow execution failed",
        error=str(error),
        error_type=type(error).__name__,
    )
    return False
171
+
133
172
  async def _execute_workflow_phases(self, options: OptionsProtocol) -> bool:
134
173
  success = True
135
174
  self.phases.run_configuration_phase(options)
@@ -560,6 +599,80 @@ class WorkflowPipeline:
560
599
  issues: list[Issue] = []
561
600
  hook_count = 0
562
601
 
602
+ try:
603
+ hook_results = self.phases.hook_manager.run_comprehensive_hooks()
604
+ issues, hook_count = self._process_hook_results(hook_results)
605
+ except Exception:
606
+ issues, hook_count = self._fallback_to_session_tracker()
607
+
608
+ return issues, hook_count
609
+
610
def _process_hook_results(self, hook_results: t.Any) -> tuple[list[Issue], int]:
    """Collect issues from failed hooks; return (issues, failed-hook count)."""
    collected: list[Issue] = []
    failed_count = 0

    for result in hook_results:
        if self._is_hook_result_failed(result):
            failed_count += 1
            collected.extend(self._extract_issues_from_hook_result(result))

    return collected, failed_count
624
+
625
def _is_hook_result_failed(self, result: t.Any) -> bool:
    """Return True when the hook ended in a failed, error, or timeout state."""
    return result.status in {"failed", "error", "timeout"}
628
+
629
def _extract_issues_from_hook_result(self, result: t.Any) -> list[Issue]:
    """Prefer detailed issues from hook output; otherwise one generic issue."""
    if not result.issues_found:
        return [self._create_generic_issue_from_hook_result(result)]
    return self._create_specific_issues_from_hook_result(result)
634
+
635
def _create_specific_issues_from_hook_result(self, result: t.Any) -> list[Issue]:
    """Create structured issues from a hook result's detailed findings.

    All findings are parsed in a single batch so that the index-based IDs
    assigned by ``_parse_issues_for_agents`` (``parsed_issue_{i}``) stay
    unique. The previous per-item calls reset the index each time, giving
    every issue the same ``parsed_issue_0`` ID.
    """
    hook_context = f"{result.name}: "
    prefixed = [hook_context + issue_text for issue_text in result.issues_found]
    return self._parse_issues_for_agents(prefixed)
645
+
646
def _create_generic_issue_from_hook_result(self, result: t.Any) -> Issue:
    """Build a placeholder Issue for a failed hook that reported no details."""
    return Issue(
        id=f"hook_failure_{result.name}",
        type=self._determine_hook_issue_type(result.name),
        severity=Priority.MEDIUM,
        message=f"Hook {result.name} failed with no specific details",
        stage="comprehensive",
    )
656
+
657
def _determine_hook_issue_type(self, hook_name: str) -> IssueType:
    """Map formatting-oriented hooks to FORMATTING; all others to TYPE_ERROR."""
    formatting_hooks = frozenset(
        (
            "trailing-whitespace",
            "end-of-file-fixer",
            "ruff-format",
            "ruff-check",
        )
    )
    if hook_name in formatting_hooks:
        return IssueType.FORMATTING
    # NOTE(review): every non-formatting hook defaults to TYPE_ERROR here —
    # confirm that is the intended catch-all classification.
    return IssueType.TYPE_ERROR
670
+
671
+ def _fallback_to_session_tracker(self) -> tuple[list[Issue], int]:
672
+ """Fallback to session tracker if hook manager fails."""
673
+ issues: list[Issue] = []
674
+ hook_count = 0
675
+
563
676
  if not self.session.session_tracker:
564
677
  return issues, hook_count
565
678
 
@@ -679,6 +792,61 @@ class WorkflowPipeline:
679
792
 
680
793
  return issues
681
794
 
795
def _parse_issues_for_agents(self, issue_strings: list[str]) -> list[Issue]:
    """Parse string issues into structured Issue objects for AI agents.

    Classification walks an ordered rule table: the first rule with any
    keyword present in the lower-cased text wins; unmatched strings fall
    back to a medium-priority FORMATTING issue.
    """
    # Ordered (keywords, type, priority) rules — order matters, first hit wins.
    rules: list[tuple[tuple[str, ...], IssueType, Priority]] = [
        (("type", "annotation", "pyright"), IssueType.TYPE_ERROR, Priority.HIGH),
        (("security", "bandit", "hardcoded"), IssueType.SECURITY, Priority.HIGH),
        (("complexity", "complexipy"), IssueType.COMPLEXITY, Priority.HIGH),
        (("unused", "dead", "vulture"), IssueType.DEAD_CODE, Priority.MEDIUM),
        (("performance", "refurb", "furb"), IssueType.PERFORMANCE, Priority.MEDIUM),
        (("import", "creosote"), IssueType.IMPORT_ERROR, Priority.MEDIUM),
    ]

    issues: list[Issue] = []
    for index, text in enumerate(issue_strings):
        lowered = text.lower()
        issue_type = IssueType.FORMATTING
        priority = Priority.MEDIUM
        for keywords, rule_type, rule_priority in rules:
            if any(keyword in lowered for keyword in keywords):
                issue_type = rule_type
                priority = rule_priority
                break

        issues.append(
            Issue(
                id=f"parsed_issue_{index}",
                type=issue_type,
                severity=priority,
                message=text.strip(),
                stage="comprehensive",
            )
        )

    return issues
849
+
682
850
  def _log_failure_counts_if_debugging(
683
851
  self, test_count: int, hook_count: int
684
852
  ) -> None:
@@ -154,58 +154,106 @@ class HookExecutor:
154
154
  start_time = time.time()
155
155
 
156
156
  try:
157
- clean_env = self._get_clean_environment()
158
- result = subprocess.run(
159
- hook.get_command(),
160
- check=False,
161
- cwd=self.pkg_path,
162
- text=True,
163
- timeout=hook.timeout,
164
- env=clean_env,
165
- capture_output=True,
166
- )
167
-
157
+ result = self._run_hook_subprocess(hook)
168
158
  duration = time.time() - start_time
169
159
 
170
- # Only display raw output in verbose mode, and only for failed hooks
171
- if result.returncode != 0 and self.verbose:
172
- if result.stdout:
173
- self.console.print(result.stdout)
174
- if result.stderr:
175
- self.console.print(result.stderr)
176
-
177
- return HookResult(
178
- id=hook.name,
179
- name=hook.name,
180
- status="passed" if result.returncode == 0 else "failed",
181
- duration=duration,
182
- files_processed=0,
183
- issues_found=[]
184
- if result.returncode == 0
185
- else [f"Hook failed with code {result.returncode}"],
186
- stage=hook.stage.value,
187
- )
160
+ self._display_hook_output_if_needed(result)
161
+ return self._create_hook_result_from_process(hook, result, duration)
188
162
 
189
163
  except subprocess.TimeoutExpired:
190
- duration = time.time() - start_time
191
- return HookResult(
192
- id=hook.name,
193
- name=hook.name,
194
- status="timeout",
195
- duration=duration,
196
- issues_found=[f"Hook timed out after {duration:.1f}s"],
197
- stage=hook.stage.value,
198
- )
164
+ return self._create_timeout_result(hook, start_time)
165
+
199
166
  except Exception as e:
200
- duration = time.time() - start_time
201
- return HookResult(
202
- id=hook.name,
203
- name=hook.name,
204
- status="error",
205
- duration=duration,
206
- issues_found=[str(e)],
207
- stage=hook.stage.value,
208
- )
167
+ return self._create_error_result(hook, start_time, e)
168
+
169
def _run_hook_subprocess(
    self, hook: HookDefinition
) -> subprocess.CompletedProcess[str]:
    """Run the hook's command in the package directory with a sanitized env.

    Output is captured as text; a non-zero exit does not raise (check=False)
    and timeouts propagate as subprocess.TimeoutExpired to the caller.
    """
    return subprocess.run(
        hook.get_command(),
        check=False,
        cwd=self.pkg_path,
        text=True,
        timeout=hook.timeout,
        env=self._get_clean_environment(),
        capture_output=True,
    )
183
+
184
def _display_hook_output_if_needed(
    self, result: subprocess.CompletedProcess[str]
) -> None:
    """Echo captured stdout/stderr, only for failed hooks in verbose mode."""
    hook_failed = result.returncode != 0
    if hook_failed and self.verbose:
        for stream in (result.stdout, result.stderr):
            if stream:
                self.console.print(stream)
195
+
196
def _create_hook_result_from_process(
    self,
    hook: HookDefinition,
    result: subprocess.CompletedProcess[str],
    duration: float,
) -> HookResult:
    """Translate a finished subprocess into a passed/failed HookResult."""
    passed = result.returncode == 0
    return HookResult(
        id=hook.name,
        name=hook.name,
        status="passed" if passed else "failed",
        duration=duration,
        files_processed=0,
        issues_found=self._extract_issues_from_process_output(result),
        stage=hook.stage.value,
    )
215
+
216
def _extract_issues_from_process_output(
    self, result: subprocess.CompletedProcess[str]
) -> list[str]:
    """Return one issue string per non-blank output line of a failed hook.

    Passing hooks yield no issues; a failed hook with no captured output
    gets a single generic message carrying the exit code.
    """
    if result.returncode == 0:
        return []

    combined = (result.stdout + result.stderr).strip()
    if not combined:
        # No output was captured; fall back to a generic message.
        return [f"Hook failed with code {result.returncode}"]

    return [line.strip() for line in combined.split("\n") if line.strip()]
229
+
230
def _create_timeout_result(
    self, hook: HookDefinition, start_time: float
) -> HookResult:
    """Build the HookResult recorded when a hook exceeds its timeout."""
    elapsed = time.time() - start_time
    return HookResult(
        id=hook.name,
        name=hook.name,
        status="timeout",
        duration=elapsed,
        issues_found=[f"Hook timed out after {elapsed:.1f}s"],
        stage=hook.stage.value,
    )
243
+
244
def _create_error_result(
    self, hook: HookDefinition, start_time: float, error: Exception
) -> HookResult:
    """Build the HookResult recorded when hook execution raises unexpectedly."""
    elapsed = time.time() - start_time
    return HookResult(
        id=hook.name,
        name=hook.name,
        status="error",
        duration=elapsed,
        issues_found=[str(error)],
        stage=hook.stage.value,
    )
209
257
 
210
258
  def _parse_hook_output(
211
259
  self,
@@ -342,19 +342,20 @@ async def _execute_single_iteration(
342
342
  """Execute a single workflow iteration."""
343
343
  try:
344
344
  # Check for orchestrator workflow methods
345
- if hasattr(orchestrator, "run_complete_workflow"):
346
- # Standard WorkflowOrchestrator method is async
347
- result = orchestrator.run_complete_workflow(options)
345
+ if hasattr(orchestrator, "run_complete_workflow_async"):
346
+ # AsyncWorkflowOrchestrator - method returns awaitable
347
+ result = orchestrator.run_complete_workflow_async(options)
348
348
  if result is None:
349
349
  raise ValueError(
350
- "Method run_complete_workflow returned None instead of awaitable"
350
+ "Method run_complete_workflow_async returned None instead of awaitable"
351
351
  )
352
352
  return await result
353
- elif hasattr(orchestrator, "run_complete_workflow_async"):
354
- result = orchestrator.run_complete_workflow_async(options)
353
+ elif hasattr(orchestrator, "run_complete_workflow"):
354
+ # Standard WorkflowOrchestrator - method is async and returns awaitable boolean
355
+ result = orchestrator.run_complete_workflow(options)
355
356
  if result is None:
356
357
  raise ValueError(
357
- "Method run_complete_workflow_async returned None instead of awaitable"
358
+ "Method run_complete_workflow returned None instead of awaitable"
358
359
  )
359
360
  return await result
360
361
  elif hasattr(orchestrator, "execute_workflow"):
@@ -84,22 +84,58 @@ class GitService:
84
84
  self.console.print(f"[green]✅[/green] Committed: {message}")
85
85
  return True
86
86
 
87
- # When git commit fails due to pre-commit hooks, stderr contains hook output
88
- # Use a more appropriate error message for commit failures
89
- if "pre-commit" in result.stderr or "hook" in result.stderr.lower():
90
- self.console.print("[red]❌[/red] Commit blocked by pre-commit hooks")
91
- if result.stderr.strip():
92
- # Show hook output in a more readable way
93
- self.console.print(
94
- f"[yellow]Hook output:[/yellow]\n{result.stderr.strip()}"
95
- )
96
- else:
97
- self.console.print(f"[red]❌[/red] Commit failed: {result.stderr}")
98
- return False
87
+ return self._handle_commit_failure(result, message)
99
88
  except Exception as e:
100
89
  self.console.print(f"[red]❌[/red] Error committing: {e}")
101
90
  return False
102
91
 
92
def _handle_commit_failure(
    self, result: subprocess.CompletedProcess[str], message: str
) -> bool:
    """Recover from a failed commit.

    If pre-commit hooks modified files, re-stage and retry once; otherwise
    report the hook/commit error and return False.
    """
    hooks_modified_files = "files were modified by this hook" in result.stderr
    if hooks_modified_files:
        return self._retry_commit_after_restage(message)
    return self._handle_hook_error(result)
100
+
101
def _retry_commit_after_restage(self, message: str) -> bool:
    """Re-stage hook-modified files and retry the commit exactly once."""
    self.console.print(
        "[yellow]🔄[/yellow] Pre-commit hooks modified files - attempting to re-stage and retry commit"
    )

    # Re-stage every tracked file the hooks touched (-u: updates only).
    add_result = self._run_git_command(["add", "-u"])
    if add_result.returncode != 0:
        self.console.print(
            f"[red]❌[/red] Failed to re-stage files: {add_result.stderr}"
        )
        return False

    retry_result = self._run_git_command(["commit", "-m", message])
    if retry_result.returncode != 0:
        self.console.print(
            f"[red]❌[/red] Commit failed on retry: {retry_result.stderr}"
        )
        return False

    self.console.print(
        f"[green]✅[/green] Committed after re-staging: {message}"
    )
    return True
126
+
127
def _handle_hook_error(self, result: subprocess.CompletedProcess[str]) -> bool:
    """Report a commit failure, attributing it to pre-commit hooks when detected.

    Always returns False — the commit did not succeed.
    """
    stderr = result.stderr
    blocked_by_hooks = "pre-commit" in stderr or "hook" in stderr.lower()

    if blocked_by_hooks:
        self.console.print("[red]❌[/red] Commit blocked by pre-commit hooks")
        if stderr.strip():
            # Show the hook output in a more readable way.
            self.console.print(
                f"[yellow]Hook output:[/yellow]\n{stderr.strip()}"
            )
    else:
        self.console.print(f"[red]❌[/red] Commit failed: {result.stderr}")
    return False
138
+
103
139
  def push(self) -> bool:
104
140
  try:
105
141
  result = self._run_git_command(["push"])
@@ -543,6 +543,7 @@ python -m crackerjack -a patch
543
543
  self._smart_merge_pre_commit_config(
544
544
  source_file,
545
545
  target_file,
546
+ project_name,
546
547
  force,
547
548
  results,
548
549
  )
@@ -584,7 +585,7 @@ python -m crackerjack -a patch
584
585
  self._ensure_crackerjack_dev_dependency(target_config, source_config)
585
586
 
586
587
  # 2. Merge tool configurations
587
- self._merge_tool_configurations(target_config, source_config)
588
+ self._merge_tool_configurations(target_config, source_config, project_name)
588
589
 
589
590
  # 3. Remove any fixed coverage requirements (use ratchet system instead)
590
591
  self._remove_fixed_coverage_requirements(target_config)
@@ -638,6 +639,7 @@ python -m crackerjack -a patch
638
639
  self,
639
640
  target_config: dict[str, t.Any],
640
641
  source_config: dict[str, t.Any],
642
+ project_name: str,
641
643
  ) -> None:
642
644
  """Merge tool configurations, preserving existing settings."""
643
645
  source_tools = source_config.get("tool", {})
@@ -662,8 +664,10 @@ python -m crackerjack -a patch
662
664
  for tool_name in tools_to_merge:
663
665
  if tool_name in source_tools:
664
666
  if tool_name not in target_tools:
665
- # Tool missing, add it
666
- target_tools[tool_name] = source_tools[tool_name]
667
+ # Tool missing, add it with project-name replacement
668
+ target_tools[tool_name] = self._replace_project_name_in_tool_config(
669
+ source_tools[tool_name], project_name
670
+ )
667
671
  self.console.print(
668
672
  f"[green]➕[/green] Added [tool.{tool_name}] configuration",
669
673
  )
@@ -673,6 +677,7 @@ python -m crackerjack -a patch
673
677
  target_tools[tool_name],
674
678
  source_tools[tool_name],
675
679
  tool_name,
680
+ project_name,
676
681
  )
677
682
 
678
683
  # Special handling for pytest.ini_options markers
@@ -683,13 +688,16 @@ python -m crackerjack -a patch
683
688
  target_tool: dict[str, t.Any],
684
689
  source_tool: dict[str, t.Any],
685
690
  tool_name: str,
691
+ project_name: str,
686
692
  ) -> None:
687
693
  """Merge individual tool settings."""
688
694
  updated_keys = []
689
695
 
690
696
  for key, value in source_tool.items():
691
697
  if key not in target_tool:
692
- target_tool[key] = value
698
+ target_tool[key] = self._replace_project_name_in_config_value(
699
+ value, project_name
700
+ )
693
701
  updated_keys.append(key)
694
702
 
695
703
  if updated_keys:
@@ -779,6 +787,7 @@ python -m crackerjack -a patch
779
787
  self,
780
788
  source_file: Path,
781
789
  target_file: Path,
790
+ project_name: str,
782
791
  force: bool,
783
792
  results: dict[str, t.Any],
784
793
  ) -> None:
@@ -788,8 +797,12 @@ python -m crackerjack -a patch
788
797
  source_config = yaml.safe_load(f)
789
798
 
790
799
  if not target_file.exists():
791
- # No existing file, just copy with proper formatting
792
- content = source_file.read_text()
800
+ # No existing file, copy with project-specific replacements
801
+ content = self._read_and_process_content(
802
+ source_file,
803
+ True, # should_replace
804
+ project_name,
805
+ )
793
806
  # Clean trailing whitespace and ensure single trailing newline
794
807
  from crackerjack.services.filesystem import FileSystemService
795
808
 
@@ -871,3 +884,39 @@ python -m crackerjack -a patch
871
884
  )
872
885
  else:
873
886
  self._skip_existing_file(".pre-commit-config.yaml (no new repos)", results)
887
+
888
+ def _replace_project_name_in_tool_config(
889
+ self, tool_config: dict[str, t.Any], project_name: str
890
+ ) -> dict[str, t.Any]:
891
+ """Replace project name in entire tool configuration."""
892
+ if project_name == "crackerjack":
893
+ return tool_config # No replacement needed
894
+
895
+ # Deep copy to avoid modifying original
896
+ import copy
897
+
898
+ result = copy.deepcopy(tool_config)
899
+
900
+ # Recursively replace in the configuration
901
+ return self._replace_project_name_in_config_value(result, project_name)
902
+
903
+ def _replace_project_name_in_config_value(
904
+ self, value: t.Any, project_name: str
905
+ ) -> t.Any:
906
+ """Replace project name in a configuration value (recursive)."""
907
+ if project_name == "crackerjack":
908
+ return value # No replacement needed
909
+
910
+ if isinstance(value, str):
911
+ return value.replace("crackerjack", project_name)
912
+ elif isinstance(value, list):
913
+ return [
914
+ self._replace_project_name_in_config_value(item, project_name)
915
+ for item in value
916
+ ]
917
+ elif isinstance(value, dict):
918
+ return {
919
+ key: self._replace_project_name_in_config_value(val, project_name)
920
+ for key, val in value.items()
921
+ }
922
+ return value # Numbers, booleans, etc. - no replacement needed
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: crackerjack
3
- Version: 0.31.8
3
+ Version: 0.31.9
4
4
  Summary: Opinionated Python project management tool
5
5
  Project-URL: documentation, https://github.com/lesleslie/crackerjack
6
6
  Project-URL: homepage, https://github.com/lesleslie/crackerjack
@@ -32,7 +32,7 @@ crackerjack/cli/utils.py,sha256=VHR-PALgA8fsKiPytH0cl8feXrtWKCajjk9TS2piK5w,537
32
32
  crackerjack/config/__init__.py,sha256=b0481N2f_JvGufMPcbo5IXu2VjYd111r1BHw0oD3x7o,330
33
33
  crackerjack/config/hooks.py,sha256=6DHJkWRL5c5Ip2bw0tevRt_xzRFKSfeO7tQkGtoZtjs,5367
34
34
  crackerjack/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
35
- crackerjack/core/async_workflow_orchestrator.py,sha256=XCy_5L2xTRZaGHjO_ysuMV7WuMgRgLsSXiqBJO_nyXE,15069
35
+ crackerjack/core/async_workflow_orchestrator.py,sha256=r7E-F6hv5MvhcYFB2VB1HgBaOgDJZJXIGN9DCXk4pmo,21660
36
36
  crackerjack/core/autofix_coordinator.py,sha256=txbXTeGm-gJwuyRekVDbvvKgG0gQ1GqB3BeGdQ8vqpY,7921
37
37
  crackerjack/core/container.py,sha256=e9_1YnWHijUJ0yl23lpgf9mennVQy8NkgJBKtZstG-M,2889
38
38
  crackerjack/core/enhanced_container.py,sha256=fl5XvhNY0fzDnD5hSr16yQXGtL_AW01Wf4F4PL1L0P4,18169
@@ -40,11 +40,11 @@ crackerjack/core/performance.py,sha256=sL9g2-_JflofnsXW6LUCwp9MaUDQprfV8lSG18s9m
40
40
  crackerjack/core/phase_coordinator.py,sha256=iWPVB3A5CFfHwqc3wFXGEQwBhzaa-VlGsvhvRid5FKE,21236
41
41
  crackerjack/core/proactive_workflow.py,sha256=ML1amNJI4Gx0dFJK5AKdvB0zNc1chbq-ZyqnhUi4tms,12677
42
42
  crackerjack/core/session_coordinator.py,sha256=hJKLthZBzX7fXm8AmNMFLEjITNmKxDGqM58Om6p7fr0,9893
43
- crackerjack/core/workflow_orchestrator.py,sha256=eQl4y5ni0ajVuwtZf4z5cmsi10aTs-k7nVcEBgDHNEo,31412
43
+ crackerjack/core/workflow_orchestrator.py,sha256=YfqnVartb7qIHEF5qRpy_Fw1ViyICfBf2r0Le9UCiIM,37749
44
44
  crackerjack/executors/__init__.py,sha256=HF-DmXvKN45uKKDdiMxOT9bYxuy1B-Z91BihOhkK5lg,322
45
45
  crackerjack/executors/async_hook_executor.py,sha256=3U-AHToGNBojnlDsXK6HLv4CfJvv64UqTmCWYAoLcb8,15958
46
46
  crackerjack/executors/cached_hook_executor.py,sha256=LyrFINWbixB-0xEnaU0F2ZUBFUWrAdaTKvj_JW1Wss0,8186
47
- crackerjack/executors/hook_executor.py,sha256=uZ78CSaYXBRKOyHVlT9w_YW4u09vRxI1zHfWohkumtI,12020
47
+ crackerjack/executors/hook_executor.py,sha256=chdZg_DdPRq4q8P0Ed7v2L5q2X6Z9HVZsXfwVf0oj1I,13608
48
48
  crackerjack/executors/individual_hook_executor.py,sha256=Fm58XlKtGAzTvkvFIOv7ILmN01QQy6QQeScKZ84mgFw,23526
49
49
  crackerjack/intelligence/__init__.py,sha256=v5ovNZ9jLENYp4sizRINqxLw63b23lvzg6bGk8d0s-M,1106
50
50
  crackerjack/intelligence/adaptive_learning.py,sha256=VvOAYAEyuKvwSwhSOikuVgGlwSpOzvtaJFIkJuemQtQ,28687
@@ -88,7 +88,7 @@ crackerjack/mcp/tools/monitoring_tools.py,sha256=tcMIc0-qj35QbQp-5fNM47tnyLmjieY
88
88
  crackerjack/mcp/tools/proactive_tools.py,sha256=0ax3clOEJ9yoHTTnvE3IFoutjoDZIpvFgIBowL6ANfU,14067
89
89
  crackerjack/mcp/tools/progress_tools.py,sha256=l9MzVRUxO19FNpHn67zWbhyUeXyJOVxlnAQLJiVJBIg,6822
90
90
  crackerjack/mcp/tools/utility_tools.py,sha256=MIv1CGR8vLeQEdKzR8xsNYj5A9TG_LXWp86drdKReXY,11849
91
- crackerjack/mcp/tools/workflow_executor.py,sha256=_bA8lNV5ywxPVmSM_PY4NzdTUY3FH9ddHnPkrrFqksU,17756
91
+ crackerjack/mcp/tools/workflow_executor.py,sha256=JQSWmhxQBL4p7AwHUcBYBfbCBEGFO7OSyQFAKYuD0uM,17855
92
92
  crackerjack/mcp/websocket/__init__.py,sha256=lZzyfvYjywHfqyy5X3wWR_jgBkRUxYSpgjdKALBzZcI,345
93
93
  crackerjack/mcp/websocket/app.py,sha256=AXijAplfW-8NwWDrOON30Ol5qcMKdc64npTY2YEkX8s,1193
94
94
  crackerjack/mcp/websocket/endpoints.py,sha256=L5zZzqhZtLFKF-Eh928jnwQIAIwunBSMrIaBoyaOaAE,20888
@@ -123,9 +123,9 @@ crackerjack/services/dependency_monitor.py,sha256=axBXFGBdezoPK9ph5_ZGxIwhSJhurg
123
123
  crackerjack/services/enhanced_filesystem.py,sha256=MQj5zqvkNc5U6ZUmSVzgFQWKfnizD1lv4SJ2pt-w8W4,15424
124
124
  crackerjack/services/file_hasher.py,sha256=vHSJ6QbWU5Q5JPLYuQkyRMRXCpDC_hsxToaM83vI58U,5201
125
125
  crackerjack/services/filesystem.py,sha256=Re5VyP7H8W6T2tpDakoaghEivdR2VmshJgnZ9Y3QkH8,17932
126
- crackerjack/services/git.py,sha256=uR5uNnyW9mkOz_fRzJkhLQWErEV4ATtJLhIeX4MYMFE,7105
126
+ crackerjack/services/git.py,sha256=Jth_GiufRkEkUACUyOXELVx92CUXoPAdW5_6G-Mr8Sc,8366
127
127
  crackerjack/services/health_metrics.py,sha256=M2OBqwwnGvnJB3eXIXXh5SgMuckYCjHIrD0RkYFAbQU,21458
128
- crackerjack/services/initialization.py,sha256=iNXGn1OEVKfGNJAJXdqjcqGKvvXFKdaVfrDMnyTBAtw,31476
128
+ crackerjack/services/initialization.py,sha256=xcMqZmIi6pKufwYksJI17Tl7bZ7X7tOLqzNW7cvVZhg,33375
129
129
  crackerjack/services/log_manager.py,sha256=deM_i97biZVyuZJoHaGlnBitc5QV4WaaZHEb70N5LV0,8388
130
130
  crackerjack/services/logging.py,sha256=c15gVCLR_yRhqaza7f1pLLYL-xQ3Oi_OMWL_mR5G46k,5354
131
131
  crackerjack/services/metrics.py,sha256=kInkb2G0ML8hAtmEG1jK04b-F1hT_fZjHvYJKisyr1Y,22894
@@ -142,8 +142,8 @@ crackerjack/slash_commands/__init__.py,sha256=ZHfKjluj9dX88zDYN6Saj7tGUMdMnh37Q8
142
142
  crackerjack/slash_commands/init.md,sha256=mANRdCiFAzaTw29lKNrI1JFthK4pxVdtiFC5lN2SDSQ,4581
143
143
  crackerjack/slash_commands/run.md,sha256=bf_mEtnXagUuw3w8os5h3t1Yi3vjpfiNbkMJvuFEu-Y,6500
144
144
  crackerjack/slash_commands/status.md,sha256=U3qqppVLtIIm2lEiMYaKagaHYLI9UplL7OH1j6SRJGw,3921
145
- crackerjack-0.31.8.dist-info/METADATA,sha256=IoaiWGfqr7QWW5nPAVCq0taM0qHya4yh8UU4ptfHLso,22437
146
- crackerjack-0.31.8.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
147
- crackerjack-0.31.8.dist-info/entry_points.txt,sha256=AJKNft0WXm9xoGUJ3Trl-iXHOWxRAYbagQiza3AILr4,57
148
- crackerjack-0.31.8.dist-info/licenses/LICENSE,sha256=fDt371P6_6sCu7RyqiZH_AhT1LdN3sN1zjBtqEhDYCk,1531
149
- crackerjack-0.31.8.dist-info/RECORD,,
145
+ crackerjack-0.31.9.dist-info/METADATA,sha256=Y15hY2DSOtPmmEzByI4qpJBfAjPAg2GdnGxCj8SoEvw,22437
146
+ crackerjack-0.31.9.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
147
+ crackerjack-0.31.9.dist-info/entry_points.txt,sha256=AJKNft0WXm9xoGUJ3Trl-iXHOWxRAYbagQiza3AILr4,57
148
+ crackerjack-0.31.9.dist-info/licenses/LICENSE,sha256=fDt371P6_6sCu7RyqiZH_AhT1LdN3sN1zjBtqEhDYCk,1531
149
+ crackerjack-0.31.9.dist-info/RECORD,,