crackerjack 0.31.8__py3-none-any.whl → 0.31.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- crackerjack/core/async_workflow_orchestrator.py +186 -20
- crackerjack/core/phase_coordinator.py +1 -1
- crackerjack/core/workflow_orchestrator.py +242 -57
- crackerjack/executors/hook_executor.py +95 -47
- crackerjack/managers/test_manager.py +41 -5
- crackerjack/mcp/tools/workflow_executor.py +8 -7
- crackerjack/services/coverage_ratchet.py +9 -5
- crackerjack/services/git.py +60 -14
- crackerjack/services/initialization.py +64 -15
- {crackerjack-0.31.8.dist-info → crackerjack-0.31.10.dist-info}/METADATA +1 -1
- {crackerjack-0.31.8.dist-info → crackerjack-0.31.10.dist-info}/RECORD +14 -14
- {crackerjack-0.31.8.dist-info → crackerjack-0.31.10.dist-info}/WHEEL +0 -0
- {crackerjack-0.31.8.dist-info → crackerjack-0.31.10.dist-info}/entry_points.txt +0 -0
- {crackerjack-0.31.8.dist-info → crackerjack-0.31.10.dist-info}/licenses/LICENSE +0 -0
crackerjack/core/async_workflow_orchestrator.py
CHANGED

@@ -6,6 +6,7 @@ from pathlib import Path
 
 from rich.console import Console
 
+from crackerjack.agents.base import FixResult, Issue, IssueType, Priority
 from crackerjack.models.protocols import OptionsProtocol
 
 from .phase_coordinator import PhaseCoordinator

@@ -233,6 +234,113 @@ class AsyncWorkflowPipeline:
 
         return "continue"
 
+    def _parse_issues_for_agents(
+        self, test_issues: list[str], hook_issues: list[str]
+    ) -> list[Issue]:
+        """Parse string issues into structured Issue objects for AI agent processing."""
+        structured_issues = []
+
+        # Parse hook issues using dedicated parsers
+        for issue in hook_issues:
+            parsed_issue = self._parse_single_hook_issue(issue)
+            structured_issues.append(parsed_issue)
+
+        # Parse test issues using dedicated parser
+        for issue in test_issues:
+            parsed_issue = self._parse_single_test_issue(issue)
+            structured_issues.append(parsed_issue)
+
+        return structured_issues
+
+    def _parse_single_hook_issue(self, issue: str) -> Issue:
+        """Parse a single hook issue into structured format."""
+        from crackerjack.agents.base import IssueType, Priority
+
+        # Try refurb-specific parsing first
+        if "refurb:" in issue and "[FURB" in issue:
+            return self._parse_refurb_issue(issue)
+
+        # Use generic hook issue parsers
+        hook_type_mapping = {
+            "pyright:": (IssueType.TYPE_ERROR, Priority.HIGH, "pyright"),
+            "Type error": (IssueType.TYPE_ERROR, Priority.HIGH, "pyright"),
+            "bandit:": (IssueType.SECURITY, Priority.HIGH, "bandit"),
+            "vulture:": (IssueType.DEAD_CODE, Priority.MEDIUM, "vulture"),
+            "complexipy:": (IssueType.COMPLEXITY, Priority.MEDIUM, "complexipy"),
+        }
+
+        for keyword, (issue_type, priority, stage) in hook_type_mapping.items():
+            if keyword in issue:
+                return self._create_generic_issue(issue, issue_type, priority, stage)
+
+        # Default to generic hook issue
+        return self._create_generic_issue(
+            issue, IssueType.FORMATTING, Priority.MEDIUM, "hook"
+        )
+
+    def _parse_refurb_issue(self, issue: str) -> Issue:
+        """Parse refurb-specific issue format."""
+        import re
+        import uuid
+
+        from crackerjack.agents.base import Issue, IssueType, Priority
+
+        match = re.search(r"refurb:\s*(.+?):(\d+):(\d+)\s+\[(\w+)\]:\s*(.+)", issue)
+        if match:
+            file_path, line_num, _, error_code, message = match.groups()
+            return Issue(
+                id=str(uuid.uuid4()),
+                type=IssueType.FORMATTING,
+                severity=Priority.MEDIUM,
+                message=f"[{error_code}] {message}",
+                file_path=file_path,
+                line_number=int(line_num),
+                details=[issue],
+                stage="refurb",
+            )
+
+        # Fallback to generic parsing if regex fails
+        return self._create_generic_issue(
+            issue, IssueType.FORMATTING, Priority.MEDIUM, "refurb"
+        )
+
+    def _parse_single_test_issue(self, issue: str) -> Issue:
+        """Parse a single test issue into structured format."""
+        import uuid
+
+        from crackerjack.agents.base import Issue, IssueType, Priority
+
+        if "FAILED" in issue or "ERROR" in issue:
+            severity = Priority.HIGH
+        else:
+            severity = Priority.MEDIUM
+
+        return Issue(
+            id=str(uuid.uuid4()),
+            type=IssueType.TEST_FAILURE,
+            severity=severity,
+            message=issue,
+            details=[issue],
+            stage="test",
+        )
+
+    def _create_generic_issue(
+        self, issue: str, issue_type: IssueType, priority: Priority, stage: str
+    ) -> Issue:
+        """Create a generic Issue object with standard fields."""
+        import uuid
+
+        from crackerjack.agents.base import Issue
+
+        return Issue(
+            id=str(uuid.uuid4()),
+            type=issue_type,
+            severity=priority,
+            message=issue,
+            details=[issue],
+            stage=stage,
+        )
+
     async def _run_final_workflow_phases(self, options: OptionsProtocol) -> bool:
         """Run the final publishing and commit phases."""
         if not self.phases.run_publishing_phase(options):

@@ -263,7 +371,13 @@ class AsyncWorkflowPipeline:
             if success:
                 return []
             else:
-
+                # Get specific test failure details from test manager
+                test_failures = self.phases.test_manager.get_test_failures()
+                if test_failures:
+                    return [f"Test failure: {failure}" for failure in test_failures]
+                else:
+                    # Fallback if no specific failures captured
+                    return ["Test failures detected - see logs for details"]
         except Exception as e:
             return [f"Test execution error: {e}"]
 

@@ -272,11 +386,25 @@ class AsyncWorkflowPipeline:
     ) -> list[str]:
         """Collect all comprehensive hook issues without stopping on first failure."""
         try:
-
-
-
-
-
+            # Run hooks and capture detailed results
+            hook_results = await asyncio.to_thread(
+                self.phases.hook_manager.run_comprehensive_hooks,
+            )
+
+            # Extract specific issues from failed hooks
+            all_issues = []
+            for result in hook_results:
+                if (
+                    result.status in ("failed", "error", "timeout")
+                    and result.issues_found
+                ):
+                    # Add hook context to each issue for better AI agent understanding
+                    hook_context = f"{result.name}: "
+                    for issue in result.issues_found:
+                        all_issues.append(hook_context + issue)
+
+            return all_issues
+
         except Exception as e:
             return [f"Comprehensive hooks error: {e}"]
 

@@ -287,7 +415,7 @@
         hook_issues: list[str],
         iteration: int,
     ) -> bool:
-        """Apply AI fixes for all collected issues in batch."""
+        """Apply AI fixes for all collected issues in batch using AgentCoordinator."""
         all_issues = test_issues + hook_issues
         if not all_issues:
             return True

@@ -296,24 +424,62 @@
            f"🔧 Applying AI fixes for {len(all_issues)} issues in iteration {iteration}"
        )
 
-        # This would integrate with the AI agent system to actually apply fixes
-        # For now, we'll simulate the fixing process
        try:
-
-
-
-
-
+            return await self._execute_ai_fix_workflow(
+                test_issues, hook_issues, iteration
+            )
+        except Exception as e:
+            return self._handle_ai_fix_error(e)
 
-
-
+    async def _execute_ai_fix_workflow(
+        self, test_issues: list[str], hook_issues: list[str], iteration: int
+    ) -> bool:
+        """Execute the AI fix workflow and return success status."""
+        structured_issues = self._parse_issues_for_agents(test_issues, hook_issues)
 
-
+        if not structured_issues:
+            self.console.print("⚠️ No actionable issues found for AI agents")
            return True
 
-
-
-
+        coordinator = self._create_agent_coordinator()
+        fix_result = await coordinator.handle_issues(structured_issues)
+
+        self._report_fix_results(fix_result, iteration)
+        return fix_result.success
+
+    def _create_agent_coordinator(self):
+        """Create and configure the AI agent coordinator."""
+        from crackerjack.agents.base import AgentContext
+        from crackerjack.agents.coordinator import AgentCoordinator
+
+        context = AgentContext(project_path=self.pkg_path)
+        return AgentCoordinator(context)
+
+    def _report_fix_results(self, fix_result: FixResult, iteration: int) -> None:
+        """Report the results of AI fix attempts to the console."""
+        if fix_result.success:
+            self._report_successful_fixes(fix_result, iteration)
+        else:
+            self._report_failed_fixes(fix_result, iteration)
+
+    def _report_successful_fixes(self, fix_result: FixResult, iteration: int) -> None:
+        """Report successful AI fixes to the console."""
+        self.console.print(f"✅ AI fixes applied successfully in iteration {iteration}")
+        if fix_result.fixes_applied:
+            self.console.print(f" Applied {len(fix_result.fixes_applied)} fixes")
+
+    def _report_failed_fixes(self, fix_result: FixResult, iteration: int) -> None:
+        """Report failed AI fixes to the console."""
+        self.console.print(f"⚠️ Some AI fixes failed in iteration {iteration}")
+        if fix_result.remaining_issues:
+            for error in fix_result.remaining_issues[:3]:  # Show first 3 errors
+                self.console.print(f" Error: {error}")
+
+    def _handle_ai_fix_error(self, error: Exception) -> bool:
+        """Handle errors during AI fix execution."""
+        self.logger.error(f"AI fixing failed: {error}")
+        self.console.print(f"❌ AI agent system error: {error}")
+        return False
 
 
 class AsyncWorkflowOrchestrator:
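Aside (not part of the diff): the refurb pattern introduced in _parse_refurb_issue can be exercised on its own. The sample issue string below is hypothetical, but the regex is the one from the diff:

    import re

    # Same pattern the new _parse_refurb_issue helper uses.
    pattern = r"refurb:\s*(.+?):(\d+):(\d+)\s+\[(\w+)\]:\s*(.+)"

    # Hypothetical hook output line, shaped like refurb's file:line:col [CODE]: message format.
    sample = "refurb: crackerjack/services/git.py:42:5 [FURB109]: Replace 'x in list(y)' with 'x in y'"

    match = re.search(pattern, sample)
    if match:
        file_path, line_num, col, error_code, message = match.groups()
        print(file_path, line_num, error_code, message)
        # -> crackerjack/services/git.py 42 FURB109 Replace 'x in list(y)' with 'x in y'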
crackerjack/core/workflow_orchestrator.py
CHANGED

@@ -62,81 +62,122 @@ class WorkflowPipeline:
             skip_hooks=getattr(options, "skip_hooks", False),
         ):
             start_time = time.time()
-            self.session.initialize_session_tracking(options)
-            self.session.track_task("workflow", "Complete crackerjack workflow")
-
-            if self._should_debug():
-                self.debugger.log_workflow_phase(
-                    "workflow_execution",
-                    "started",
-                    details={
-                        "testing": getattr(options, "testing", False),
-                        "skip_hooks": getattr(options, "skip_hooks", False),
-                        "ai_agent": getattr(options, "ai_agent", False),
-                    },
-                )
-
-            if hasattr(options, "cleanup"):
-                self.session.set_cleanup_config(options.cleanup)
-
-            self.logger.info(
-                "Starting complete workflow execution",
-                testing=getattr(options, "testing", False),
-                skip_hooks=getattr(options, "skip_hooks", False),
-                package_path=str(self.pkg_path),
-            )
+            self._initialize_workflow_session(options)
 
            try:
-                success = await self._execute_workflow_phases(options)
-                self.session.finalize_session(start_time, success)
-
-                duration = time.time() - start_time
-                self.logger.info(
-                    "Workflow execution completed",
-                    success=success,
-                    duration_seconds=round(duration, 2),
-                )
-
-                if self._should_debug():
-                    # Set final workflow success status
-                    self.debugger.set_workflow_success(success)
-
-                    self.debugger.log_workflow_phase(
-                        "workflow_execution",
-                        "completed" if success else "failed",
-                        duration=duration,
-                    )
-                    if self.debugger.enabled:
-                        self.debugger.print_debug_summary()
-
+                success = await self._execute_workflow_with_timing(options, start_time)
                return success
 
            except KeyboardInterrupt:
-                self.console.print("Interrupted by user")
-                self.session.fail_task("workflow", "Interrupted by user")
-                self.logger.warning("Workflow interrupted by user")
-                return False
+                return self._handle_user_interruption()
 
            except Exception as e:
-                self.console.print(f"Error: {e}")
-                self.session.fail_task("workflow", f"Unexpected error: {e}")
-                self.logger.exception(
-                    "Workflow execution failed",
-                    error=str(e),
-                    error_type=type(e).__name__,
-                )
-                return False
+                return self._handle_workflow_exception(e)
 
            finally:
                self.session.cleanup_resources()
 
+    def _initialize_workflow_session(self, options: OptionsProtocol) -> None:
+        """Initialize session tracking and debug logging for workflow execution."""
+        self.session.initialize_session_tracking(options)
+        self.session.track_task("workflow", "Complete crackerjack workflow")
+
+        self._log_workflow_startup_debug(options)
+        self._configure_session_cleanup(options)
+        self._log_workflow_startup_info(options)
+
+    def _log_workflow_startup_debug(self, options: OptionsProtocol) -> None:
+        """Log debug information for workflow startup."""
+        if not self._should_debug():
+            return
+
+        self.debugger.log_workflow_phase(
+            "workflow_execution",
+            "started",
+            details={
+                "testing": getattr(options, "testing", False),
+                "skip_hooks": getattr(options, "skip_hooks", False),
+                "ai_agent": getattr(options, "ai_agent", False),
+            },
+        )
+
+    def _configure_session_cleanup(self, options: OptionsProtocol) -> None:
+        """Configure session cleanup settings if specified."""
+        if hasattr(options, "cleanup"):
+            self.session.set_cleanup_config(options.cleanup)
+
+    def _log_workflow_startup_info(self, options: OptionsProtocol) -> None:
+        """Log informational message about workflow startup."""
+        self.logger.info(
+            "Starting complete workflow execution",
+            testing=getattr(options, "testing", False),
+            skip_hooks=getattr(options, "skip_hooks", False),
+            package_path=str(self.pkg_path),
+        )
+
+    async def _execute_workflow_with_timing(
+        self, options: OptionsProtocol, start_time: float
+    ) -> bool:
+        """Execute workflow phases and handle success/completion logging."""
+        success = await self._execute_workflow_phases(options)
+        self.session.finalize_session(start_time, success)
+
+        duration = time.time() - start_time
+        self._log_workflow_completion(success, duration)
+        self._log_workflow_completion_debug(success, duration)
+
+        return success
+
+    def _log_workflow_completion(self, success: bool, duration: float) -> None:
+        """Log workflow completion information."""
+        self.logger.info(
+            "Workflow execution completed",
+            success=success,
+            duration_seconds=round(duration, 2),
+        )
+
+    def _log_workflow_completion_debug(self, success: bool, duration: float) -> None:
+        """Log debug information for workflow completion."""
+        if not self._should_debug():
+            return
+
+        self.debugger.set_workflow_success(success)
+        self.debugger.log_workflow_phase(
+            "workflow_execution",
+            "completed" if success else "failed",
+            duration=duration,
+        )
+
+        if self.debugger.enabled:
+            self.debugger.print_debug_summary()
+
+    def _handle_user_interruption(self) -> bool:
+        """Handle KeyboardInterrupt gracefully."""
+        self.console.print("Interrupted by user")
+        self.session.fail_task("workflow", "Interrupted by user")
+        self.logger.warning("Workflow interrupted by user")
+        return False
+
+    def _handle_workflow_exception(self, error: Exception) -> bool:
+        """Handle unexpected workflow exceptions."""
+        self.console.print(f"Error: {error}")
+        self.session.fail_task("workflow", f"Unexpected error: {error}")
+        self.logger.exception(
+            "Workflow execution failed",
+            error=str(error),
+            error_type=type(error).__name__,
+        )
+        return False
+
    async def _execute_workflow_phases(self, options: OptionsProtocol) -> bool:
        success = True
        self.phases.run_configuration_phase(options)
+
        if not self.phases.run_cleaning_phase(options):
            success = False
            self.session.fail_task("workflow", "Cleaning phase failed")
            return False
+
        if not await self._execute_quality_phase(options):
            success = False
            return False

@@ -224,6 +265,21 @@ class WorkflowPipeline:
     ) -> bool:
         """Handle standard workflow where all phases must pass."""
         success = testing_passed and comprehensive_passed
+
+        # Debug information for workflow continuation issues
+        if not success and getattr(options, "verbose", False):
+            self.console.print(
+                f"[yellow]⚠️ Workflow stopped - testing_passed: {testing_passed}, comprehensive_passed: {comprehensive_passed}[/yellow]"
+            )
+            if not testing_passed:
+                self.console.print(
+                    "[yellow] → Tests reported failure despite appearing successful[/yellow]"
+                )
+            if not comprehensive_passed:
+                self.console.print(
+                    "[yellow] → Comprehensive hooks reported failure despite appearing successful[/yellow]"
+                )
+
         if options.ai_agent and self._should_debug():
             self.debugger.log_iteration_end(iteration, success)
         return success

@@ -560,6 +616,80 @@ class WorkflowPipeline:
         issues: list[Issue] = []
         hook_count = 0
 
+        try:
+            hook_results = self.phases.hook_manager.run_comprehensive_hooks()
+            issues, hook_count = self._process_hook_results(hook_results)
+        except Exception:
+            issues, hook_count = self._fallback_to_session_tracker()
+
+        return issues, hook_count
+
+    def _process_hook_results(self, hook_results: t.Any) -> tuple[list[Issue], int]:
+        """Process hook results and extract issues."""
+        issues: list[Issue] = []
+        hook_count = 0
+
+        for result in hook_results:
+            if not self._is_hook_result_failed(result):
+                continue
+
+            hook_count += 1
+            result_issues = self._extract_issues_from_hook_result(result)
+            issues.extend(result_issues)
+
+        return issues, hook_count
+
+    def _is_hook_result_failed(self, result: t.Any) -> bool:
+        """Check if hook result indicates failure."""
+        return result.status in ("failed", "error", "timeout")
+
+    def _extract_issues_from_hook_result(self, result: t.Any) -> list[Issue]:
+        """Extract issues from a single hook result."""
+        if result.issues_found:
+            return self._create_specific_issues_from_hook_result(result)
+        return [self._create_generic_issue_from_hook_result(result)]
+
+    def _create_specific_issues_from_hook_result(self, result: t.Any) -> list[Issue]:
+        """Create specific issues from hook result with detailed information."""
+        issues: list[Issue] = []
+        hook_context = f"{result.name}: "
+
+        for issue_text in result.issues_found:
+            parsed_issues = self._parse_issues_for_agents([hook_context + issue_text])
+            issues.extend(parsed_issues)
+
+        return issues
+
+    def _create_generic_issue_from_hook_result(self, result: t.Any) -> Issue:
+        """Create a generic issue for hook failure without specific details."""
+        issue_type = self._determine_hook_issue_type(result.name)
+        return Issue(
+            id=f"hook_failure_{result.name}",
+            type=issue_type,
+            severity=Priority.MEDIUM,
+            message=f"Hook {result.name} failed with no specific details",
+            stage="comprehensive",
+        )
+
+    def _determine_hook_issue_type(self, hook_name: str) -> IssueType:
+        """Determine issue type based on hook name."""
+        formatting_hooks = {
+            "trailing-whitespace",
+            "end-of-file-fixer",
+            "ruff-format",
+            "ruff-check",
+        }
+        return (
+            IssueType.FORMATTING
+            if hook_name in formatting_hooks
+            else IssueType.TYPE_ERROR
+        )
+
+    def _fallback_to_session_tracker(self) -> tuple[list[Issue], int]:
+        """Fallback to session tracker if hook manager fails."""
+        issues: list[Issue] = []
+        hook_count = 0
+
         if not self.session.session_tracker:
             return issues, hook_count
 

@@ -679,6 +809,61 @@ class WorkflowPipeline:
 
         return issues
 
+    def _parse_issues_for_agents(self, issue_strings: list[str]) -> list[Issue]:
+        """Parse string issues into structured Issue objects for AI agents."""
+        issues: list[Issue] = []
+
+        for i, issue_str in enumerate(issue_strings):
+            # Determine issue type from content patterns
+            issue_type = IssueType.FORMATTING
+            priority = Priority.MEDIUM
+
+            if any(
+                keyword in issue_str.lower()
+                for keyword in ("type", "annotation", "pyright")
+            ):
+                issue_type = IssueType.TYPE_ERROR
+                priority = Priority.HIGH
+            elif any(
+                keyword in issue_str.lower()
+                for keyword in ("security", "bandit", "hardcoded")
+            ):
+                issue_type = IssueType.SECURITY
+                priority = Priority.HIGH
+            elif any(
+                keyword in issue_str.lower() for keyword in ("complexity", "complexipy")
+            ):
+                issue_type = IssueType.COMPLEXITY
+                priority = Priority.HIGH
+            elif any(
+                keyword in issue_str.lower()
+                for keyword in ("unused", "dead", "vulture")
+            ):
+                issue_type = IssueType.DEAD_CODE
+                priority = Priority.MEDIUM
+            elif any(
+                keyword in issue_str.lower()
+                for keyword in ("performance", "refurb", "furb")
+            ):
+                issue_type = IssueType.PERFORMANCE
+                priority = Priority.MEDIUM
+            elif any(
+                keyword in issue_str.lower() for keyword in ("import", "creosote")
+            ):
+                issue_type = IssueType.IMPORT_ERROR
+                priority = Priority.MEDIUM
+
+            issue = Issue(
+                id=f"parsed_issue_{i}",
+                type=issue_type,
+                severity=priority,
+                message=issue_str.strip(),
+                stage="comprehensive",
+            )
+            issues.append(issue)
+
+        return issues
+
     def _log_failure_counts_if_debugging(
         self, test_count: int, hook_count: int
     ) -> None:
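For reference, the keyword classifier added to _parse_issues_for_agents boils down to a first-match-wins substring scan. A standalone sketch of that idea follows; the IssueType enum here is a simplified stand-in, since the real one lives in crackerjack.agents.base:

    from enum import Enum

    class IssueType(Enum):  # stand-in for crackerjack.agents.base.IssueType
        FORMATTING = "formatting"
        TYPE_ERROR = "type_error"
        SECURITY = "security"
        COMPLEXITY = "complexity"
        DEAD_CODE = "dead_code"
        PERFORMANCE = "performance"
        IMPORT_ERROR = "import_error"

    KEYWORDS = [  # first match wins, mirroring the elif chain in the diff
        (("type", "annotation", "pyright"), IssueType.TYPE_ERROR),
        (("security", "bandit", "hardcoded"), IssueType.SECURITY),
        (("complexity", "complexipy"), IssueType.COMPLEXITY),
        (("unused", "dead", "vulture"), IssueType.DEAD_CODE),
        (("performance", "refurb", "furb"), IssueType.PERFORMANCE),
        (("import", "creosote"), IssueType.IMPORT_ERROR),
    ]

    def classify(issue: str) -> IssueType:
        lowered = issue.lower()
        for keywords, issue_type in KEYWORDS:
            if any(k in lowered for k in keywords):
                return issue_type
        return IssueType.FORMATTING  # default bucket, as in the diff

    print(classify("pyright: missing type annotation"))  # IssueType.TYPE_ERROR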
crackerjack/executors/hook_executor.py
CHANGED

@@ -154,58 +154,106 @@ class HookExecutor:
         start_time = time.time()
 
         try:
-
-            result = subprocess.run(
-                hook.get_command(),
-                check=False,
-                cwd=self.pkg_path,
-                text=True,
-                timeout=hook.timeout,
-                env=clean_env,
-                capture_output=True,
-            )
-
+            result = self._run_hook_subprocess(hook)
            duration = time.time() - start_time
 
-
-
-            if result.stdout:
-                self.console.print(result.stdout)
-            if result.stderr:
-                self.console.print(result.stderr)
-
-            return HookResult(
-                id=hook.name,
-                name=hook.name,
-                status="passed" if result.returncode == 0 else "failed",
-                duration=duration,
-                files_processed=0,
-                issues_found=[]
-                if result.returncode == 0
-                else [f"Hook failed with code {result.returncode}"],
-                stage=hook.stage.value,
-            )
+            self._display_hook_output_if_needed(result)
+            return self._create_hook_result_from_process(hook, result, duration)
 
        except subprocess.TimeoutExpired:
-
-
-                id=hook.name,
-                name=hook.name,
-                status="timeout",
-                duration=duration,
-                issues_found=[f"Hook timed out after {duration:.1f}s"],
-                stage=hook.stage.value,
-            )
+            return self._create_timeout_result(hook, start_time)
+
        except Exception as e:
-
-
-
-
-
-
-
-
-            )
+            return self._create_error_result(hook, start_time, e)
+
+    def _run_hook_subprocess(
+        self, hook: HookDefinition
+    ) -> subprocess.CompletedProcess[str]:
+        """Execute hook subprocess with clean environment."""
+        clean_env = self._get_clean_environment()
+        return subprocess.run(
+            hook.get_command(),
+            check=False,
+            cwd=self.pkg_path,
+            text=True,
+            timeout=hook.timeout,
+            env=clean_env,
+            capture_output=True,
+        )
+
+    def _display_hook_output_if_needed(
+        self, result: subprocess.CompletedProcess[str]
+    ) -> None:
+        """Display hook output in verbose mode for failed hooks."""
+        if result.returncode == 0 or not self.verbose:
+            return
+
+        if result.stdout:
+            self.console.print(result.stdout)
+        if result.stderr:
+            self.console.print(result.stderr)
+
+    def _create_hook_result_from_process(
+        self,
+        hook: HookDefinition,
+        result: subprocess.CompletedProcess[str],
+        duration: float,
+    ) -> HookResult:
+        """Create HookResult from successful subprocess execution."""
+        status = "passed" if result.returncode == 0 else "failed"
+        issues_found = self._extract_issues_from_process_output(result)
+
+        return HookResult(
+            id=hook.name,
+            name=hook.name,
+            status=status,
+            duration=duration,
+            files_processed=0,
+            issues_found=issues_found,
+            stage=hook.stage.value,
+        )
+
+    def _extract_issues_from_process_output(
+        self, result: subprocess.CompletedProcess[str]
+    ) -> list[str]:
+        """Extract specific issues from subprocess output for failed hooks."""
+        if result.returncode == 0:
+            return []
+
+        error_output = (result.stdout + result.stderr).strip()
+        if error_output:
+            return [line.strip() for line in error_output.split("\n") if line.strip()]
+
+        # Fallback to generic message if no output captured
+        return [f"Hook failed with code {result.returncode}"]
+
+    def _create_timeout_result(
+        self, hook: HookDefinition, start_time: float
+    ) -> HookResult:
+        """Create HookResult for timeout scenarios."""
+        duration = time.time() - start_time
+        return HookResult(
+            id=hook.name,
+            name=hook.name,
+            status="timeout",
+            duration=duration,
+            issues_found=[f"Hook timed out after {duration:.1f}s"],
+            stage=hook.stage.value,
+        )
+
+    def _create_error_result(
+        self, hook: HookDefinition, start_time: float, error: Exception
+    ) -> HookResult:
+        """Create HookResult for general exception scenarios."""
+        duration = time.time() - start_time
+        return HookResult(
+            id=hook.name,
+            name=hook.name,
+            status="error",
+            duration=duration,
+            issues_found=[str(error)],
+            stage=hook.stage.value,
+        )
 
     def _parse_hook_output(
         self,
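The refactored executor preserves a three-outcome subprocess pattern (passed/failed, timeout, error). A minimal self-contained sketch of that pattern, using a placeholder command rather than a real hook:

    import subprocess
    import sys
    import time

    def run_with_outcomes(cmd: list[str], timeout: float) -> dict:
        start = time.time()
        try:
            proc = subprocess.run(
                cmd, check=False, text=True, capture_output=True, timeout=timeout
            )
            status = "passed" if proc.returncode == 0 else "failed"
        except subprocess.TimeoutExpired:
            # The command exceeded its budget; report how long we waited.
            return {"status": "timeout", "duration": time.time() - start}
        except Exception as exc:  # e.g. FileNotFoundError for a missing binary
            return {"status": "error", "duration": time.time() - start, "issue": str(exc)}
        return {"status": status, "duration": time.time() - start}

    # Placeholder command for demonstration only.
    print(run_with_outcomes([sys.executable, "-c", "print('ok')"], timeout=10.0))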
crackerjack/managers/test_manager.py
CHANGED

@@ -136,6 +136,36 @@ class TestManager:
         except Exception:
             return None
 
+    def get_coverage(self) -> dict[str, t.Any]:
+        """Get coverage information as required by TestManagerProtocol."""
+        try:
+            # Get the ratchet status which includes coverage information
+            status = self.coverage_ratchet.get_status_report()
+
+            if status.get("status") == "not_initialized":
+                return {
+                    "status": "not_initialized",
+                    "coverage_percent": 0.0,
+                    "message": "Coverage ratchet not initialized",
+                }
+
+            return {
+                "status": "active",
+                "coverage_percent": status.get("current_coverage", 0.0),
+                "target_coverage": status.get("target_coverage", 100.0),
+                "next_milestone": status.get("next_milestone"),
+                "progress_percent": status.get("progress_percent", 0.0),
+                "last_updated": status.get("last_updated"),
+                "milestones_achieved": status.get("milestones_achieved", []),
+            }
+        except Exception as e:
+            return {
+                "status": "error",
+                "coverage_percent": 0.0,
+                "error": str(e),
+                "message": "Failed to get coverage information",
+            }
+
     def has_tests(self) -> bool:
         """Check if project has tests."""
         test_directories = ["tests", "test"]

@@ -219,11 +249,17 @@ class TestManager:
             self._handle_coverage_improvement(ratchet_result)
             return True
         else:
-
-
-            f"{ratchet_result
-
-
+            # Use the message from the ratchet result if available, or construct from available data
+            if "message" in ratchet_result:
+                self.console.print(f"[red]📉[/red] {ratchet_result['message']}")
+            else:
+                # Fallback to constructing message from available keys
+                current = ratchet_result.get("current_coverage", 0)
+                previous = ratchet_result.get("previous_coverage", 0)
+                self.console.print(
+                    f"[red]📉[/red] Coverage regression: "
+                    f"{current:.2f}% < {previous:.2f}%"
+                )
            return False
 
     def _handle_coverage_improvement(self, ratchet_result: dict[str, t.Any]) -> None:
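The regression branch above prefers a service-supplied message and otherwise rebuilds one from the raw numbers. The same fallback in isolation (dict keys as assumed from the diff):

    def regression_message(ratchet_result: dict) -> str:
        # Prefer the message the ratchet service provides; otherwise build one
        # from the numeric fields, mirroring the fallback added in the diff.
        if "message" in ratchet_result:
            return ratchet_result["message"]
        current = ratchet_result.get("current_coverage", 0)
        previous = ratchet_result.get("previous_coverage", 0)
        return f"Coverage regression: {current:.2f}% < {previous:.2f}%"

    print(regression_message({"current_coverage": 41.2, "previous_coverage": 42.0}))
    # -> Coverage regression: 41.20% < 42.00%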
crackerjack/mcp/tools/workflow_executor.py
CHANGED

@@ -342,19 +342,20 @@ async def _execute_single_iteration(
     """Execute a single workflow iteration."""
     try:
         # Check for orchestrator workflow methods
-        if hasattr(orchestrator, "
-            #
-            result = orchestrator.
+        if hasattr(orchestrator, "run_complete_workflow_async"):
+            # AsyncWorkflowOrchestrator - method returns awaitable
+            result = orchestrator.run_complete_workflow_async(options)
            if result is None:
                raise ValueError(
-                    "Method
+                    "Method run_complete_workflow_async returned None instead of awaitable"
                )
            return await result
-        elif hasattr(orchestrator, "
-
+        elif hasattr(orchestrator, "run_complete_workflow"):
+            # Standard WorkflowOrchestrator - method is async and returns awaitable boolean
+            result = orchestrator.run_complete_workflow(options)
            if result is None:
                raise ValueError(
-                    "Method
+                    "Method run_complete_workflow returned None instead of awaitable"
                )
            return await result
        elif hasattr(orchestrator, "execute_workflow"):
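The dispatch above probes for known entry points with hasattr and guards against a None return before awaiting. A reduced sketch with a dummy orchestrator class (the class here is illustrative, not crackerjack's):

    import asyncio

    class AsyncOrchestrator:  # dummy stand-in exposing the entry point probed first
        def run_complete_workflow_async(self, options):
            async def _run():
                return True
            return _run()  # returns an awaitable, as the guard expects

    async def execute(orchestrator, options) -> bool:
        if hasattr(orchestrator, "run_complete_workflow_async"):
            result = orchestrator.run_complete_workflow_async(options)
            if result is None:
                raise ValueError("run_complete_workflow_async returned None instead of awaitable")
            return await result
        raise AttributeError("no known workflow entry point")

    print(asyncio.run(execute(AsyncOrchestrator(), None)))  # True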
crackerjack/services/coverage_ratchet.py
CHANGED

@@ -332,11 +332,13 @@ class CoverageRatchetService:
             # Look for .coverage file or coverage.json
             coverage_file = self.pkg_path / "coverage.json"
             if not coverage_file.exists():
-                #
+                # No coverage data - this is acceptable, return success
                 return {
-                    "success":
-                    "
-                    "message": "
+                    "success": True,
+                    "status": "no_coverage_data",
+                    "message": "No coverage data found - tests passed without coverage",
+                    "allowed": True,
+                    "baseline_updated": False,
                 }
 
             # Parse coverage data (simplified for now)

@@ -346,7 +348,9 @@ class CoverageRatchetService:
             )
 
             # Update the ratchet
-
+            result = self.update_coverage(current_coverage)
+            result["success"] = result.get("allowed", True)
+            return result
 
         except Exception as e:
             return {
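The new return path maps the ratchet's "allowed" flag onto an explicit "success" key, defaulting to True when the flag is absent. In isolation:

    def finalize_ratchet_result(update_result: dict) -> dict:
        # Treat "allowed" (no regression) as overall success; default to True
        # when the ratchet result carries no explicit flag, as in the diff.
        update_result["success"] = update_result.get("allowed", True)
        return update_result

    print(finalize_ratchet_result({"allowed": False, "message": "coverage dropped"}))
    # -> {'allowed': False, 'message': 'coverage dropped', 'success': False}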
crackerjack/services/git.py
CHANGED

@@ -29,18 +29,27 @@ class GitService:
 
     def get_changed_files(self) -> list[str]:
         try:
-
+            # Get staged files excluding deletions
+            staged_result = self._run_git_command(
+                ["diff", "--cached", "--name-only", "--diff-filter=ACMRT"]
+            )
            staged_files = (
                staged_result.stdout.strip().split("\n")
                if staged_result.stdout.strip()
                else []
            )
-
+
+            # Get unstaged files excluding deletions
+            unstaged_result = self._run_git_command(
+                ["diff", "--name-only", "--diff-filter=ACMRT"]
+            )
            unstaged_files = (
                unstaged_result.stdout.strip().split("\n")
                if unstaged_result.stdout.strip()
                else []
            )
+
+            # Get untracked files
            untracked_result = self._run_git_command(
                ["ls-files", "--others", "--exclude-standard"],
            )

@@ -49,6 +58,7 @@ class GitService:
                 if untracked_result.stdout.strip()
                 else []
             )
+
            all_files = set(staged_files + unstaged_files + untracked_files)
            return [f for f in all_files if f]
        except Exception as e:

@@ -84,22 +94,58 @@ class GitService:
                 self.console.print(f"[green]✅[/green] Committed: {message}")
                 return True
 
-
-            # Use a more appropriate error message for commit failures
-            if "pre-commit" in result.stderr or "hook" in result.stderr.lower():
-                self.console.print("[red]❌[/red] Commit blocked by pre-commit hooks")
-                if result.stderr.strip():
-                    # Show hook output in a more readable way
-                    self.console.print(
-                        f"[yellow]Hook output:[/yellow]\n{result.stderr.strip()}"
-                    )
-            else:
-                self.console.print(f"[red]❌[/red] Commit failed: {result.stderr}")
-            return False
+            return self._handle_commit_failure(result, message)
        except Exception as e:
            self.console.print(f"[red]❌[/red] Error committing: {e}")
            return False
 
+    def _handle_commit_failure(
+        self, result: subprocess.CompletedProcess[str], message: str
+    ) -> bool:
+        # Check if pre-commit hooks modified files and need re-staging
+        if "files were modified by this hook" in result.stderr:
+            return self._retry_commit_after_restage(message)
+
+        return self._handle_hook_error(result)
+
+    def _retry_commit_after_restage(self, message: str) -> bool:
+        self.console.print(
+            "[yellow]🔄[/yellow] Pre-commit hooks modified files - attempting to re-stage and retry commit"
+        )
+
+        # Re-stage all modified files
+        add_result = self._run_git_command(["add", "-u"])
+        if add_result.returncode != 0:
+            self.console.print(
+                f"[red]❌[/red] Failed to re-stage files: {add_result.stderr}"
+            )
+            return False
+
+        # Retry the commit
+        retry_result = self._run_git_command(["commit", "-m", message])
+        if retry_result.returncode == 0:
+            self.console.print(
+                f"[green]✅[/green] Committed after re-staging: {message}"
+            )
+            return True
+
+        self.console.print(
+            f"[red]❌[/red] Commit failed on retry: {retry_result.stderr}"
+        )
+        return False
+
+    def _handle_hook_error(self, result: subprocess.CompletedProcess[str]) -> bool:
+        # When git commit fails due to pre-commit hooks, stderr contains hook output
+        if "pre-commit" in result.stderr or "hook" in result.stderr.lower():
+            self.console.print("[red]❌[/red] Commit blocked by pre-commit hooks")
+            if result.stderr.strip():
+                self.console.print(
+                    f"[yellow]Hook output:[/yellow]\n{result.stderr.strip()}"
+                )
+        else:
+            self.console.print(f"[red]❌[/red] Commit failed: {result.stderr}")
+        return False
+
     def push(self) -> bool:
         try:
             result = self._run_git_command(["push"])
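--diff-filter=ACMRT limits git diff output to Added, Copied, Modified, Renamed, and Type-changed paths, so deletions no longer leak into the changed-file list. A standalone sketch of the same collection logic (run it inside any git repository):

    import subprocess

    def git_lines(args: list[str]) -> list[str]:
        out = subprocess.run(["git", *args], check=False, text=True, capture_output=True)
        return [line for line in out.stdout.strip().split("\n") if line]

    # Staged and unstaged changes, excluding deletions (A/C/M/R/T only).
    staged = git_lines(["diff", "--cached", "--name-only", "--diff-filter=ACMRT"])
    unstaged = git_lines(["diff", "--name-only", "--diff-filter=ACMRT"])
    # Untracked files that are not ignored.
    untracked = git_lines(["ls-files", "--others", "--exclude-standard"])

    print(sorted(set(staged + unstaged + untracked)))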
crackerjack/services/initialization.py
CHANGED

@@ -72,7 +72,7 @@ class InitializationService:
         "pyproject.toml": "smart_merge",
         "CLAUDE.md": "smart_append",
         "RULES.md": "replace_if_missing",
-        "mcp.json": "special",  # Special handling: mcp.json -> .mcp.json with merging
+        "example.mcp.json": "special",  # Special handling: example.mcp.json -> .mcp.json with merging
     }
 
     def _process_config_file(

@@ -84,8 +84,8 @@ class InitializationService:
         force: bool,
         results: dict[str, t.Any],
     ) -> None:
-        # Special handling for mcp.json -> .mcp.json
-        if file_name == "mcp.json":
+        # Special handling for example.mcp.json -> .mcp.json
+        if file_name == "example.mcp.json":
            self._process_mcp_config(target_path, force, results)
            return
 

@@ -248,14 +248,14 @@ class InitializationService:
         force: bool,
         results: dict[str, t.Any],
     ) -> None:
-        """Handle special processing for mcp.json -> .mcp.json with merging."""
-        # Source: mcp.json in crackerjack package (contains servers to add to projects)
-        source_file = self.pkg_path / "mcp.json"
+        """Handle special processing for example.mcp.json -> .mcp.json with merging."""
+        # Source: example.mcp.json in crackerjack package (contains servers to add to projects)
+        source_file = self.pkg_path / "example.mcp.json"
        # Target: .mcp.json in target project
        target_file = target_path / ".mcp.json"
 
        if not source_file.exists():
-            self._handle_missing_source_file("mcp.json", results)
+            self._handle_missing_source_file("example.mcp.json", results)
            return
 
        try:

@@ -265,8 +265,8 @@ class InitializationService:
 
         if not isinstance(source_config.get("mcpServers"), dict):
             self._handle_file_processing_error(
-                "mcp.json",
-                ValueError("Invalid mcp.json format: missing mcpServers"),
+                "example.mcp.json",
+                ValueError("Invalid example.mcp.json format: missing mcpServers"),
                results,
            )
            return

@@ -543,6 +543,7 @@ python -m crackerjack -a patch
             self._smart_merge_pre_commit_config(
                 source_file,
                 target_file,
+                project_name,
                force,
                results,
            )

@@ -584,7 +585,7 @@ python -m crackerjack -a patch
         self._ensure_crackerjack_dev_dependency(target_config, source_config)
 
         # 2. Merge tool configurations
-        self._merge_tool_configurations(target_config, source_config)
+        self._merge_tool_configurations(target_config, source_config, project_name)
 
        # 3. Remove any fixed coverage requirements (use ratchet system instead)
        self._remove_fixed_coverage_requirements(target_config)

@@ -638,6 +639,7 @@ python -m crackerjack -a patch
         self,
         target_config: dict[str, t.Any],
         source_config: dict[str, t.Any],
+        project_name: str,
     ) -> None:
         """Merge tool configurations, preserving existing settings."""
         source_tools = source_config.get("tool", {})

@@ -662,8 +664,10 @@ python -m crackerjack -a patch
         for tool_name in tools_to_merge:
             if tool_name in source_tools:
                 if tool_name not in target_tools:
-                    # Tool missing, add it
-                    target_tools[tool_name] = source_tools[tool_name]
+                    # Tool missing, add it with project-name replacement
+                    target_tools[tool_name] = self._replace_project_name_in_tool_config(
+                        source_tools[tool_name], project_name
+                    )
                    self.console.print(
                        f"[green]➕[/green] Added [tool.{tool_name}] configuration",
                    )

@@ -673,6 +677,7 @@ python -m crackerjack -a patch
                         target_tools[tool_name],
                         source_tools[tool_name],
                         tool_name,
+                        project_name,
                    )
 
        # Special handling for pytest.ini_options markers

@@ -683,13 +688,16 @@ python -m crackerjack -a patch
         target_tool: dict[str, t.Any],
         source_tool: dict[str, t.Any],
         tool_name: str,
+        project_name: str,
     ) -> None:
         """Merge individual tool settings."""
         updated_keys = []
 
        for key, value in source_tool.items():
            if key not in target_tool:
-                target_tool[key] = value
+                target_tool[key] = self._replace_project_name_in_config_value(
+                    value, project_name
+                )
                updated_keys.append(key)
 
        if updated_keys:

@@ -779,6 +787,7 @@ python -m crackerjack -a patch
         self,
         source_file: Path,
         target_file: Path,
+        project_name: str,
         force: bool,
         results: dict[str, t.Any],
     ) -> None:

@@ -788,8 +797,12 @@ python -m crackerjack -a patch
             source_config = yaml.safe_load(f)
 
         if not target_file.exists():
-            # No existing file,
-            content =
+            # No existing file, copy with project-specific replacements
+            content = self._read_and_process_content(
+                source_file,
+                True,  # should_replace
+                project_name,
+            )
            # Clean trailing whitespace and ensure single trailing newline
            from crackerjack.services.filesystem import FileSystemService
 

@@ -871,3 +884,39 @@ python -m crackerjack -a patch
             )
         else:
             self._skip_existing_file(".pre-commit-config.yaml (no new repos)", results)
+
+    def _replace_project_name_in_tool_config(
+        self, tool_config: dict[str, t.Any], project_name: str
+    ) -> dict[str, t.Any]:
+        """Replace project name in entire tool configuration."""
+        if project_name == "crackerjack":
+            return tool_config  # No replacement needed
+
+        # Deep copy to avoid modifying original
+        import copy
+
+        result = copy.deepcopy(tool_config)
+
+        # Recursively replace in the configuration
+        return self._replace_project_name_in_config_value(result, project_name)
+
+    def _replace_project_name_in_config_value(
+        self, value: t.Any, project_name: str
+    ) -> t.Any:
+        """Replace project name in a configuration value (recursive)."""
+        if project_name == "crackerjack":
+            return value  # No replacement needed
+
+        if isinstance(value, str):
+            return value.replace("crackerjack", project_name)
+        elif isinstance(value, list):
+            return [
+                self._replace_project_name_in_config_value(item, project_name)
+                for item in value
+            ]
+        elif isinstance(value, dict):
+            return {
+                key: self._replace_project_name_in_config_value(val, project_name)
+                for key, val in value.items()
+            }
+        return value  # Numbers, booleans, etc. - no replacement needed
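The recursive replacement helpers walk strings, lists, and dicts while leaving other scalars alone. A quick standalone check of the same logic, with a hypothetical tool table:

    def replace_name(value, project_name: str):
        # Mirrors _replace_project_name_in_config_value: recurse into containers,
        # substitute in strings, and leave other scalars untouched.
        if project_name == "crackerjack":
            return value
        if isinstance(value, str):
            return value.replace("crackerjack", project_name)
        if isinstance(value, list):
            return [replace_name(item, project_name) for item in value]
        if isinstance(value, dict):
            return {key: replace_name(val, project_name) for key, val in value.items()}
        return value

    # Hypothetical [tool.coverage.run]-style table, for demonstration only.
    config = {"source": ["crackerjack"], "omit": ["crackerjack/tests/*"], "branch": True}
    print(replace_name(config, "myproject"))
    # -> {'source': ['myproject'], 'omit': ['myproject/tests/*'], 'branch': True}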
{crackerjack-0.31.8.dist-info → crackerjack-0.31.10.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: crackerjack
-Version: 0.31.8
+Version: 0.31.10
 Summary: Opinionated Python project management tool
 Project-URL: documentation, https://github.com/lesleslie/crackerjack
 Project-URL: homepage, https://github.com/lesleslie/crackerjack
{crackerjack-0.31.8.dist-info → crackerjack-0.31.10.dist-info}/RECORD
CHANGED

@@ -32,19 +32,19 @@ crackerjack/cli/utils.py,sha256=VHR-PALgA8fsKiPytH0cl8feXrtWKCajjk9TS2piK5w,537
 crackerjack/config/__init__.py,sha256=b0481N2f_JvGufMPcbo5IXu2VjYd111r1BHw0oD3x7o,330
 crackerjack/config/hooks.py,sha256=6DHJkWRL5c5Ip2bw0tevRt_xzRFKSfeO7tQkGtoZtjs,5367
 crackerjack/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-crackerjack/core/async_workflow_orchestrator.py,sha256=
+crackerjack/core/async_workflow_orchestrator.py,sha256=r7E-F6hv5MvhcYFB2VB1HgBaOgDJZJXIGN9DCXk4pmo,21660
 crackerjack/core/autofix_coordinator.py,sha256=txbXTeGm-gJwuyRekVDbvvKgG0gQ1GqB3BeGdQ8vqpY,7921
 crackerjack/core/container.py,sha256=e9_1YnWHijUJ0yl23lpgf9mennVQy8NkgJBKtZstG-M,2889
 crackerjack/core/enhanced_container.py,sha256=fl5XvhNY0fzDnD5hSr16yQXGtL_AW01Wf4F4PL1L0P4,18169
 crackerjack/core/performance.py,sha256=sL9g2-_JflofnsXW6LUCwp9MaUDQprfV8lSG18s9mns,7601
-crackerjack/core/phase_coordinator.py,sha256=
+crackerjack/core/phase_coordinator.py,sha256=L0-OXrxNDfcqSQnhyaeXot1fDk_zdHEWBl-thcbz65Y,21244
 crackerjack/core/proactive_workflow.py,sha256=ML1amNJI4Gx0dFJK5AKdvB0zNc1chbq-ZyqnhUi4tms,12677
 crackerjack/core/session_coordinator.py,sha256=hJKLthZBzX7fXm8AmNMFLEjITNmKxDGqM58Om6p7fr0,9893
-crackerjack/core/workflow_orchestrator.py,sha256=
+crackerjack/core/workflow_orchestrator.py,sha256=YHG-qp8J76fqj_poGRvTvuimljbJBvVwPT1r1OjF95M,38459
 crackerjack/executors/__init__.py,sha256=HF-DmXvKN45uKKDdiMxOT9bYxuy1B-Z91BihOhkK5lg,322
 crackerjack/executors/async_hook_executor.py,sha256=3U-AHToGNBojnlDsXK6HLv4CfJvv64UqTmCWYAoLcb8,15958
 crackerjack/executors/cached_hook_executor.py,sha256=LyrFINWbixB-0xEnaU0F2ZUBFUWrAdaTKvj_JW1Wss0,8186
-crackerjack/executors/hook_executor.py,sha256=
+crackerjack/executors/hook_executor.py,sha256=chdZg_DdPRq4q8P0Ed7v2L5q2X6Z9HVZsXfwVf0oj1I,13608
 crackerjack/executors/individual_hook_executor.py,sha256=Fm58XlKtGAzTvkvFIOv7ILmN01QQy6QQeScKZ84mgFw,23526
 crackerjack/intelligence/__init__.py,sha256=v5ovNZ9jLENYp4sizRINqxLw63b23lvzg6bGk8d0s-M,1106
 crackerjack/intelligence/adaptive_learning.py,sha256=VvOAYAEyuKvwSwhSOikuVgGlwSpOzvtaJFIkJuemQtQ,28687

@@ -58,7 +58,7 @@ crackerjack/managers/hook_manager.py,sha256=3qAKLYqoJGPJ8NAUB1KEoWHZafjs6564P9ud
 crackerjack/managers/publish_manager.py,sha256=7bBXkaHm1Ou-tMLvUqNQZScp_onZ2SJgB0o3kwThUDE,16084
 crackerjack/managers/test_command_builder.py,sha256=MdDz9AYSLOoLmI-8zoq3zd2SXF3DeuIkANa76h9cINI,5295
 crackerjack/managers/test_executor.py,sha256=XCMuJPssTV2Glb0hDPHvoDbVrJHVJpZrsAIltegMMfE,16337
-crackerjack/managers/test_manager.py,sha256=
+crackerjack/managers/test_manager.py,sha256=4HfnkWy2Sqbu-YW9zkIK0UNc1bnk0SUspK0ldg5RFGI,11387
 crackerjack/managers/test_manager_backup.py,sha256=tptpX99nw-caLJMVga4Hss7grJRqcFHz1JkRBqro4sE,41307
 crackerjack/managers/test_progress.py,sha256=gCNKdE7Bh7RS3K7Ekj2MGKCqiY4px54AzPNi0gMhAL0,3908
 crackerjack/mcp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

@@ -88,7 +88,7 @@ crackerjack/mcp/tools/monitoring_tools.py,sha256=tcMIc0-qj35QbQp-5fNM47tnyLmjieY
 crackerjack/mcp/tools/proactive_tools.py,sha256=0ax3clOEJ9yoHTTnvE3IFoutjoDZIpvFgIBowL6ANfU,14067
 crackerjack/mcp/tools/progress_tools.py,sha256=l9MzVRUxO19FNpHn67zWbhyUeXyJOVxlnAQLJiVJBIg,6822
 crackerjack/mcp/tools/utility_tools.py,sha256=MIv1CGR8vLeQEdKzR8xsNYj5A9TG_LXWp86drdKReXY,11849
-crackerjack/mcp/tools/workflow_executor.py,sha256=
+crackerjack/mcp/tools/workflow_executor.py,sha256=JQSWmhxQBL4p7AwHUcBYBfbCBEGFO7OSyQFAKYuD0uM,17855
 crackerjack/mcp/websocket/__init__.py,sha256=lZzyfvYjywHfqyy5X3wWR_jgBkRUxYSpgjdKALBzZcI,345
 crackerjack/mcp/websocket/app.py,sha256=AXijAplfW-8NwWDrOON30Ol5qcMKdc64npTY2YEkX8s,1193
 crackerjack/mcp/websocket/endpoints.py,sha256=L5zZzqhZtLFKF-Eh928jnwQIAIwunBSMrIaBoyaOaAE,20888

@@ -117,15 +117,15 @@ crackerjack/services/cache.py,sha256=LKZIa8xZ6SzwdTBttO1W6VEBxaTgMNI5Paz_IgrqoaI
 crackerjack/services/config.py,sha256=BrIJoKKi0qyQg2lcZZhTTUpAVB6ej6gkbPv95o2rVvc,14050
 crackerjack/services/config_integrity.py,sha256=wn6b0x90wXNRSj64J_kOQ5JMQvG_9FwtOno0ITVJGwM,3416
 crackerjack/services/contextual_ai_assistant.py,sha256=QnJvJNdGimk-u6XxPm9D7F516AmbueNnAmky42D2caQ,19206
-crackerjack/services/coverage_ratchet.py,sha256=
+crackerjack/services/coverage_ratchet.py,sha256=XM4iCe_Bckh1j378FgAG2zlbiJNCF0VeH1qVWG-fu1s,13731
 crackerjack/services/debug.py,sha256=crjsUZwVjP92aCEOEmtpEJwNf5gtlwNeZF_eB9JSfiI,24018
 crackerjack/services/dependency_monitor.py,sha256=axBXFGBdezoPK9ph5_ZGxIwhSJhurgdvCSiuaCWSrKY,21085
 crackerjack/services/enhanced_filesystem.py,sha256=MQj5zqvkNc5U6ZUmSVzgFQWKfnizD1lv4SJ2pt-w8W4,15424
 crackerjack/services/file_hasher.py,sha256=vHSJ6QbWU5Q5JPLYuQkyRMRXCpDC_hsxToaM83vI58U,5201
 crackerjack/services/filesystem.py,sha256=Re5VyP7H8W6T2tpDakoaghEivdR2VmshJgnZ9Y3QkH8,17932
-crackerjack/services/git.py,sha256=
+crackerjack/services/git.py,sha256=ZsIrsYhTuDyRSz4Lhtvh2cEFose7EPSpuDg74Vh8npI,8613
 crackerjack/services/health_metrics.py,sha256=M2OBqwwnGvnJB3eXIXXh5SgMuckYCjHIrD0RkYFAbQU,21458
-crackerjack/services/initialization.py,sha256=
+crackerjack/services/initialization.py,sha256=l3CagjRvBD9AQWkzD_JWQciSCQpYOPduRkPdUprE6MA,33455
 crackerjack/services/log_manager.py,sha256=deM_i97biZVyuZJoHaGlnBitc5QV4WaaZHEb70N5LV0,8388
 crackerjack/services/logging.py,sha256=c15gVCLR_yRhqaza7f1pLLYL-xQ3Oi_OMWL_mR5G46k,5354
 crackerjack/services/metrics.py,sha256=kInkb2G0ML8hAtmEG1jK04b-F1hT_fZjHvYJKisyr1Y,22894

@@ -142,8 +142,8 @@ crackerjack/slash_commands/__init__.py,sha256=ZHfKjluj9dX88zDYN6Saj7tGUMdMnh37Q8
 crackerjack/slash_commands/init.md,sha256=mANRdCiFAzaTw29lKNrI1JFthK4pxVdtiFC5lN2SDSQ,4581
 crackerjack/slash_commands/run.md,sha256=bf_mEtnXagUuw3w8os5h3t1Yi3vjpfiNbkMJvuFEu-Y,6500
 crackerjack/slash_commands/status.md,sha256=U3qqppVLtIIm2lEiMYaKagaHYLI9UplL7OH1j6SRJGw,3921
-crackerjack-0.31.
-crackerjack-0.31.
-crackerjack-0.31.
-crackerjack-0.31.
-crackerjack-0.31.
+crackerjack-0.31.10.dist-info/METADATA,sha256=4VBwoRFPEEXi1M6Lh6wY7wCEFTAmVKnyRowdCjDT4V8,22438
+crackerjack-0.31.10.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+crackerjack-0.31.10.dist-info/entry_points.txt,sha256=AJKNft0WXm9xoGUJ3Trl-iXHOWxRAYbagQiza3AILr4,57
+crackerjack-0.31.10.dist-info/licenses/LICENSE,sha256=fDt371P6_6sCu7RyqiZH_AhT1LdN3sN1zjBtqEhDYCk,1531
+crackerjack-0.31.10.dist-info/RECORD,,
WHEEL, entry_points.txt, and licenses/LICENSE are unchanged between versions.