crackerjack 0.37.9__py3-none-any.whl → 0.38.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of crackerjack might be problematic.
- crackerjack/__init__.py +1 -1
- crackerjack/api.py +1 -2
- crackerjack/core/workflow_orchestrator.py +291 -161
- crackerjack/documentation/reference_generator.py +104 -59
- crackerjack/dynamic_config.py +21 -9
- crackerjack/interactive.py +7 -5
- crackerjack/plugins/hooks.py +1 -1
- {crackerjack-0.37.9.dist-info → crackerjack-0.38.0.dist-info}/METADATA +2 -2
- {crackerjack-0.37.9.dist-info → crackerjack-0.38.0.dist-info}/RECORD +12 -12
- {crackerjack-0.37.9.dist-info → crackerjack-0.38.0.dist-info}/WHEEL +0 -0
- {crackerjack-0.37.9.dist-info → crackerjack-0.38.0.dist-info}/entry_points.txt +0 -0
- {crackerjack-0.37.9.dist-info → crackerjack-0.38.0.dist-info}/licenses/LICENSE +0 -0
crackerjack/__init__.py
CHANGED
crackerjack/api.py
CHANGED
@@ -8,8 +8,7 @@ from rich.console import Console
 from .code_cleaner import CleaningResult, CodeCleaner, PackageCleaningResult
 from .core.workflow_orchestrator import WorkflowOrchestrator
 from .errors import CrackerjackError, ErrorCode
-from .interactive import InteractiveCLI
-from .interactive import WorkflowOptions as InteractiveWorkflowOptions
+from .interactive import InteractiveCLI, InteractiveWorkflowOptions
 from .models.config import WorkflowOptions
 from .services.regex_patterns import SAFE_PATTERNS
 
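The consolidated import works because crackerjack/interactive.py (diffed further down) now defines InteractiveWorkflowOptions as a class of its own rather than exporting WorkflowOptions under an alias. A quick usage sketch; the field values here are illustrative, not taken from the release:

    # Both names now resolve from the single combined import line.
    from crackerjack.interactive import InteractiveCLI, InteractiveWorkflowOptions

    options = InteractiveWorkflowOptions(clean=True, test=True)
    print(options)  # dataclass repr with clean/test/publish/... fields
    assert InteractiveCLI is not None  # the CLI class ships from the same module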
crackerjack/core/workflow_orchestrator.py
CHANGED

@@ -169,22 +169,33 @@ class WorkflowPipeline:
 
     def _initialize_zuban_lsp(self, options: OptionsProtocol) -> None:
         """Initialize Zuban LSP server if not disabled."""
-
+        if self._should_skip_zuban_lsp(options):
+            return
+
+        if self._is_zuban_lsp_already_running():
+            return
+
+        self._start_zuban_lsp_server(options)
+
+    def _should_skip_zuban_lsp(self, options: OptionsProtocol) -> bool:
+        """Check if Zuban LSP server should be skipped."""
         if getattr(options, "no_zuban_lsp", False):
             self.logger.debug("Zuban LSP server disabled by --no-zuban-lsp flag")
-            return
+            return True
 
-        # Get configuration from options (will use config system if available)
         config = getattr(options, "zuban_lsp", None)
         if config and not config.enabled:
             self.logger.debug("Zuban LSP server disabled in configuration")
-            return
+            return True
 
         if config and not config.auto_start:
             self.logger.debug("Zuban LSP server auto-start disabled in configuration")
-            return
+            return True
 
-
+        return False
+
+    def _is_zuban_lsp_already_running(self) -> bool:
+        """Check if LSP server is already running to avoid duplicates."""
         from crackerjack.services.server_manager import find_zuban_lsp_processes
 
         existing_processes = find_zuban_lsp_processes()
@@ -192,20 +203,17 @@ class WorkflowPipeline:
             self.logger.debug(
                 f"Zuban LSP server already running (PID: {existing_processes[0]['pid']})"
             )
-            return
+            return True
+        return False
 
-
+    def _start_zuban_lsp_server(self, options: OptionsProtocol) -> None:
+        """Start the Zuban LSP server in background."""
         try:
             import subprocess
             import sys
 
-            config = getattr(options, "zuban_lsp", None)
-            if config:
-                zuban_lsp_port = config.port
-                zuban_lsp_mode = config.mode
-            else:
-                zuban_lsp_port = getattr(options, "zuban_lsp_port", 8677)
-                zuban_lsp_mode = getattr(options, "zuban_lsp_mode", "stdio")
+            config = getattr(options, "zuban_lsp", None)
+            zuban_lsp_port, zuban_lsp_mode = self._get_zuban_lsp_config(options, config)
 
             cmd = [
                 sys.executable,
@@ -232,6 +240,17 @@ class WorkflowPipeline:
         except Exception as e:
             self.logger.warning(f"Failed to auto-start Zuban LSP server: {e}")
 
+    def _get_zuban_lsp_config(
+        self, options: OptionsProtocol, config: any
+    ) -> tuple[int, str]:
+        """Get Zuban LSP configuration values."""
+        if config:
+            return config.port, config.mode
+        return (
+            getattr(options, "zuban_lsp_port", 8677),
+            getattr(options, "zuban_lsp_mode", "stdio"),
+        )
+
     def _log_zuban_lsp_status(self) -> None:
         """Display current Zuban LSP server status during workflow startup."""
         from crackerjack.services.server_manager import find_zuban_lsp_processes
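The extracted _get_zuban_lsp_config helper makes the fallback order explicit: a structured config object wins, otherwise flat option attributes with the same defaults the old inline branch used (8677, "stdio"). A minimal standalone sketch, with SimpleNamespace standing in for crackerjack's real options and config types:

    from types import SimpleNamespace


    def get_lsp_config(options: object, config: object) -> tuple[int, str]:
        # A structured config object takes priority when present.
        if config:
            return config.port, config.mode
        # Otherwise fall back to flat attributes with built-in defaults.
        return (
            getattr(options, "zuban_lsp_port", 8677),
            getattr(options, "zuban_lsp_mode", "stdio"),
        )


    bare = SimpleNamespace()  # no LSP attributes at all
    print(get_lsp_config(bare, None))  # (8677, 'stdio') - built-in defaults

    structured = SimpleNamespace(port=9000, mode="tcp")
    print(get_lsp_config(bare, structured))  # (9000, 'tcp') - config wins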
@@ -351,36 +370,9 @@ class WorkflowPipeline:
             return
 
         try:
-
-            {
-                "workflow_id": workflow_id,
-                "total_duration": duration,
-                "success": success,
-                "cache_metrics": self._cache.get_stats() if self._cache else {},
-                "memory_metrics": self._memory_optimizer.get_stats()
-                if hasattr(self._memory_optimizer, "get_stats")
-                else {},
-            }
-
-            # Generate benchmark comparison
+            self._gather_performance_metrics(workflow_id, duration, success)
             benchmark_results = await self._performance_benchmarks.run_benchmark_suite()
-
-            # Display compact performance summary
-            if benchmark_results:
-                self.console.print("\n[cyan]📊 Performance Benchmark Summary[/cyan]")
-                self.console.print(f"Workflow Duration: [bold]{duration:.2f}s[/bold]")
-
-                # Show key performance improvements if available
-                for result in benchmark_results.results[:3]:  # Top 3 results
-                    if result.time_improvement_percentage > 0:
-                        self.console.print(
-                            f"[green]⚡[/green] {result.test_name}: {result.time_improvement_percentage:.1f}% faster"
-                        )
-
-                    if result.cache_hit_ratio > 0:
-                        self.console.print(
-                            f"[blue]🎯[/blue] Cache efficiency: {result.cache_hit_ratio:.0%}"
-                        )
+            self._display_benchmark_results(benchmark_results, duration)
 
         except Exception as e:
             self.console.print(
@@ -390,6 +382,52 @@ class WorkflowPipeline:
         if self.debugger.enabled:
             self.debugger.print_debug_summary()
 
+    def _gather_performance_metrics(
+        self, workflow_id: str, duration: float, success: bool
+    ) -> dict:
+        """Gather performance metrics from workflow execution."""
+        return {
+            "workflow_id": workflow_id,
+            "total_duration": duration,
+            "success": success,
+            "cache_metrics": self._cache.get_stats() if self._cache else {},
+            "memory_metrics": self._memory_optimizer.get_stats()
+            if hasattr(self._memory_optimizer, "get_stats")
+            else {},
+        }
+
+    def _display_benchmark_results(
+        self, benchmark_results: t.Any, duration: float
+    ) -> None:
+        """Display compact performance summary."""
+        if not benchmark_results:
+            return
+
+        self.console.print("\n[cyan]📊 Performance Benchmark Summary[/cyan]")
+        self.console.print(f"Workflow Duration: [bold]{duration:.2f}s[/bold]")
+
+        self._show_performance_improvements(benchmark_results)
+
+    def _show_performance_improvements(self, benchmark_results: t.Any) -> None:
+        """Show key performance improvements from benchmark results."""
+        for result in benchmark_results.results[:3]:  # Top 3 results
+            self._display_time_improvement(result)
+            self._display_cache_efficiency(result)
+
+    def _display_time_improvement(self, result: t.Any) -> None:
+        """Display time improvement percentage if available."""
+        if result.time_improvement_percentage > 0:
+            self.console.print(
+                f"[green]⚡[/green] {result.test_name}: {result.time_improvement_percentage:.1f}% faster"
+            )
+
+    def _display_cache_efficiency(self, result: t.Any) -> None:
+        """Display cache hit ratio if available."""
+        if result.cache_hit_ratio > 0:
+            self.console.print(
+                f"[blue]🎯[/blue] Cache efficiency: {result.cache_hit_ratio:.0%}"
+            )
+
     def _handle_user_interruption(self) -> bool:
         self.console.print("Interrupted by user")
         self.session.fail_task("workflow", "Interrupted by user")
@@ -525,40 +563,52 @@ class WorkflowPipeline:
         if not self._quality_intelligence:
             return "Quality intelligence not available"
 
-            # Analyze recent quality trends and anomalies
             anomalies = self._quality_intelligence.detect_anomalies()
             patterns = self._quality_intelligence.identify_patterns()
 
-
-            recommendations = []
-            if anomalies:
-                high_severity_anomalies = [
-                    a for a in anomalies if a.severity.name in ("CRITICAL", "HIGH")
-                ]
-                if high_severity_anomalies:
-                    recommendations.append(
-                        "comprehensive analysis recommended due to quality anomalies"
-                    )
-                else:
-                    recommendations.append("standard quality checks sufficient")
-
-            if patterns:
-                improving_patterns = [
-                    p for p in patterns if p.trend_direction.name == "IMPROVING"
-                ]
-                if improving_patterns:
-                    recommendations.append("quality trending upward")
-                else:
-                    recommendations.append("quality monitoring active")
-
-            if not recommendations:
-                recommendations.append("baseline quality analysis active")
-
+            recommendations = self._build_quality_recommendations(anomalies, patterns)
             return "; ".join(recommendations)
 
         except Exception as e:
             return f"Quality intelligence analysis failed: {str(e)[:50]}..."
 
+    def _build_quality_recommendations(
+        self, anomalies: t.Any, patterns: t.Any
+    ) -> list[str]:
+        """Build quality recommendations based on anomalies and patterns."""
+        recommendations = []
+
+        if anomalies:
+            recommendations.extend(self._analyze_anomalies(anomalies))
+
+        if patterns:
+            recommendations.extend(self._analyze_patterns(patterns))
+
+        if not recommendations:
+            recommendations.append("baseline quality analysis active")
+
+        return recommendations
+
+    def _analyze_anomalies(self, anomalies: t.Any) -> list[str]:
+        """Analyze anomalies and return recommendations."""
+        high_severity_anomalies = [
+            a for a in anomalies if a.severity.name in ("CRITICAL", "HIGH")
+        ]
+
+        if high_severity_anomalies:
+            return ["comprehensive analysis recommended due to quality anomalies"]
+        return ["standard quality checks sufficient"]
+
+    def _analyze_patterns(self, patterns: t.Any) -> list[str]:
+        """Analyze patterns and return recommendations."""
+        improving_patterns = [
+            p for p in patterns if p.trend_direction.name == "IMPROVING"
+        ]
+
+        if improving_patterns:
+            return ["quality trending upward"]
+        return ["quality monitoring active"]
+
     async def _execute_test_workflow(
         self, options: OptionsProtocol, workflow_id: str
     ) -> bool:
@@ -886,27 +936,36 @@ class WorkflowPipeline:
     def _execute_standard_hooks_workflow(self, options: OptionsProtocol) -> bool:
         self._update_hooks_status_running()
 
-        fast_hooks_success = self._run_fast_hooks_phase(options)
-        if not fast_hooks_success:
+        if not self._execute_fast_hooks_workflow(options):
             self._handle_hooks_completion(False)
             return False
 
-        if getattr(options, "clean", False):
-            if not self._run_code_cleaning_phase(options):
-                self._handle_hooks_completion(False)
-                return False
-
-            if not self._run_post_cleaning_fast_hooks(options):
-                self._handle_hooks_completion(False)
-                return False
-            self._mark_code_cleaning_complete()
+        if not self._execute_cleaning_workflow_if_needed(options):
+            self._handle_hooks_completion(False)
+            return False
 
         comprehensive_success = self._run_comprehensive_hooks_phase(options)
+        self._handle_hooks_completion(comprehensive_success)
+
+        return comprehensive_success
 
-
-
+    def _execute_fast_hooks_workflow(self, options: OptionsProtocol) -> bool:
+        """Execute fast hooks phase."""
+        return self._run_fast_hooks_phase(options)
 
-
+    def _execute_cleaning_workflow_if_needed(self, options: OptionsProtocol) -> bool:
+        """Execute cleaning workflow if requested."""
+        if not getattr(options, "clean", False):
+            return True
+
+        if not self._run_code_cleaning_phase(options):
+            return False
+
+        if not self._run_post_cleaning_fast_hooks(options):
+            return False
+
+        self._mark_code_cleaning_complete()
+        return True
 
     def _update_hooks_status_running(self) -> None:
         if self._has_mcp_state_manager():
@@ -1116,15 +1175,16 @@ class WorkflowPipeline:
         return test_success
 
     def _should_verify_hook_fixes(self, fixes_applied: list[str]) -> bool:
-        hook_fixes = [
-            f
-            for f in fixes_applied
-            if "hook" not in f.lower()
-            or "complexity" in f.lower()
-            or "type" in f.lower()
-        ]
+        hook_fixes = [fix for fix in fixes_applied if self._is_hook_related_fix(fix)]
        return bool(hook_fixes)
 
+    def _is_hook_related_fix(self, fix: str) -> bool:
+        """Check if a fix is related to hooks and should trigger hook verification."""
+        fix_lower = fix.lower()
+        return (
+            "hook" not in fix_lower or "complexity" in fix_lower or "type" in fix_lower
+        )
+
     async def _verify_hook_fixes(self, options: OptionsProtocol) -> bool:
         self.logger.info("Re-running comprehensive hooks to verify hook fixes")
         hook_success = self.phases.run_comprehensive_hooks_only(options)
@@ -1299,35 +1359,30 @@ class WorkflowPipeline:
         return issues
 
     def _parse_comprehensive_hook_errors(self, error_msg: str) -> list[Issue]:
-        issues: list[Issue] = []
         error_lower = error_msg.lower()
+        error_checkers = self._get_comprehensive_error_checkers()
 
-        complexity_issue = self._check_complexity_error(error_lower)
-        if complexity_issue:
-            issues.append(complexity_issue)
-
-        type_error_issue = self._check_type_error(error_lower)
-        if type_error_issue:
-            issues.append(type_error_issue)
-
-        security_issue = self._check_security_error(error_lower)
-        if security_issue:
-            issues.append(security_issue)
-
-        performance_issue = self._check_performance_error(error_lower)
-        if performance_issue:
-            issues.append(performance_issue)
-
-        dead_code_issue = self._check_dead_code_error(error_lower)
-        if dead_code_issue:
-            issues.append(dead_code_issue)
-
-        regex_issue = self._check_regex_validation_error(error_lower)
-        if regex_issue:
-            issues.append(regex_issue)
+        issues = []
+        for check_func in error_checkers:
+            issue = check_func(error_lower)
+            if issue:
+                issues.append(issue)
 
         return issues
 
+    def _get_comprehensive_error_checkers(
+        self,
+    ) -> list[t.Callable[[str], Issue | None]]:
+        """Get list of error checking functions for comprehensive hooks."""
+        return [
+            self._check_complexity_error,
+            self._check_type_error,
+            self._check_security_error,
+            self._check_performance_error,
+            self._check_dead_code_error,
+            self._check_regex_validation_error,
+        ]
+
     def _check_complexity_error(self, error_lower: str) -> Issue | None:
         if "complexipy" in error_lower or "c901" in error_lower:
             return Issue(
@@ -1426,24 +1481,66 @@ class WorkflowPipeline:
     def _classify_issue(self, issue_str: str) -> tuple[IssueType, Priority]:
         issue_lower = issue_str.lower()
 
-        if self._is_type_error(issue_lower):
-            return IssueType.TYPE_ERROR, Priority.HIGH
-        if self._is_security_issue(issue_lower):
-            return IssueType.SECURITY, Priority.HIGH
-        if self._is_complexity_issue(issue_lower):
-            return IssueType.COMPLEXITY, Priority.HIGH
-        if self._is_regex_validation_issue(issue_lower):
-            return IssueType.REGEX_VALIDATION, Priority.HIGH
-
-        if self._is_dead_code_issue(issue_lower):
-            return IssueType.DEAD_CODE, Priority.MEDIUM
-        if self._is_performance_issue(issue_lower):
-            return IssueType.PERFORMANCE, Priority.MEDIUM
-        if self._is_import_error(issue_lower):
-            return IssueType.IMPORT_ERROR, Priority.MEDIUM
+        # Check high priority issues first
+        high_priority_result = self._check_high_priority_issues(issue_lower)
+        if high_priority_result:
+            return high_priority_result
 
+        # Check medium priority issues
+        medium_priority_result = self._check_medium_priority_issues(issue_lower)
+        if medium_priority_result:
+            return medium_priority_result
+
+        # Default to formatting issue
         return IssueType.FORMATTING, Priority.MEDIUM
 
+    def _check_high_priority_issues(
+        self, issue_lower: str
+    ) -> tuple[IssueType, Priority] | None:
+        """Check for high priority issue types.
+
+        Args:
+            issue_lower: Lowercase issue string
+
+        Returns:
+            Tuple of issue type and priority if found, None otherwise
+        """
+        high_priority_checks = [
+            (self._is_type_error, IssueType.TYPE_ERROR),
+            (self._is_security_issue, IssueType.SECURITY),
+            (self._is_complexity_issue, IssueType.COMPLEXITY),
+            (self._is_regex_validation_issue, IssueType.REGEX_VALIDATION),
+        ]
+
+        for check_func, issue_type in high_priority_checks:
+            if check_func(issue_lower):
+                return issue_type, Priority.HIGH
+
+        return None
+
+    def _check_medium_priority_issues(
+        self, issue_lower: str
+    ) -> tuple[IssueType, Priority] | None:
+        """Check for medium priority issue types.
+
+        Args:
+            issue_lower: Lowercase issue string
+
+        Returns:
+            Tuple of issue type and priority if found, None otherwise
+        """
+        medium_priority_checks = [
+            (self._is_dead_code_issue, IssueType.DEAD_CODE),
+            (self._is_performance_issue, IssueType.PERFORMANCE),
+            (self._is_import_error, IssueType.IMPORT_ERROR),
+        ]
+
+        for check_func, issue_type in medium_priority_checks:
+            if check_func(issue_lower):
+                return issue_type, Priority.MEDIUM
+
+        return None
+
     def _is_type_error(self, issue_lower: str) -> bool:
         return any(
             keyword in issue_lower for keyword in ("type", "annotation", "pyright")
@@ -1512,44 +1609,77 @@ class WorkflowPipeline:
     async def _handle_security_gate_failure(
         self, options: OptionsProtocol, allow_ai_fixing: bool = False
     ) -> bool:
+        self._display_security_gate_failure_message()
+
+        if allow_ai_fixing:
+            return await self._attempt_ai_assisted_security_fix(options)
+        return self._handle_manual_security_fix()
+
+    def _display_security_gate_failure_message(self) -> None:
+        """Display initial security gate failure message."""
         self.console.print(
             "[red]🔒 SECURITY GATE: Critical security checks failed[/red]"
         )
 
-
-        self.console.print(
-            "[red]Security-critical hooks (bandit, pyright, gitleaks) must pass before publishing[/red]"
-        )
-        self.console.print(
-            "[yellow]🤖 Attempting AI-assisted security issue resolution...[/yellow]"
-        )
+    async def _attempt_ai_assisted_security_fix(self, options: OptionsProtocol) -> bool:
+        """Attempt to fix security issues using AI assistance.
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        Args:
+            options: Configuration options
+
+        Returns:
+            True if security issues were resolved, False otherwise
+        """
+        self._display_ai_fixing_messages()
+
+        ai_fix_success = await self._run_ai_agent_fixing_phase(options)
+        if ai_fix_success:
+            return self._verify_security_fix_success()
+
+        return False
+
+    def _display_ai_fixing_messages(self) -> None:
+        """Display messages about AI-assisted security fixing."""
+        self.console.print(
+            "[red]Security-critical hooks (bandit, pyright, gitleaks) must pass before publishing[/red]"
+        )
+        self.console.print(
+            "[yellow]🤖 Attempting AI-assisted security issue resolution...[/yellow]"
+        )
+
+    def _verify_security_fix_success(self) -> bool:
+        """Verify that AI fixes resolved the security issues.
+
+        Returns:
+            True if security issues were resolved, False otherwise
+        """
+        try:
+            security_still_blocks = self._check_security_critical_failures()
+            if not security_still_blocks:
+                self.console.print(
+                    "[green]✅ AI agents resolved security issues - publishing allowed[/green]"
+                )
+                return True
+            else:
+                self.console.print(
+                    "[red]🔒 Security issues persist after AI fixing - publishing still BLOCKED[/red]"
+                )
+                return False
+        except Exception as e:
+            self.logger.warning(f"Security re-check failed: {e} - blocking publishing")
             return False
 
+    def _handle_manual_security_fix(self) -> bool:
+        """Handle security fix when AI assistance is not allowed.
+
+        Returns:
+            Always False since manual intervention is required
+        """
+        self.console.print(
+            "[red]Security-critical hooks (bandit, pyright, gitleaks) must pass before publishing[/red]"
+        )
+        return False
+
     def _determine_ai_fixing_needed(
         self,
         testing_passed: bool,
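Several of the hunks above replace copy-pasted check/append blocks with a table of callables (_get_comprehensive_error_checkers, _check_high_priority_issues, _check_medium_priority_issues). A minimal self-contained sketch of that dispatch pattern; the checkers and labels below are illustrative stubs, not crackerjack's real Issue model:

    from typing import Callable, Optional

    Issue = str  # stand-in for crackerjack's Issue dataclass


    def check_complexity(error_lower: str) -> Optional[Issue]:
        if "complexipy" in error_lower or "c901" in error_lower:
            return "complexity issue"
        return None


    def check_type(error_lower: str) -> Optional[Issue]:
        return "type issue" if "pyright" in error_lower else None


    def parse_errors(error_msg: str) -> list[Issue]:
        error_lower = error_msg.lower()
        # One loop over the checker table replaces six near-identical blocks.
        checkers: list[Callable[[str], Optional[Issue]]] = [check_complexity, check_type]
        return [issue for check in checkers if (issue := check(error_lower)) is not None]


    print(parse_errors("C901 too complex; pyright: incompatible type"))
    # ['complexity issue', 'type issue']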
crackerjack/documentation/reference_generator.py
CHANGED

@@ -485,44 +485,74 @@ class ReferenceGenerator:
         Returns:
             Enhanced commands with workflow info
         """
-
-        workflow_patterns = {
+        workflow_patterns = self._get_workflow_patterns()
+
+        for command in commands.values():
+            self._assign_command_workflows(command, workflow_patterns)
+            self._add_ai_context_to_command(command)
+
+        return commands
+
+    def _get_workflow_patterns(self) -> dict[str, list[str]]:
+        """Get workflow patterns for command categorization.
+
+        Returns:
+            Dictionary mapping workflow names to pattern lists
+        """
+        return {
             "development": ["test", "format", "lint", "type-check"],
             "release": ["version", "build", "publish", "tag"],
             "maintenance": ["clean", "update", "optimize", "backup"],
             "monitoring": ["status", "health", "metrics", "logs"],
         }
 
-        for command in commands.values():
-
-            for workflow, patterns in workflow_patterns.items():
-                if any(pattern in command.name for pattern in patterns):
-                    command.common_workflows.append(workflow)
-
-            # Add AI context based on command purpose
-            if "test" in command.name:
-                command.ai_context.update(
-                    {
-                        "purpose": "quality_assurance",
-                        "automation_level": "high",
-                        "ai_agent_compatible": True,
-                    }
-                )
-                command.success_patterns.append("All tests passed")
-                command.failure_patterns.append("Test failures detected")
-
-            elif "format" in command.name or "lint" in command.name:
-                command.ai_context.update(
-                    {
-                        "purpose": "code_quality",
-                        "automation_level": "high",
-                        "ai_agent_compatible": True,
-                    }
-                )
-                command.success_patterns.append("No formatting issues")
-                command.failure_patterns.append("Style violations found")
-
-        return commands
+    def _assign_command_workflows(
+        self, command: CommandInfo, workflow_patterns: dict[str, list[str]]
+    ) -> None:
+        """Assign workflows to a command based on name patterns.
+
+        Args:
+            command: Command to assign workflows to
+            workflow_patterns: Workflow patterns to match against
+        """
+        for workflow, patterns in workflow_patterns.items():
+            if any(pattern in command.name for pattern in patterns):
+                command.common_workflows.append(workflow)
+
+    def _add_ai_context_to_command(self, command: CommandInfo) -> None:
+        """Add AI context to a command based on its purpose.
+
+        Args:
+            command: Command to enhance with AI context
+        """
+        if "test" in command.name:
+            self._add_test_ai_context(command)
+        elif "format" in command.name or "lint" in command.name:
+            self._add_quality_ai_context(command)
+
+    def _add_test_ai_context(self, command: CommandInfo) -> None:
+        """Add AI context for test-related commands."""
+        command.ai_context.update(
+            {
+                "purpose": "quality_assurance",
+                "automation_level": "high",
+                "ai_agent_compatible": True,
+            }
+        )
+        command.success_patterns.append("All tests passed")
+        command.failure_patterns.append("Test failures detected")
+
+    def _add_quality_ai_context(self, command: CommandInfo) -> None:
+        """Add AI context for code quality commands."""
+        command.ai_context.update(
+            {
+                "purpose": "code_quality",
+                "automation_level": "high",
+                "ai_agent_compatible": True,
+            }
+        )
+        command.success_patterns.append("No formatting issues")
+        command.failure_patterns.append("Style violations found")
 
     def _categorize_commands(
         self, commands: dict[str, CommandInfo]
@@ -536,8 +566,18 @@
             Dictionary of category to command names
         """
         categories: dict[str, list[str]] = {}
+        category_patterns = self._get_category_patterns()
+
+        for command in commands.values():
+            category = self._determine_command_category(command, category_patterns)
+            command.category = category
+            self._add_command_to_category(categories, category, command.name)
+
+        return categories
 
-        category_patterns = {
+    def _get_category_patterns(self) -> dict[str, list[str]]:
+        """Get category patterns for command classification."""
+        return {
             "development": ["test", "format", "lint", "check", "run"],
             "server": ["server", "start", "stop", "restart", "monitor"],
             "release": ["version", "bump", "publish", "build", "tag"],
@@ -545,27 +585,22 @@
             "utilities": ["clean", "help", "info", "status"],
         }
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            if "general" not in categories:
-                categories["general"] = []
-            categories["general"].append(command.name)
-
-        return categories
+    def _determine_command_category(
+        self, command: CommandInfo, category_patterns: dict[str, list[str]]
+    ) -> str:
+        """Determine the category for a command based on patterns."""
+        for category, patterns in category_patterns.items():
+            if any(pattern in command.name for pattern in patterns):
+                return category
+        return "general"
+
+    def _add_command_to_category(
+        self, categories: dict[str, list[str]], category: str, command_name: str
+    ) -> None:
+        """Add command to the specified category."""
+        if category not in categories:
+            categories[category] = []
+        categories[category].append(command_name)
 
     def _generate_workflows(
         self, commands: dict[str, CommandInfo]
@@ -663,7 +698,9 @@
         """Render command categories for markdown."""
         category_lines = []
         for category, command_names in reference.categories.items():
-            category_section = self._render_markdown_category(category, reference.commands, command_names)
+            category_section = self._render_markdown_category(
+                category, reference.commands, command_names
+            )
             category_lines.extend(category_section)
         return category_lines
 
@@ -733,7 +770,9 @@
 
         # Add related commands section
         if command.related_commands:
-            related_lines = self._render_command_related_markdown(command.related_commands)
+            related_lines = self._render_command_related_markdown(
+                command.related_commands
+            )
             lines.extend(related_lines)
 
         return lines
@@ -813,9 +852,11 @@
     def _render_html(self, reference: CommandReference) -> str:
         """Render reference as HTML."""
         html_parts = [
-            self._render_html_header(reference.generated_at.strftime("%Y-%m-%d %H:%M:%S")),
+            self._render_html_header(
+                reference.generated_at.strftime("%Y-%m-%d %H:%M:%S")
+            ),
             self._render_html_commands(reference),
-            "</body></html>"
+            "</body></html>",
         ]
         return "".join(html_parts)
 
@@ -842,7 +883,9 @@
         """Render HTML commands by category."""
         html_parts = []
         for category, command_names in reference.categories.items():
-            category_html = self._render_html_category(category, reference.commands, command_names)
+            category_html = self._render_html_category(
+                category, reference.commands, command_names
+            )
             html_parts.append(category_html)
         return "".join(html_parts)
 
@@ -918,7 +961,9 @@
             "aliases": command.aliases,
         }
 
-    def _serialize_parameters(self, parameters: list[ParameterInfo]) -> list[dict[str, t.Any]]:
+    def _serialize_parameters(
+        self, parameters: list[ParameterInfo]
+    ) -> list[dict[str, t.Any]]:
         """Serialize parameters for JSON output."""
         return [self._serialize_parameter(param) for param in parameters]
 
crackerjack/dynamic_config.py
CHANGED
@@ -192,7 +192,7 @@ HOOKS_REGISTRY: dict[str, list[HookMetadata]] = {
         "stages": ["pre-push", "manual"],
         "args": ["-c", "pyproject.toml", "-r", "-ll"],
         "files": "^crackerjack/.*\\.py$",
-        "exclude":
+        "exclude": r"^tests/",
         "additional_dependencies": None,
         "types_or": None,
         "language": None,
@@ -284,9 +284,9 @@ HOOKS_REGISTRY: dict[str, list[HookMetadata]] = {
         "tier": 3,
         "time_estimate": 0.1,
         "stages": ["pre-push", "manual"],
-        "args": ["crackerjack"],
+        "args": ["crackerjack", "--exclude", "tests"],
         "files": None,
-        "exclude":
+        "exclude": r"^tests/",
         "additional_dependencies": None,
         "types_or": None,
         "language": "system",
@@ -338,9 +338,9 @@ HOOKS_REGISTRY: dict[str, list[HookMetadata]] = {
         "tier": 3,
         "time_estimate": 3.0,
         "stages": ["pre-push", "manual"],
-        "args": [
+        "args": [],
         "files": "^crackerjack/.*\\.py$",
-        "exclude": r"^tests
+        "exclude": r"^tests/",
         "additional_dependencies": None,
         "types_or": None,
         "language": None,
@@ -358,7 +358,7 @@ HOOKS_REGISTRY: dict[str, list[HookMetadata]] = {
         "stages": ["pre-push", "manual"],
         "args": ["--config-file", "mypy.ini", "./crackerjack"],
         "files": None,
-        "exclude":
+        "exclude": r"^tests/",
         "additional_dependencies": None,
         "types_or": None,
         "language": "system",
@@ -544,7 +544,7 @@ class DynamicConfigGenerator:
         """Update hook configuration to use the detected package directory."""
         # Update skylos hook
         if hook["id"] == "skylos" and hook["args"]:
-            hook["args"] = [self.package_directory]
+            hook["args"] = [self.package_directory, "--exclude", "tests"]
 
         # Update zuban hook
         elif hook["id"] == "zuban" and hook["args"]:
@@ -566,12 +566,24 @@ class DynamicConfigGenerator:
                 "crackerjack", self.package_directory
             )
 
-        # Ensure hooks exclude src directories to avoid JavaScript conflicts
+        # Ensure hooks exclude src directories to avoid JavaScript conflicts and tests
         if hook["exclude"]:
+            # Add src exclusion if not present
             if "src/" not in hook["exclude"]:
                 hook["exclude"] = f"{hook['exclude']}|^src/"
         else:
-            hook["exclude"] = "^src/"
+            # If no exclusion, add both tests and src
+            if hook["id"] in (
+                "skylos",
+                "zuban",
+                "bandit",
+                "refurb",
+                "complexipy",
+                "pyright",
+            ):
+                hook["exclude"] = r"^tests/|^src/"
+            else:
+                hook["exclude"] = "^src/"
 
         return hook
 
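pre-commit treats each hook's exclude value as a single Python regular expression, which is why the generator above can merge exclusions with | alternation (r"^tests/|^src/"). A small sketch of the merging behavior; the hook dict is an illustrative stand-in for the generated config:

    import re

    hook = {"id": "bandit", "exclude": r"^tests/"}

    # Mirrors the diff's logic: append ^src/ when an exclude already exists.
    if hook["exclude"] and "src/" not in hook["exclude"]:
        hook["exclude"] = f"{hook['exclude']}|^src/"

    pattern = re.compile(hook["exclude"])
    print(bool(pattern.match("tests/test_api.py")))   # True  - excluded
    print(bool(pattern.match("src/index.ts")))        # True  - excluded
    print(bool(pattern.match("crackerjack/api.py")))  # False - still checked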
crackerjack/interactive.py
CHANGED
@@ -24,7 +24,7 @@ class TaskStatus(Enum):
 
 
 @dataclass
-class WorkflowOptions:
+class InteractiveWorkflowOptions:
     clean: bool = False
     test: bool = False
     publish: str | None = None
@@ -35,7 +35,7 @@ class WorkflowOptions:
     dry_run: bool = False
 
     @classmethod
-    def from_args(cls, args: t.Any) -> "WorkflowOptions":
+    def from_args(cls, args: t.Any) -> "InteractiveWorkflowOptions":
         return cls(
             clean=getattr(args, "clean", False),
             test=getattr(args, "test", False),
@@ -399,7 +399,7 @@ class InteractiveCLI:
 
         self.logger = logging.getLogger("crackerjack.interactive.cli")
 
-    def create_dynamic_workflow(self, options: WorkflowOptions) -> None:
+    def create_dynamic_workflow(self, options: InteractiveWorkflowOptions) -> None:
         builder = WorkflowBuilder(self.console)
 
         workflow_steps = [
@@ -581,7 +581,7 @@ class InteractiveCLI:
             or last_task
         )
 
-    def run_interactive_workflow(self, options: WorkflowOptions) -> bool:
+    def run_interactive_workflow(self, options: InteractiveWorkflowOptions) -> bool:
         self.logger.info(
             f"Starting interactive workflow with options: {options.__dict__}",
         )
@@ -682,7 +682,9 @@ def launch_interactive_cli(version: str, options: t.Any = None) -> None:
     console.print()
 
     workflow_options = (
-        WorkflowOptions.from_args(options) if options else WorkflowOptions()
+        InteractiveWorkflowOptions.from_args(options)
+        if options
+        else InteractiveWorkflowOptions()
     )
     cli.create_dynamic_workflow(workflow_options)
     cli.run_interactive_workflow(workflow_options)
crackerjack/plugins/hooks.py
CHANGED
{crackerjack-0.37.9.dist-info → crackerjack-0.38.0.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: crackerjack
-Version: 0.37.9
+Version: 0.38.0
 Summary: Crackerjack Python project management tool
 Project-URL: documentation, https://github.com/lesleslie/crackerjack
 Project-URL: homepage, https://github.com/lesleslie/crackerjack
@@ -73,7 +73,7 @@ Description-Content-Type: text/markdown
 [](https://github.com/astral-sh/uv)
 [](https://github.com/pre-commit/pre-commit)
 [](https://opensource.org/licenses/BSD-3-Clause)
-
+
 
 ## 🎯 Purpose
 
{crackerjack-0.37.9.dist-info → crackerjack-0.38.0.dist-info}/RECORD
CHANGED

@@ -1,10 +1,10 @@
-crackerjack/__init__.py,sha256=
+crackerjack/__init__.py,sha256=DajG9zHB8qBdgdiKMumrrssUbKeMXmtIQ3oOaSTb46Y,1426
 crackerjack/__main__.py,sha256=lE5ZDbAzI9TLCzMTxFGw23Dk1hP-MEhd1i_nNbi_Mag,52515
-crackerjack/api.py,sha256=
+crackerjack/api.py,sha256=PyCRaZHvKWdu62_2O4t_HcEfKNBdqyrfPdonS_PNn4c,21495
 crackerjack/code_cleaner.py,sha256=M1zVaq31uW0nOkPneKR8kfR3892gyyVx0VhFgRaxsj4,44338
-crackerjack/dynamic_config.py,sha256=
+crackerjack/dynamic_config.py,sha256=4c8Fts9vyH8Tdon_47OFVT1iTBINSzSgB0WoeSvpzII,22418
 crackerjack/errors.py,sha256=yYbZ92kn_y6acEWgQvEPvozAYs2HT65uLwAXrtXxGsE,10049
-crackerjack/interactive.py,sha256=
+crackerjack/interactive.py,sha256=CYu53ySL2I1YCWRRRHmVEYQq3RQ0xDCXFTuPUAw6DTc,21399
 crackerjack/adapters/__init__.py,sha256=k-8ajMDL9DS9hV2FYOu694nmNQg3HkudJRuNcXmx8N4,451
 crackerjack/adapters/lsp_client.py,sha256=4kQ3T5JiWC7uc6kOjZuPdtUboseKSDjZpuKQpV74onc,10963
 crackerjack/adapters/rust_tool_adapter.py,sha256=ui_qMt_WIwInRvRCeT7MnIdp8eln7Fvp4hakXQiVnjg,5999
@@ -54,7 +54,7 @@ crackerjack/core/service_watchdog.py,sha256=Ttj1imOxvUea4Tkf5JO1e2dQtGIK7D-bX1xO
 crackerjack/core/session_coordinator.py,sha256=TgoGE9DfXe2x-OkH93Ld9dX9ROjx2_mZFkGXen-z5YI,15680
 crackerjack/core/timeout_manager.py,sha256=_sbEsfYDwWx7y0Pn89QCoAZ5DpWIbCdtR9qkG_Kqj5E,15013
 crackerjack/core/websocket_lifecycle.py,sha256=74kn6ugu6FLlDQhCNSPgqguCFwRoT1WFOvtl8G2OyFc,12860
-crackerjack/core/workflow_orchestrator.py,sha256=
+crackerjack/core/workflow_orchestrator.py,sha256=S7XiVdJEEKcHQF1_PL1IymDpx940le_z6ozG6EIpk-s,75876
 crackerjack/docs/INDEX.md,sha256=a6CGFEeL5DX_FRft_JFWd0nOxoBmCSSp-QHIC3B7ato,342
 crackerjack/docs/generated/api/API_REFERENCE.md,sha256=mWoqImZA7AhDvRqqF1MhUo70g_pnZr3NoBeZQRotqN8,155816
 crackerjack/docs/generated/api/CLI_REFERENCE.md,sha256=ikuG0hO5EjIiQlJtAUnvEuAhXDa-JHPULPXNNmUwvk4,2805
@@ -65,7 +65,7 @@ crackerjack/documentation/__init__.py,sha256=gGR--er5oTHhbwLKOHVlU2QgGmQtA0qUXf3
 crackerjack/documentation/ai_templates.py,sha256=GRBKB5bqWudh9MDLjo1b3vNiFAgpL62ezzRp_WxTews,21629
 crackerjack/documentation/dual_output_generator.py,sha256=w7rDthOnyFeRPQDWvYiR4aiScPxsHzkwjJ3blMwT9-w,28552
 crackerjack/documentation/mkdocs_integration.py,sha256=KqU2_9mA-rjP_VDrrfr6KTuPWtTlcvkInPxoH03LTC0,15657
-crackerjack/documentation/reference_generator.py,sha256=
+crackerjack/documentation/reference_generator.py,sha256=NGAIsC5bnjLBQkvEXPDU0pw8bQ5kYzbUUokhlXXFqrU,34520
 crackerjack/executors/__init__.py,sha256=HF-DmXvKN45uKKDdiMxOT9bYxuy1B-Z91BihOhkK5lg,322
 crackerjack/executors/async_hook_executor.py,sha256=FmKpiAxpZXKwvOWXnRQ73N-reDfX8NusESQ9a4HeacM,17620
 crackerjack/executors/cached_hook_executor.py,sha256=izwdW0B22EZcl_2_llmTIyq5oTcZDZTRL2G97ZwYiXg,11173
@@ -145,7 +145,7 @@ crackerjack/orchestration/execution_strategies.py,sha256=G34eYqd5fqKPgPcRScASS86
 crackerjack/orchestration/test_progress_streamer.py,sha256=Yu6uHuhoCvX6SZP0QNG3Yt8Q4s2tufEHr40o16QU98c,22541
 crackerjack/plugins/__init__.py,sha256=B7hy9b9amJVbYLHgIz8kgTI29j-vYxsUY_sZ5ISbXU0,386
 crackerjack/plugins/base.py,sha256=VFk-xNsgjSlmzJ_iPQALhkr7cguiOtEd3XSR9CcCPkc,5732
-crackerjack/plugins/hooks.py,sha256=
+crackerjack/plugins/hooks.py,sha256=XagUpeehUwP_k4NnODnn0M1ycTe1F4uP1EMiFULUlKY,7581
 crackerjack/plugins/loader.py,sha256=9RmA5Lkizz5BADn-aJDGekISyL7C_O2Grr1tB6undHY,10814
 crackerjack/plugins/managers.py,sha256=3kQlxjvcHyHDgZIdr-JZBO1kqz2asqA4kf2XVAA1K6A,8824
 crackerjack/security/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -224,8 +224,8 @@ crackerjack/tools/validate_input_validator_patterns.py,sha256=NN7smYlXWrHLQXTb-8
 crackerjack/tools/validate_regex_patterns.py,sha256=J7GG9EP1fASpRIsG8qRPeiCSkdCwmk0sdo29GgoJ6w8,5863
 crackerjack/ui/__init__.py,sha256=eMb1OeTU-dSLICAACn0YdYB4Amdr8wHckjKfn0wOIZE,37
 crackerjack/ui/server_panels.py,sha256=F5IH6SNN06BaZQMsFx_D-OA286aojmaFPJ5kvvSRv_c,4232
-crackerjack-0.37.9.dist-info/METADATA,sha256=
-crackerjack-0.37.9.dist-info/WHEEL,sha256=
-crackerjack-0.37.9.dist-info/entry_points.txt,sha256=
-crackerjack-0.37.9.dist-info/licenses/LICENSE,sha256=
-crackerjack-0.37.9.dist-info/RECORD,,
+crackerjack-0.38.0.dist-info/METADATA,sha256=eOtnsgHgSIJAR_hBiHx8bgF6maVsKm7LdATHCdtTu6A,37949
+crackerjack-0.38.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+crackerjack-0.38.0.dist-info/entry_points.txt,sha256=AJKNft0WXm9xoGUJ3Trl-iXHOWxRAYbagQiza3AILr4,57
+crackerjack-0.38.0.dist-info/licenses/LICENSE,sha256=fDt371P6_6sCu7RyqiZH_AhT1LdN3sN1zjBtqEhDYCk,1531
+crackerjack-0.38.0.dist-info/RECORD,,

{crackerjack-0.37.9.dist-info → crackerjack-0.38.0.dist-info}/WHEEL
File without changes

{crackerjack-0.37.9.dist-info → crackerjack-0.38.0.dist-info}/entry_points.txt
File without changes

{crackerjack-0.37.9.dist-info → crackerjack-0.38.0.dist-info}/licenses/LICENSE
File without changes