crackerjack 0.37.9__py3-none-any.whl → 0.45.2__py3-none-any.whl
This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registry.
- crackerjack/README.md +19 -0
- crackerjack/__init__.py +30 -1
- crackerjack/__main__.py +342 -1263
- crackerjack/adapters/README.md +18 -0
- crackerjack/adapters/__init__.py +27 -5
- crackerjack/adapters/_output_paths.py +167 -0
- crackerjack/adapters/_qa_adapter_base.py +309 -0
- crackerjack/adapters/_tool_adapter_base.py +706 -0
- crackerjack/adapters/ai/README.md +65 -0
- crackerjack/adapters/ai/__init__.py +5 -0
- crackerjack/adapters/ai/claude.py +853 -0
- crackerjack/adapters/complexity/README.md +53 -0
- crackerjack/adapters/complexity/__init__.py +10 -0
- crackerjack/adapters/complexity/complexipy.py +641 -0
- crackerjack/adapters/dependency/__init__.py +22 -0
- crackerjack/adapters/dependency/pip_audit.py +418 -0
- crackerjack/adapters/format/README.md +72 -0
- crackerjack/adapters/format/__init__.py +11 -0
- crackerjack/adapters/format/mdformat.py +313 -0
- crackerjack/adapters/format/ruff.py +516 -0
- crackerjack/adapters/lint/README.md +47 -0
- crackerjack/adapters/lint/__init__.py +11 -0
- crackerjack/adapters/lint/codespell.py +273 -0
- crackerjack/adapters/lsp/README.md +49 -0
- crackerjack/adapters/lsp/__init__.py +27 -0
- crackerjack/adapters/{rust_tool_manager.py → lsp/_manager.py} +3 -3
- crackerjack/adapters/{skylos_adapter.py → lsp/skylos.py} +59 -7
- crackerjack/adapters/{zuban_adapter.py → lsp/zuban.py} +3 -6
- crackerjack/adapters/refactor/README.md +59 -0
- crackerjack/adapters/refactor/__init__.py +12 -0
- crackerjack/adapters/refactor/creosote.py +318 -0
- crackerjack/adapters/refactor/refurb.py +406 -0
- crackerjack/adapters/refactor/skylos.py +494 -0
- crackerjack/adapters/sast/README.md +132 -0
- crackerjack/adapters/sast/__init__.py +32 -0
- crackerjack/adapters/sast/_base.py +201 -0
- crackerjack/adapters/sast/bandit.py +423 -0
- crackerjack/adapters/sast/pyscn.py +405 -0
- crackerjack/adapters/sast/semgrep.py +241 -0
- crackerjack/adapters/security/README.md +111 -0
- crackerjack/adapters/security/__init__.py +17 -0
- crackerjack/adapters/security/gitleaks.py +339 -0
- crackerjack/adapters/type/README.md +52 -0
- crackerjack/adapters/type/__init__.py +12 -0
- crackerjack/adapters/type/pyrefly.py +402 -0
- crackerjack/adapters/type/ty.py +402 -0
- crackerjack/adapters/type/zuban.py +522 -0
- crackerjack/adapters/utility/README.md +51 -0
- crackerjack/adapters/utility/__init__.py +10 -0
- crackerjack/adapters/utility/checks.py +884 -0
- crackerjack/agents/README.md +264 -0
- crackerjack/agents/__init__.py +40 -12
- crackerjack/agents/base.py +1 -0
- crackerjack/agents/claude_code_bridge.py +641 -0
- crackerjack/agents/coordinator.py +49 -53
- crackerjack/agents/dry_agent.py +187 -3
- crackerjack/agents/enhanced_coordinator.py +279 -0
- crackerjack/agents/enhanced_proactive_agent.py +185 -0
- crackerjack/agents/error_middleware.py +53 -0
- crackerjack/agents/formatting_agent.py +6 -8
- crackerjack/agents/helpers/__init__.py +9 -0
- crackerjack/agents/helpers/performance/__init__.py +22 -0
- crackerjack/agents/helpers/performance/performance_ast_analyzer.py +357 -0
- crackerjack/agents/helpers/performance/performance_pattern_detector.py +909 -0
- crackerjack/agents/helpers/performance/performance_recommender.py +572 -0
- crackerjack/agents/helpers/refactoring/__init__.py +22 -0
- crackerjack/agents/helpers/refactoring/code_transformer.py +536 -0
- crackerjack/agents/helpers/refactoring/complexity_analyzer.py +344 -0
- crackerjack/agents/helpers/refactoring/dead_code_detector.py +437 -0
- crackerjack/agents/helpers/test_creation/__init__.py +19 -0
- crackerjack/agents/helpers/test_creation/test_ast_analyzer.py +216 -0
- crackerjack/agents/helpers/test_creation/test_coverage_analyzer.py +643 -0
- crackerjack/agents/helpers/test_creation/test_template_generator.py +1031 -0
- crackerjack/agents/performance_agent.py +121 -1152
- crackerjack/agents/refactoring_agent.py +156 -655
- crackerjack/agents/semantic_agent.py +479 -0
- crackerjack/agents/semantic_helpers.py +356 -0
- crackerjack/agents/test_creation_agent.py +19 -1605
- crackerjack/api.py +5 -7
- crackerjack/cli/README.md +394 -0
- crackerjack/cli/__init__.py +1 -1
- crackerjack/cli/cache_handlers.py +23 -18
- crackerjack/cli/cache_handlers_enhanced.py +1 -4
- crackerjack/cli/facade.py +70 -8
- crackerjack/cli/formatting.py +13 -0
- crackerjack/cli/handlers/__init__.py +85 -0
- crackerjack/cli/handlers/advanced.py +103 -0
- crackerjack/cli/handlers/ai_features.py +62 -0
- crackerjack/cli/handlers/analytics.py +479 -0
- crackerjack/cli/handlers/changelog.py +271 -0
- crackerjack/cli/handlers/config_handlers.py +16 -0
- crackerjack/cli/handlers/coverage.py +84 -0
- crackerjack/cli/handlers/documentation.py +280 -0
- crackerjack/cli/handlers/main_handlers.py +497 -0
- crackerjack/cli/handlers/monitoring.py +371 -0
- crackerjack/cli/handlers.py +249 -49
- crackerjack/cli/interactive.py +8 -5
- crackerjack/cli/options.py +203 -110
- crackerjack/cli/semantic_handlers.py +292 -0
- crackerjack/cli/version.py +19 -0
- crackerjack/code_cleaner.py +60 -24
- crackerjack/config/README.md +472 -0
- crackerjack/config/__init__.py +256 -0
- crackerjack/config/global_lock_config.py +191 -54
- crackerjack/config/hooks.py +188 -16
- crackerjack/config/loader.py +239 -0
- crackerjack/config/settings.py +141 -0
- crackerjack/config/tool_commands.py +331 -0
- crackerjack/core/README.md +393 -0
- crackerjack/core/async_workflow_orchestrator.py +79 -53
- crackerjack/core/autofix_coordinator.py +22 -9
- crackerjack/core/container.py +10 -9
- crackerjack/core/enhanced_container.py +9 -9
- crackerjack/core/performance.py +1 -1
- crackerjack/core/performance_monitor.py +5 -3
- crackerjack/core/phase_coordinator.py +1018 -634
- crackerjack/core/proactive_workflow.py +3 -3
- crackerjack/core/retry.py +275 -0
- crackerjack/core/service_watchdog.py +167 -23
- crackerjack/core/session_coordinator.py +187 -382
- crackerjack/core/timeout_manager.py +161 -44
- crackerjack/core/workflow/__init__.py +21 -0
- crackerjack/core/workflow/workflow_ai_coordinator.py +863 -0
- crackerjack/core/workflow/workflow_event_orchestrator.py +1107 -0
- crackerjack/core/workflow/workflow_issue_parser.py +714 -0
- crackerjack/core/workflow/workflow_phase_executor.py +1158 -0
- crackerjack/core/workflow/workflow_security_gates.py +400 -0
- crackerjack/core/workflow_orchestrator.py +1247 -953
- crackerjack/data/README.md +11 -0
- crackerjack/data/__init__.py +8 -0
- crackerjack/data/models.py +79 -0
- crackerjack/data/repository.py +210 -0
- crackerjack/decorators/README.md +180 -0
- crackerjack/decorators/__init__.py +35 -0
- crackerjack/decorators/error_handling.py +649 -0
- crackerjack/decorators/error_handling_decorators.py +334 -0
- crackerjack/decorators/helpers.py +58 -0
- crackerjack/decorators/patterns.py +281 -0
- crackerjack/decorators/utils.py +58 -0
- crackerjack/docs/README.md +11 -0
- crackerjack/docs/generated/api/CLI_REFERENCE.md +1 -1
- crackerjack/documentation/README.md +11 -0
- crackerjack/documentation/ai_templates.py +1 -1
- crackerjack/documentation/dual_output_generator.py +11 -9
- crackerjack/documentation/reference_generator.py +104 -59
- crackerjack/dynamic_config.py +52 -61
- crackerjack/errors.py +1 -1
- crackerjack/events/README.md +11 -0
- crackerjack/events/__init__.py +16 -0
- crackerjack/events/telemetry.py +175 -0
- crackerjack/events/workflow_bus.py +346 -0
- crackerjack/exceptions/README.md +301 -0
- crackerjack/exceptions/__init__.py +5 -0
- crackerjack/exceptions/config.py +4 -0
- crackerjack/exceptions/tool_execution_error.py +245 -0
- crackerjack/executors/README.md +591 -0
- crackerjack/executors/__init__.py +2 -0
- crackerjack/executors/async_hook_executor.py +539 -77
- crackerjack/executors/cached_hook_executor.py +3 -3
- crackerjack/executors/hook_executor.py +967 -102
- crackerjack/executors/hook_lock_manager.py +31 -22
- crackerjack/executors/individual_hook_executor.py +66 -32
- crackerjack/executors/lsp_aware_hook_executor.py +136 -57
- crackerjack/executors/progress_hook_executor.py +282 -0
- crackerjack/executors/tool_proxy.py +23 -7
- crackerjack/hooks/README.md +485 -0
- crackerjack/hooks/lsp_hook.py +8 -9
- crackerjack/intelligence/README.md +557 -0
- crackerjack/interactive.py +37 -10
- crackerjack/managers/README.md +369 -0
- crackerjack/managers/async_hook_manager.py +41 -57
- crackerjack/managers/hook_manager.py +449 -79
- crackerjack/managers/publish_manager.py +81 -36
- crackerjack/managers/test_command_builder.py +290 -12
- crackerjack/managers/test_executor.py +93 -8
- crackerjack/managers/test_manager.py +1082 -75
- crackerjack/managers/test_progress.py +118 -26
- crackerjack/mcp/README.md +374 -0
- crackerjack/mcp/cache.py +25 -2
- crackerjack/mcp/client_runner.py +35 -18
- crackerjack/mcp/context.py +9 -9
- crackerjack/mcp/dashboard.py +24 -8
- crackerjack/mcp/enhanced_progress_monitor.py +34 -23
- crackerjack/mcp/file_monitor.py +27 -6
- crackerjack/mcp/progress_components.py +45 -34
- crackerjack/mcp/progress_monitor.py +6 -9
- crackerjack/mcp/rate_limiter.py +11 -7
- crackerjack/mcp/server.py +2 -0
- crackerjack/mcp/server_core.py +187 -55
- crackerjack/mcp/service_watchdog.py +12 -9
- crackerjack/mcp/task_manager.py +2 -2
- crackerjack/mcp/tools/README.md +27 -0
- crackerjack/mcp/tools/__init__.py +2 -0
- crackerjack/mcp/tools/core_tools.py +75 -52
- crackerjack/mcp/tools/execution_tools.py +87 -31
- crackerjack/mcp/tools/intelligence_tools.py +2 -2
- crackerjack/mcp/tools/proactive_tools.py +1 -1
- crackerjack/mcp/tools/semantic_tools.py +584 -0
- crackerjack/mcp/tools/utility_tools.py +180 -132
- crackerjack/mcp/tools/workflow_executor.py +87 -46
- crackerjack/mcp/websocket/README.md +31 -0
- crackerjack/mcp/websocket/app.py +11 -1
- crackerjack/mcp/websocket/event_bridge.py +188 -0
- crackerjack/mcp/websocket/jobs.py +27 -4
- crackerjack/mcp/websocket/monitoring/__init__.py +25 -0
- crackerjack/mcp/websocket/monitoring/api/__init__.py +19 -0
- crackerjack/mcp/websocket/monitoring/api/dependencies.py +141 -0
- crackerjack/mcp/websocket/monitoring/api/heatmap.py +154 -0
- crackerjack/mcp/websocket/monitoring/api/intelligence.py +199 -0
- crackerjack/mcp/websocket/monitoring/api/metrics.py +203 -0
- crackerjack/mcp/websocket/monitoring/api/telemetry.py +101 -0
- crackerjack/mcp/websocket/monitoring/dashboard.py +18 -0
- crackerjack/mcp/websocket/monitoring/factory.py +109 -0
- crackerjack/mcp/websocket/monitoring/filters.py +10 -0
- crackerjack/mcp/websocket/monitoring/metrics.py +64 -0
- crackerjack/mcp/websocket/monitoring/models.py +90 -0
- crackerjack/mcp/websocket/monitoring/utils.py +171 -0
- crackerjack/mcp/websocket/monitoring/websocket_manager.py +78 -0
- crackerjack/mcp/websocket/monitoring/websockets/__init__.py +17 -0
- crackerjack/mcp/websocket/monitoring/websockets/dependencies.py +126 -0
- crackerjack/mcp/websocket/monitoring/websockets/heatmap.py +176 -0
- crackerjack/mcp/websocket/monitoring/websockets/intelligence.py +291 -0
- crackerjack/mcp/websocket/monitoring/websockets/metrics.py +291 -0
- crackerjack/mcp/websocket/monitoring_endpoints.py +16 -2930
- crackerjack/mcp/websocket/server.py +1 -3
- crackerjack/mcp/websocket/websocket_handler.py +107 -6
- crackerjack/models/README.md +308 -0
- crackerjack/models/__init__.py +10 -1
- crackerjack/models/config.py +639 -22
- crackerjack/models/config_adapter.py +6 -6
- crackerjack/models/protocols.py +1167 -23
- crackerjack/models/pydantic_models.py +320 -0
- crackerjack/models/qa_config.py +145 -0
- crackerjack/models/qa_results.py +134 -0
- crackerjack/models/results.py +35 -0
- crackerjack/models/semantic_models.py +258 -0
- crackerjack/models/task.py +19 -3
- crackerjack/models/test_models.py +60 -0
- crackerjack/monitoring/README.md +11 -0
- crackerjack/monitoring/ai_agent_watchdog.py +5 -4
- crackerjack/monitoring/metrics_collector.py +4 -3
- crackerjack/monitoring/regression_prevention.py +4 -3
- crackerjack/monitoring/websocket_server.py +4 -241
- crackerjack/orchestration/README.md +340 -0
- crackerjack/orchestration/__init__.py +43 -0
- crackerjack/orchestration/advanced_orchestrator.py +20 -67
- crackerjack/orchestration/cache/README.md +312 -0
- crackerjack/orchestration/cache/__init__.py +37 -0
- crackerjack/orchestration/cache/memory_cache.py +338 -0
- crackerjack/orchestration/cache/tool_proxy_cache.py +340 -0
- crackerjack/orchestration/config.py +297 -0
- crackerjack/orchestration/coverage_improvement.py +13 -6
- crackerjack/orchestration/execution_strategies.py +6 -6
- crackerjack/orchestration/hook_orchestrator.py +1398 -0
- crackerjack/orchestration/strategies/README.md +401 -0
- crackerjack/orchestration/strategies/__init__.py +39 -0
- crackerjack/orchestration/strategies/adaptive_strategy.py +630 -0
- crackerjack/orchestration/strategies/parallel_strategy.py +237 -0
- crackerjack/orchestration/strategies/sequential_strategy.py +299 -0
- crackerjack/orchestration/test_progress_streamer.py +1 -1
- crackerjack/plugins/README.md +11 -0
- crackerjack/plugins/hooks.py +3 -2
- crackerjack/plugins/loader.py +3 -3
- crackerjack/plugins/managers.py +1 -1
- crackerjack/py313.py +191 -0
- crackerjack/security/README.md +11 -0
- crackerjack/services/README.md +374 -0
- crackerjack/services/__init__.py +8 -21
- crackerjack/services/ai/README.md +295 -0
- crackerjack/services/ai/__init__.py +7 -0
- crackerjack/services/ai/advanced_optimizer.py +878 -0
- crackerjack/services/{contextual_ai_assistant.py → ai/contextual_ai_assistant.py} +5 -3
- crackerjack/services/ai/embeddings.py +444 -0
- crackerjack/services/ai/intelligent_commit.py +328 -0
- crackerjack/services/ai/predictive_analytics.py +510 -0
- crackerjack/services/api_extractor.py +5 -3
- crackerjack/services/bounded_status_operations.py +45 -5
- crackerjack/services/cache.py +249 -318
- crackerjack/services/changelog_automation.py +7 -3
- crackerjack/services/command_execution_service.py +305 -0
- crackerjack/services/config_integrity.py +83 -39
- crackerjack/services/config_merge.py +9 -6
- crackerjack/services/config_service.py +198 -0
- crackerjack/services/config_template.py +13 -26
- crackerjack/services/coverage_badge_service.py +6 -4
- crackerjack/services/coverage_ratchet.py +53 -27
- crackerjack/services/debug.py +18 -7
- crackerjack/services/dependency_analyzer.py +4 -4
- crackerjack/services/dependency_monitor.py +13 -13
- crackerjack/services/documentation_generator.py +4 -2
- crackerjack/services/documentation_service.py +62 -33
- crackerjack/services/enhanced_filesystem.py +81 -27
- crackerjack/services/enterprise_optimizer.py +1 -1
- crackerjack/services/error_pattern_analyzer.py +10 -10
- crackerjack/services/file_filter.py +221 -0
- crackerjack/services/file_hasher.py +5 -7
- crackerjack/services/file_io_service.py +361 -0
- crackerjack/services/file_modifier.py +615 -0
- crackerjack/services/filesystem.py +80 -109
- crackerjack/services/git.py +99 -5
- crackerjack/services/health_metrics.py +4 -6
- crackerjack/services/heatmap_generator.py +12 -3
- crackerjack/services/incremental_executor.py +380 -0
- crackerjack/services/initialization.py +101 -49
- crackerjack/services/log_manager.py +2 -2
- crackerjack/services/logging.py +120 -68
- crackerjack/services/lsp_client.py +12 -12
- crackerjack/services/memory_optimizer.py +27 -22
- crackerjack/services/monitoring/README.md +30 -0
- crackerjack/services/monitoring/__init__.py +9 -0
- crackerjack/services/monitoring/dependency_monitor.py +678 -0
- crackerjack/services/monitoring/error_pattern_analyzer.py +676 -0
- crackerjack/services/monitoring/health_metrics.py +716 -0
- crackerjack/services/monitoring/metrics.py +587 -0
- crackerjack/services/{performance_benchmarks.py → monitoring/performance_benchmarks.py} +100 -14
- crackerjack/services/{performance_cache.py → monitoring/performance_cache.py} +21 -15
- crackerjack/services/{performance_monitor.py → monitoring/performance_monitor.py} +10 -6
- crackerjack/services/parallel_executor.py +166 -55
- crackerjack/services/patterns/__init__.py +142 -0
- crackerjack/services/patterns/agents.py +107 -0
- crackerjack/services/patterns/code/__init__.py +15 -0
- crackerjack/services/patterns/code/detection.py +118 -0
- crackerjack/services/patterns/code/imports.py +107 -0
- crackerjack/services/patterns/code/paths.py +159 -0
- crackerjack/services/patterns/code/performance.py +119 -0
- crackerjack/services/patterns/code/replacement.py +36 -0
- crackerjack/services/patterns/core.py +212 -0
- crackerjack/services/patterns/documentation/__init__.py +14 -0
- crackerjack/services/patterns/documentation/badges_markdown.py +96 -0
- crackerjack/services/patterns/documentation/comments_blocks.py +83 -0
- crackerjack/services/patterns/documentation/docstrings.py +89 -0
- crackerjack/services/patterns/formatting.py +226 -0
- crackerjack/services/patterns/operations.py +339 -0
- crackerjack/services/patterns/security/__init__.py +23 -0
- crackerjack/services/patterns/security/code_injection.py +122 -0
- crackerjack/services/patterns/security/credentials.py +190 -0
- crackerjack/services/patterns/security/path_traversal.py +221 -0
- crackerjack/services/patterns/security/unsafe_operations.py +216 -0
- crackerjack/services/patterns/templates.py +62 -0
- crackerjack/services/patterns/testing/__init__.py +18 -0
- crackerjack/services/patterns/testing/error_patterns.py +107 -0
- crackerjack/services/patterns/testing/pytest_output.py +126 -0
- crackerjack/services/patterns/tool_output/__init__.py +16 -0
- crackerjack/services/patterns/tool_output/bandit.py +72 -0
- crackerjack/services/patterns/tool_output/other.py +97 -0
- crackerjack/services/patterns/tool_output/pyright.py +67 -0
- crackerjack/services/patterns/tool_output/ruff.py +44 -0
- crackerjack/services/patterns/url_sanitization.py +114 -0
- crackerjack/services/patterns/utilities.py +42 -0
- crackerjack/services/patterns/utils.py +339 -0
- crackerjack/services/patterns/validation.py +46 -0
- crackerjack/services/patterns/versioning.py +62 -0
- crackerjack/services/predictive_analytics.py +21 -8
- crackerjack/services/profiler.py +280 -0
- crackerjack/services/quality/README.md +415 -0
- crackerjack/services/quality/__init__.py +11 -0
- crackerjack/services/quality/anomaly_detector.py +392 -0
- crackerjack/services/quality/pattern_cache.py +333 -0
- crackerjack/services/quality/pattern_detector.py +479 -0
- crackerjack/services/quality/qa_orchestrator.py +491 -0
- crackerjack/services/{quality_baseline.py → quality/quality_baseline.py} +163 -2
- crackerjack/services/{quality_baseline_enhanced.py → quality/quality_baseline_enhanced.py} +4 -1
- crackerjack/services/{quality_intelligence.py → quality/quality_intelligence.py} +180 -16
- crackerjack/services/regex_patterns.py +58 -2987
- crackerjack/services/regex_utils.py +55 -29
- crackerjack/services/secure_status_formatter.py +42 -15
- crackerjack/services/secure_subprocess.py +35 -2
- crackerjack/services/security.py +16 -8
- crackerjack/services/server_manager.py +40 -51
- crackerjack/services/smart_scheduling.py +46 -6
- crackerjack/services/status_authentication.py +3 -3
- crackerjack/services/thread_safe_status_collector.py +1 -0
- crackerjack/services/tool_filter.py +368 -0
- crackerjack/services/tool_version_service.py +9 -5
- crackerjack/services/unified_config.py +43 -351
- crackerjack/services/vector_store.py +689 -0
- crackerjack/services/version_analyzer.py +6 -4
- crackerjack/services/version_checker.py +14 -8
- crackerjack/services/zuban_lsp_service.py +5 -4
- crackerjack/slash_commands/README.md +11 -0
- crackerjack/slash_commands/init.md +2 -12
- crackerjack/slash_commands/run.md +84 -50
- crackerjack/tools/README.md +11 -0
- crackerjack/tools/__init__.py +30 -0
- crackerjack/tools/_git_utils.py +105 -0
- crackerjack/tools/check_added_large_files.py +139 -0
- crackerjack/tools/check_ast.py +105 -0
- crackerjack/tools/check_json.py +103 -0
- crackerjack/tools/check_jsonschema.py +297 -0
- crackerjack/tools/check_toml.py +103 -0
- crackerjack/tools/check_yaml.py +110 -0
- crackerjack/tools/codespell_wrapper.py +72 -0
- crackerjack/tools/end_of_file_fixer.py +202 -0
- crackerjack/tools/format_json.py +128 -0
- crackerjack/tools/mdformat_wrapper.py +114 -0
- crackerjack/tools/trailing_whitespace.py +198 -0
- crackerjack/tools/validate_regex_patterns.py +7 -3
- crackerjack/ui/README.md +11 -0
- crackerjack/ui/dashboard_renderer.py +28 -0
- crackerjack/ui/templates/README.md +11 -0
- crackerjack/utils/console_utils.py +13 -0
- crackerjack/utils/dependency_guard.py +230 -0
- crackerjack/utils/retry_utils.py +275 -0
- crackerjack/workflows/README.md +590 -0
- crackerjack/workflows/__init__.py +46 -0
- crackerjack/workflows/actions.py +811 -0
- crackerjack/workflows/auto_fix.py +444 -0
- crackerjack/workflows/container_builder.py +499 -0
- crackerjack/workflows/definitions.py +443 -0
- crackerjack/workflows/engine.py +177 -0
- crackerjack/workflows/event_bridge.py +242 -0
- {crackerjack-0.37.9.dist-info → crackerjack-0.45.2.dist-info}/METADATA +678 -98
- crackerjack-0.45.2.dist-info/RECORD +478 -0
- {crackerjack-0.37.9.dist-info → crackerjack-0.45.2.dist-info}/WHEEL +1 -1
- crackerjack/managers/test_manager_backup.py +0 -1075
- crackerjack/mcp/tools/execution_tools_backup.py +0 -1011
- crackerjack/mixins/__init__.py +0 -3
- crackerjack/mixins/error_handling.py +0 -145
- crackerjack/services/config.py +0 -358
- crackerjack/ui/server_panels.py +0 -125
- crackerjack-0.37.9.dist-info/RECORD +0 -231
- /crackerjack/adapters/{rust_tool_adapter.py → lsp/_base.py} +0 -0
- /crackerjack/adapters/{lsp_client.py → lsp/_client.py} +0 -0
- {crackerjack-0.37.9.dist-info → crackerjack-0.45.2.dist-info}/entry_points.txt +0 -0
- {crackerjack-0.37.9.dist-info → crackerjack-0.45.2.dist-info}/licenses/LICENSE +0 -0
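The largest single change below is to crackerjack/executors/hook_executor.py, which gains optional progress callbacks, incremental (changed-files-only) execution, and richer HookResult metadata. As a rough, hypothetical sketch (not code shipped in the package; the helper name and the strategy object are assumptions by the reader), the new callback API introduced in that diff could be driven like this:

    from crackerjack.executors.hook_executor import HookExecutor

    def run_with_progress(executor: HookExecutor, strategy) -> None:
        # Report per-hook start/finish counts while the strategy runs.
        executor.set_progress_callbacks(
            started_cb=lambda started, total: print(f"hook {started}/{total} started"),
            completed_cb=lambda done, total: print(f"hook {done}/{total} finished"),
            total=len(strategy.hooks),
        )
        result = executor.execute_strategy(strategy)
        print(
            f"{result.strategy_name}: {result.failed_count} failed, "
            f"estimated parallel gain {result.performance_gain:.0f}%"
        )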
|
@@ -1,13 +1,16 @@
|
|
|
1
|
+
import json
|
|
1
2
|
import os
|
|
2
3
|
import subprocess
|
|
3
4
|
import time
|
|
4
5
|
import typing as t
|
|
5
6
|
from concurrent.futures import ThreadPoolExecutor, as_completed
|
|
7
|
+
from contextlib import suppress
|
|
6
8
|
from dataclasses import dataclass
|
|
7
9
|
from pathlib import Path
|
|
8
10
|
|
|
9
|
-
from
|
|
11
|
+
from acb.console import Console
|
|
10
12
|
|
|
13
|
+
from crackerjack.config import get_console_width
|
|
11
14
|
from crackerjack.config.hooks import HookDefinition, HookStrategy, RetryPolicy
|
|
12
15
|
from crackerjack.models.task import HookResult
|
|
13
16
|
from crackerjack.services.security_logger import get_security_logger
|
|
@@ -22,6 +25,7 @@ class HookExecutionResult:
|
|
|
22
25
|
concurrent_execution: bool = False
|
|
23
26
|
cache_hits: int = 0
|
|
24
27
|
cache_misses: int = 0
|
|
28
|
+
performance_gain: float = 0.0
|
|
25
29
|
|
|
26
30
|
@property
|
|
27
31
|
def failed_count(self) -> int:
|
|
@@ -57,100 +61,244 @@ class HookExecutor:
|
|
|
57
61
|
pkg_path: Path,
|
|
58
62
|
verbose: bool = False,
|
|
59
63
|
quiet: bool = False,
|
|
64
|
+
debug: bool = False,
|
|
65
|
+
use_incremental: bool = False,
|
|
66
|
+
git_service: t.Any | None = None,
|
|
60
67
|
) -> None:
|
|
61
68
|
self.console = console
|
|
62
69
|
self.pkg_path = pkg_path
|
|
63
70
|
self.verbose = verbose
|
|
64
71
|
self.quiet = quiet
|
|
72
|
+
self.debug = debug
|
|
73
|
+
self.use_incremental = use_incremental
|
|
74
|
+
self.git_service = git_service
|
|
75
|
+
# Optional progress callbacks used when orchestration is disabled
|
|
76
|
+
self._progress_callback: t.Callable[[int, int], None] | None = None
|
|
77
|
+
self._progress_start_callback: t.Callable[[int, int], None] | None = None
|
|
78
|
+
self._total_hooks: int = 0
|
|
79
|
+
self._started_hooks: int = 0
|
|
80
|
+
self._completed_hooks: int = 0
|
|
81
|
+
|
|
82
|
+
def set_progress_callbacks(
|
|
83
|
+
self,
|
|
84
|
+
*,
|
|
85
|
+
started_cb: t.Callable[[int, int], None] | None = None,
|
|
86
|
+
completed_cb: t.Callable[[int, int], None] | None = None,
|
|
87
|
+
total: int | None = None,
|
|
88
|
+
) -> None:
|
|
89
|
+
"""Set optional progress callbacks for legacy execution.
|
|
90
|
+
|
|
91
|
+
Args:
|
|
92
|
+
started_cb: Called when a hook starts with (started, total)
|
|
93
|
+
completed_cb: Called when a hook completes with (completed, total)
|
|
94
|
+
total: Total number of hooks (defaults to len(strategy.hooks))
|
|
95
|
+
"""
|
|
96
|
+
self._progress_start_callback = started_cb
|
|
97
|
+
self._progress_callback = completed_cb
|
|
98
|
+
self._total_hooks = int(total or 0)
|
|
99
|
+
self._started_hooks = 0
|
|
100
|
+
self._completed_hooks = 0
|
|
65
101
|
|
|
66
102
|
def execute_strategy(self, strategy: HookStrategy) -> HookExecutionResult:
|
|
67
103
|
start_time = time.time()
|
|
68
104
|
|
|
69
|
-
|
|
105
|
+
# Header is displayed by PhaseCoordinator; suppress here to avoid duplicates
|
|
106
|
+
|
|
107
|
+
results = self._execute_hooks(strategy)
|
|
108
|
+
|
|
109
|
+
results = self._apply_retries_if_needed(strategy, results)
|
|
70
110
|
|
|
111
|
+
return self._create_execution_result(strategy, results, start_time)
|
|
112
|
+
|
|
113
|
+
def _execute_hooks(self, strategy: HookStrategy) -> list[HookResult]:
|
|
114
|
+
"""Execute hooks based on strategy configuration."""
|
|
71
115
|
if strategy.parallel and len(strategy.hooks) > 1:
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
results = self._execute_sequential(strategy)
|
|
116
|
+
return self._execute_parallel(strategy)
|
|
117
|
+
return self._execute_sequential(strategy)
|
|
75
118
|
|
|
119
|
+
def _apply_retries_if_needed(
|
|
120
|
+
self, strategy: HookStrategy, results: list[HookResult]
|
|
121
|
+
) -> list[HookResult]:
|
|
122
|
+
"""Apply retries if the strategy requires it."""
|
|
76
123
|
if strategy.retry_policy != RetryPolicy.NONE:
|
|
77
|
-
|
|
124
|
+
return self._handle_retries(strategy, results)
|
|
125
|
+
return results
|
|
78
126
|
|
|
127
|
+
def _create_execution_result(
|
|
128
|
+
self, strategy: HookStrategy, results: list[HookResult], start_time: float
|
|
129
|
+
) -> HookExecutionResult:
|
|
130
|
+
"""Create the final execution result with performance metrics."""
|
|
79
131
|
total_duration = time.time() - start_time
|
|
80
132
|
success = all(r.status == "passed" for r in results)
|
|
81
133
|
|
|
134
|
+
performance_gain = self._calculate_performance_gain(
|
|
135
|
+
strategy, results, total_duration
|
|
136
|
+
)
|
|
137
|
+
|
|
82
138
|
if not self.quiet:
|
|
83
|
-
self._print_summary(strategy, results, success)
|
|
139
|
+
self._print_summary(strategy, results, success, performance_gain)
|
|
84
140
|
|
|
85
141
|
return HookExecutionResult(
|
|
86
142
|
strategy_name=strategy.name,
|
|
87
143
|
results=results,
|
|
88
144
|
total_duration=total_duration,
|
|
89
145
|
success=success,
|
|
146
|
+
performance_gain=performance_gain,
|
|
90
147
|
)
|
|
91
148
|
|
|
92
|
-
def
|
|
93
|
-
self
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
)
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
self.console.print(
|
|
104
|
-
f"[bold bright_cyan]🔍 HOOKS[/bold bright_cyan] [bold bright_white]Running {strategy.name} hooks[/bold bright_white]",
|
|
149
|
+
def _calculate_performance_gain(
|
|
150
|
+
self, strategy: HookStrategy, results: list[HookResult], total_duration: float
|
|
151
|
+
) -> float:
|
|
152
|
+
"""Calculate the performance gain from parallel execution."""
|
|
153
|
+
estimated_sequential = sum(
|
|
154
|
+
getattr(hook, "timeout", 30) for hook in strategy.hooks
|
|
155
|
+
)
|
|
156
|
+
return (
|
|
157
|
+
max(
|
|
158
|
+
0,
|
|
159
|
+
((estimated_sequential - total_duration) / estimated_sequential) * 100,
|
|
105
160
|
)
|
|
106
|
-
|
|
161
|
+
if estimated_sequential > 0
|
|
162
|
+
else 0.0
|
|
163
|
+
)
|
|
164
|
+
|
|
165
|
+
def _print_strategy_header(self, strategy: HookStrategy) -> None:
|
|
166
|
+
# Intentionally no-op: PhaseCoordinator controls stage headers
|
|
167
|
+
return None
|
|
107
168
|
|
|
108
169
|
def _execute_sequential(self, strategy: HookStrategy) -> list[HookResult]:
|
|
109
170
|
results: list[HookResult] = []
|
|
171
|
+
total_hooks = len(strategy.hooks)
|
|
172
|
+
|
|
110
173
|
for hook in strategy.hooks:
|
|
174
|
+
self._handle_progress_start(total_hooks)
|
|
111
175
|
result = self.execute_single_hook(hook)
|
|
112
176
|
results.append(result)
|
|
113
177
|
self._display_hook_result(result)
|
|
178
|
+
self._handle_progress_completion(total_hooks)
|
|
114
179
|
return results
|
|
115
180
|
|
|
181
|
+
def _handle_progress_start(self, total_hooks: int) -> None:
|
|
182
|
+
"""Handle progress start callback."""
|
|
183
|
+
if self._progress_start_callback:
|
|
184
|
+
with suppress(Exception):
|
|
185
|
+
self._started_hooks += 1
|
|
186
|
+
total = self._total_hooks or total_hooks
|
|
187
|
+
self._progress_start_callback(self._started_hooks, total)
|
|
188
|
+
|
|
189
|
+
def _handle_progress_completion(self, total_hooks: int) -> None:
|
|
190
|
+
"""Handle progress completion callback."""
|
|
191
|
+
if self._progress_callback:
|
|
192
|
+
with suppress(Exception):
|
|
193
|
+
self._completed_hooks += 1
|
|
194
|
+
total = self._total_hooks or total_hooks
|
|
195
|
+
self._progress_callback(self._completed_hooks, total)
|
|
196
|
+
|
|
116
197
|
def _execute_parallel(self, strategy: HookStrategy) -> list[HookResult]:
|
|
117
198
|
results: list[HookResult] = []
|
|
118
199
|
|
|
119
200
|
formatting_hooks = [h for h in strategy.hooks if h.is_formatting]
|
|
120
201
|
other_hooks = [h for h in strategy.hooks if not h.is_formatting]
|
|
121
202
|
|
|
203
|
+
# Execute formatting hooks sequentially first
|
|
122
204
|
for hook in formatting_hooks:
|
|
123
|
-
|
|
124
|
-
results.append(result)
|
|
125
|
-
self._display_hook_result(result)
|
|
205
|
+
self._execute_single_hook_with_progress(hook, results)
|
|
126
206
|
|
|
207
|
+
# Execute other hooks in parallel
|
|
127
208
|
if other_hooks:
|
|
128
|
-
|
|
129
|
-
future_to_hook = {
|
|
130
|
-
executor.submit(self.execute_single_hook, hook): hook
|
|
131
|
-
for hook in other_hooks
|
|
132
|
-
}
|
|
133
|
-
|
|
134
|
-
for future in as_completed(future_to_hook):
|
|
135
|
-
try:
|
|
136
|
-
result = future.result()
|
|
137
|
-
results.append(result)
|
|
138
|
-
self._display_hook_result(result)
|
|
139
|
-
except Exception as e:
|
|
140
|
-
hook = future_to_hook[future]
|
|
141
|
-
error_result = HookResult(
|
|
142
|
-
id=hook.name,
|
|
143
|
-
name=hook.name,
|
|
144
|
-
status="error",
|
|
145
|
-
duration=0.0,
|
|
146
|
-
issues_found=[str(e)],
|
|
147
|
-
stage=hook.stage.value,
|
|
148
|
-
)
|
|
149
|
-
results.append(error_result)
|
|
150
|
-
self._display_hook_result(error_result)
|
|
209
|
+
self._execute_parallel_hooks(other_hooks, strategy, results)
|
|
151
210
|
|
|
152
211
|
return results
|
|
153
212
|
|
|
213
|
+
def _execute_single_hook_with_progress(
|
|
214
|
+
self, hook: HookDefinition, results: list[HookResult]
|
|
215
|
+
) -> None:
|
|
216
|
+
"""Execute a single hook and update progress callbacks."""
|
|
217
|
+
if self._progress_start_callback:
|
|
218
|
+
with suppress(Exception):
|
|
219
|
+
self._started_hooks += 1
|
|
220
|
+
total = self._total_hooks or len(results) + 1 # Approximate total
|
|
221
|
+
self._progress_start_callback(self._started_hooks, total)
|
|
222
|
+
|
|
223
|
+
result = self.execute_single_hook(hook)
|
|
224
|
+
results.append(result)
|
|
225
|
+
self._display_hook_result(result)
|
|
226
|
+
|
|
227
|
+
if self._progress_callback:
|
|
228
|
+
with suppress(Exception):
|
|
229
|
+
self._completed_hooks += 1
|
|
230
|
+
total = self._total_hooks or len(results)
|
|
231
|
+
self._progress_callback(self._completed_hooks, total)
|
|
232
|
+
|
|
233
|
+
def _execute_parallel_hooks(
|
|
234
|
+
self,
|
|
235
|
+
other_hooks: list[HookDefinition],
|
|
236
|
+
strategy: HookStrategy,
|
|
237
|
+
results: list[HookResult],
|
|
238
|
+
) -> None:
|
|
239
|
+
"""Execute non-formatting hooks in parallel."""
|
|
240
|
+
|
|
241
|
+
# Use helper function to run hooks with progress tracking
|
|
242
|
+
run_hook_func = self._create_run_hook_func(results, other_hooks)
|
|
243
|
+
|
|
244
|
+
with ThreadPoolExecutor(max_workers=strategy.max_workers) as executor:
|
|
245
|
+
future_to_hook = {
|
|
246
|
+
executor.submit(run_hook_func, hook): hook for hook in other_hooks
|
|
247
|
+
}
|
|
248
|
+
|
|
249
|
+
for future in as_completed(future_to_hook):
|
|
250
|
+
self._handle_future_result(future, future_to_hook, results)
|
|
251
|
+
|
|
252
|
+
def _create_run_hook_func(
|
|
253
|
+
self, results: list[HookResult], other_hooks: list[HookDefinition]
|
|
254
|
+
) -> t.Callable[[HookDefinition], HookResult]:
|
|
255
|
+
"""Create a function that runs a hook with progress tracking."""
|
|
256
|
+
|
|
257
|
+
def _run_with_start(h: HookDefinition) -> HookResult:
|
|
258
|
+
if self._progress_start_callback:
|
|
259
|
+
with suppress(Exception):
|
|
260
|
+
self._started_hooks += 1
|
|
261
|
+
total_local = self._total_hooks or len(results) + len(other_hooks)
|
|
262
|
+
self._progress_start_callback(self._started_hooks, total_local)
|
|
263
|
+
return self.execute_single_hook(h)
|
|
264
|
+
|
|
265
|
+
return _run_with_start
|
|
266
|
+
|
|
267
|
+
def _handle_future_result(
|
|
268
|
+
self, future, future_to_hook: dict, results: list[HookResult]
|
|
269
|
+
) -> None:
|
|
270
|
+
"""Handle the result of a completed future from thread pool execution."""
|
|
271
|
+
try:
|
|
272
|
+
result = future.result()
|
|
273
|
+
results.append(result)
|
|
274
|
+
self._display_hook_result(result)
|
|
275
|
+
self._update_progress_on_completion()
|
|
276
|
+
except Exception as e:
|
|
277
|
+
hook = future_to_hook[future]
|
|
278
|
+
error_result = HookResult(
|
|
279
|
+
id=hook.name,
|
|
280
|
+
name=hook.name,
|
|
281
|
+
status="error",
|
|
282
|
+
duration=0.0,
|
|
283
|
+
issues_found=[str(e)],
|
|
284
|
+
issues_count=1, # Error counts as 1 issue
|
|
285
|
+
stage=hook.stage.value,
|
|
286
|
+
exit_code=1,
|
|
287
|
+
error_message=str(e),
|
|
288
|
+
is_timeout=False,
|
|
289
|
+
)
|
|
290
|
+
results.append(error_result)
|
|
291
|
+
self._display_hook_result(error_result)
|
|
292
|
+
self._update_progress_on_completion()
|
|
293
|
+
|
|
294
|
+
def _update_progress_on_completion(self) -> None:
|
|
295
|
+
"""Update progress callback when a hook completes."""
|
|
296
|
+
if self._progress_callback:
|
|
297
|
+
with suppress(Exception):
|
|
298
|
+
self._completed_hooks += 1
|
|
299
|
+
total = self._total_hooks or self._completed_hooks # Approximate total
|
|
300
|
+
self._progress_callback(self._completed_hooks, total)
|
|
301
|
+
|
|
154
302
|
def execute_single_hook(self, hook: HookDefinition) -> HookResult:
|
|
155
303
|
start_time = time.time()
|
|
156
304
|
|
|
@@ -158,7 +306,7 @@ class HookExecutor:
|
|
|
158
306
|
result = self._run_hook_subprocess(hook)
|
|
159
307
|
duration = time.time() - start_time
|
|
160
308
|
|
|
161
|
-
self._display_hook_output_if_needed(result)
|
|
309
|
+
self._display_hook_output_if_needed(result, hook.name)
|
|
162
310
|
return self._create_hook_result_from_process(hook, result, duration)
|
|
163
311
|
|
|
164
312
|
except subprocess.TimeoutExpired:
|
|
@@ -167,6 +315,49 @@ class HookExecutor:
|
|
|
167
315
|
except Exception as e:
|
|
168
316
|
return self._create_error_result(hook, start_time, e)
|
|
169
317
|
|
|
318
|
+
def _get_changed_files_for_hook(self, hook: HookDefinition) -> list[Path] | None:
|
|
319
|
+
"""Get changed files for incremental execution if supported.
|
|
320
|
+
|
|
321
|
+
Returns:
|
|
322
|
+
List of changed files if incremental mode enabled and hook supports it,
|
|
323
|
+
None if full scan should be used (no changes or hook doesn't support files)
|
|
324
|
+
"""
|
|
325
|
+
if not self.use_incremental or not hook.accepts_file_paths:
|
|
326
|
+
return None
|
|
327
|
+
|
|
328
|
+
if not self.git_service:
|
|
329
|
+
return None
|
|
330
|
+
|
|
331
|
+
# Map hook names to file extensions
|
|
332
|
+
extension_map = {
|
|
333
|
+
"ruff-check": [".py"],
|
|
334
|
+
"ruff-format": [".py"],
|
|
335
|
+
"mdformat": [".md"],
|
|
336
|
+
"refurb": [".py"],
|
|
337
|
+
"skylos": [".py"],
|
|
338
|
+
"complexipy": [".py"],
|
|
339
|
+
"semgrep": [".py"],
|
|
340
|
+
"check-yaml": [".yaml", ".yml"],
|
|
341
|
+
"check-toml": [".toml"],
|
|
342
|
+
"check-json": [".json"],
|
|
343
|
+
"check-ast": [".py"],
|
|
344
|
+
"format-json": [".json"],
|
|
345
|
+
"codespell": [".py", ".md", ".txt", ".rst"],
|
|
346
|
+
"check-jsonschema": [".json", ".yaml", ".yml"],
|
|
347
|
+
"trailing-whitespace": [""], # All files
|
|
348
|
+
"end-of-file-fixer": [""], # All files
|
|
349
|
+
}
|
|
350
|
+
|
|
351
|
+
extensions = extension_map.get(hook.name)
|
|
352
|
+
if not extensions:
|
|
353
|
+
return None
|
|
354
|
+
|
|
355
|
+
changed_files = self.git_service.get_changed_files_by_extension(extensions)
|
|
356
|
+
|
|
357
|
+
# If no files changed, return None to skip the hook entirely
|
|
358
|
+
# (or run full scan depending on configuration)
|
|
359
|
+
return changed_files or None
|
|
360
|
+
|
|
170
361
|
def _run_hook_subprocess(
|
|
171
362
|
self, hook: HookDefinition
|
|
172
363
|
) -> subprocess.CompletedProcess[str]:
|
|
@@ -175,8 +366,18 @@ class HookExecutor:
|
|
|
175
366
|
try:
|
|
176
367
|
repo_root = self.pkg_path
|
|
177
368
|
|
|
369
|
+
# Get changed files for incremental execution
|
|
370
|
+
changed_files = self._get_changed_files_for_hook(hook)
|
|
371
|
+
|
|
372
|
+
# Use build_command with files if incremental, otherwise get_command
|
|
373
|
+
command = (
|
|
374
|
+
hook.build_command(changed_files)
|
|
375
|
+
if changed_files
|
|
376
|
+
else hook.get_command()
|
|
377
|
+
)
|
|
378
|
+
|
|
178
379
|
return subprocess.run(
|
|
179
|
-
|
|
380
|
+
command,
|
|
180
381
|
cwd=repo_root,
|
|
181
382
|
env=clean_env,
|
|
182
383
|
timeout=hook.timeout,
|
|
@@ -197,8 +398,12 @@ class HookExecutor:
|
|
|
197
398
|
)
|
|
198
399
|
|
|
199
400
|
def _display_hook_output_if_needed(
|
|
200
|
-
self, result: subprocess.CompletedProcess[str]
|
|
401
|
+
self, result: subprocess.CompletedProcess[str], hook_name: str = ""
|
|
201
402
|
) -> None:
|
|
403
|
+
# For complexipy, only show output when --debug flag is set
|
|
404
|
+
if hook_name == "complexipy" and not self.debug:
|
|
405
|
+
return
|
|
406
|
+
|
|
202
407
|
if result.returncode == 0 or not self.verbose:
|
|
203
408
|
return
|
|
204
409
|
|
|
@@ -213,38 +418,179 @@ class HookExecutor:
|
|
|
213
418
|
result: subprocess.CompletedProcess[str],
|
|
214
419
|
duration: float,
|
|
215
420
|
) -> HookResult:
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
if "files were modified by this hook" in output_text:
|
|
219
|
-
status = "passed"
|
|
220
|
-
else:
|
|
221
|
-
status = "failed"
|
|
222
|
-
else:
|
|
223
|
-
status = "passed" if result.returncode == 0 else "failed"
|
|
421
|
+
# Determine initial status
|
|
422
|
+
status = self._determine_initial_status(hook, result)
|
|
224
423
|
|
|
424
|
+
# Extract issues
|
|
225
425
|
issues_found = self._extract_issues_from_process_output(hook, result, status)
|
|
226
426
|
|
|
427
|
+
# Update status for reporting tools
|
|
428
|
+
status = self._update_status_for_reporting_tools(
|
|
429
|
+
hook, status, issues_found, result
|
|
430
|
+
)
|
|
431
|
+
|
|
432
|
+
# Parse hook output to extract file count
|
|
433
|
+
parsed_output = self._parse_hook_output(result, hook.name)
|
|
434
|
+
|
|
435
|
+
# Determine exit code and error message
|
|
436
|
+
exit_code, error_message = self._determine_exit_code_and_error(status, result)
|
|
437
|
+
|
|
438
|
+
# Handle case where hook failed but has no parsed issues
|
|
439
|
+
issues_found = self._handle_no_issues_for_failed_hook(
|
|
440
|
+
status, issues_found, result
|
|
441
|
+
)
|
|
442
|
+
|
|
443
|
+
# Calculate issue count
|
|
444
|
+
issues_count = self._calculate_issues_count(status, issues_found)
|
|
445
|
+
|
|
227
446
|
return HookResult(
|
|
228
447
|
id=hook.name,
|
|
229
448
|
name=hook.name,
|
|
230
449
|
status=status,
|
|
231
450
|
duration=duration,
|
|
232
|
-
files_processed=
|
|
451
|
+
files_processed=parsed_output["files_processed"],
|
|
233
452
|
issues_found=issues_found,
|
|
453
|
+
issues_count=issues_count,
|
|
234
454
|
stage=hook.stage.value,
|
|
455
|
+
exit_code=exit_code,
|
|
456
|
+
error_message=error_message,
|
|
457
|
+
is_timeout=False, # Set by timeout handler if applicable
|
|
235
458
|
)
|
|
236
459
|
|
|
460
|
+
def _determine_initial_status(
|
|
461
|
+
self, hook: HookDefinition, result: subprocess.CompletedProcess[str]
|
|
462
|
+
) -> str:
|
|
463
|
+
"""Determine the initial status of the hook."""
|
|
464
|
+
reporting_tools = {"complexipy", "refurb", "gitleaks", "creosote"}
|
|
465
|
+
|
|
466
|
+
if self.debug and hook.name in reporting_tools:
|
|
467
|
+
self.console.print(
|
|
468
|
+
f"[yellow]DEBUG _create_hook_result_from_process: hook={hook.name}, "
|
|
469
|
+
f"returncode={result.returncode}[/yellow]"
|
|
470
|
+
)
|
|
471
|
+
|
|
472
|
+
if hook.is_formatting and result.returncode == 1:
|
|
473
|
+
output_text = result.stdout + result.stderr
|
|
474
|
+
if "files were modified by this hook" in output_text:
|
|
475
|
+
return "passed"
|
|
476
|
+
else:
|
|
477
|
+
return "failed"
|
|
478
|
+
else:
|
|
479
|
+
# Initial status based on exit code
|
|
480
|
+
return "passed" if result.returncode == 0 else "failed"
|
|
481
|
+
|
|
482
|
+
def _update_status_for_reporting_tools(
|
|
483
|
+
self,
|
|
484
|
+
hook: HookDefinition,
|
|
485
|
+
status: str,
|
|
486
|
+
issues_found: list[str],
|
|
487
|
+
result: subprocess.CompletedProcess[str] | None = None,
|
|
488
|
+
) -> str:
|
|
489
|
+
"""Update status for reporting tools if there are issues."""
|
|
490
|
+
reporting_tools = {"complexipy", "refurb", "gitleaks", "creosote"}
|
|
491
|
+
|
|
492
|
+
if hook.name in reporting_tools and issues_found:
|
|
493
|
+
status = "failed"
|
|
494
|
+
|
|
495
|
+
# Debug: Log status for reporting tools
|
|
496
|
+
if hook.name in reporting_tools and self.debug and result:
|
|
497
|
+
self.console.print(
|
|
498
|
+
f"[yellow]DEBUG {hook.name}: returncode={result.returncode}, "
|
|
499
|
+
f"issues={len(issues_found)}, status={status}[/yellow]"
|
|
500
|
+
)
|
|
501
|
+
|
|
502
|
+
return status
|
|
503
|
+
|
|
504
|
+
def _determine_exit_code_and_error(
|
|
505
|
+
self, status: str, result: subprocess.CompletedProcess[str]
|
|
506
|
+
) -> tuple[int | None, str | None]:
|
|
507
|
+
"""Determine exit code and error message."""
|
|
508
|
+
exit_code = result.returncode if status == "failed" else None
|
|
509
|
+
error_message = None
|
|
510
|
+
if status == "failed" and result.stderr.strip():
|
|
511
|
+
# Capture stderr for failed hooks (truncate if very long)
|
|
512
|
+
error_message = result.stderr.strip()[:500]
|
|
513
|
+
return exit_code, error_message
|
|
514
|
+
|
|
515
|
+
def _handle_no_issues_for_failed_hook(
|
|
516
|
+
self,
|
|
517
|
+
status: str,
|
|
518
|
+
issues_found: list[str],
|
|
519
|
+
result: subprocess.CompletedProcess[str],
|
|
520
|
+
) -> list[str]:
|
|
521
|
+
"""Handle the case where a hook failed but has no parsed issues."""
|
|
522
|
+
if status == "failed" and not issues_found:
|
|
523
|
+
output_text = (result.stdout + result.stderr).strip()
|
|
524
|
+
if output_text:
|
|
525
|
+
# Split output into lines and take first 10 non-empty lines as issues
|
|
526
|
+
error_lines = [
|
|
527
|
+
line.strip() for line in output_text.split("\n") if line.strip()
|
|
528
|
+
][:10]
|
|
529
|
+
issues_found = error_lines or ["Hook failed with non-zero exit code"]
|
|
530
|
+
return issues_found
|
|
531
|
+
|
|
532
|
+
def _calculate_issues_count(self, status: str, issues_found: list[str]) -> int:
|
|
533
|
+
"""Calculate the number of issues."""
|
|
534
|
+
return max(len(issues_found), 1 if status == "failed" else 0)
|
|
535
|
+
|
|
237
536
|
def _extract_issues_from_process_output(
|
|
238
537
|
self,
|
|
239
538
|
hook: HookDefinition,
|
|
240
539
|
result: subprocess.CompletedProcess[str],
|
|
241
540
|
status: str,
|
|
242
541
|
) -> list[str]:
|
|
542
|
+
error_output = (result.stdout + result.stderr).strip()
|
|
543
|
+
|
|
544
|
+
# These tools are reporting/analysis tools that return exit code 0 even when finding issues
|
|
545
|
+
# They need special parsing regardless of exit code status
|
|
546
|
+
reporting_tools = {"complexipy", "refurb", "gitleaks", "creosote"}
|
|
547
|
+
|
|
548
|
+
if self.debug and hook.name in reporting_tools:
|
|
549
|
+
self.console.print(
|
|
550
|
+
f"[yellow]DEBUG _extract_issues: hook={hook.name}, status={status}, "
|
|
551
|
+
f"output_len={len(error_output)}[/yellow]"
|
|
552
|
+
)
|
|
553
|
+
|
|
554
|
+
# Handle special parsing tools first
|
|
555
|
+
if hook.name == "semgrep":
|
|
556
|
+
return self._parse_semgrep_issues(error_output)
|
|
557
|
+
|
|
558
|
+
# Handle reporting tools that always need parsing
|
|
559
|
+
if hook.name in reporting_tools:
|
|
560
|
+
return self._extract_issues_for_reporting_tools(hook, error_output)
|
|
561
|
+
|
|
562
|
+
# For non-reporting tools, only parse output if they failed
|
|
563
|
+
return self._extract_issues_for_regular_tools(
|
|
564
|
+
hook, error_output, status, result
|
|
565
|
+
)
|
|
566
|
+
|
|
567
|
+
def _extract_issues_for_reporting_tools(
|
|
568
|
+
self, hook: HookDefinition, error_output: str
|
|
569
|
+
) -> list[str]:
|
|
570
|
+
"""Extract issues from reporting tools."""
|
|
571
|
+
# Always parse output for reporting tools (they exit 0 even with findings)
|
|
572
|
+
if hook.name == "complexipy":
|
|
573
|
+
return self._parse_complexipy_issues(error_output)
|
|
574
|
+
if hook.name == "refurb":
|
|
575
|
+
return self._parse_refurb_issues(error_output)
|
|
576
|
+
if hook.name == "gitleaks":
|
|
577
|
+
return self._parse_gitleaks_issues(error_output)
|
|
578
|
+
if hook.name == "creosote":
|
|
579
|
+
return self._parse_creosote_issues(error_output)
|
|
580
|
+
return []
|
|
581
|
+
|
|
582
|
+
def _extract_issues_for_regular_tools(
|
|
583
|
+
self,
|
|
584
|
+
hook: HookDefinition,
|
|
585
|
+
error_output: str,
|
|
586
|
+
status: str,
|
|
587
|
+
result: subprocess.CompletedProcess[str],
|
|
588
|
+
) -> list[str]:
|
|
589
|
+
"""Extract issues from regular tools."""
|
|
590
|
+
# For non-reporting tools, only parse output if they failed
|
|
243
591
|
if status == "passed":
|
|
244
592
|
return []
|
|
245
593
|
|
|
246
|
-
error_output = (result.stdout + result.stderr).strip()
|
|
247
|
-
|
|
248
594
|
if hook.is_formatting and "files were modified by this hook" in error_output:
|
|
249
595
|
return []
|
|
250
596
|
|
|
@@ -253,6 +599,251 @@ class HookExecutor:
|
|
|
253
599
|
|
|
254
600
|
return [f"Hook failed with code {result.returncode}"]
|
|
255
601
|
|
|
602
|
+
def _is_header_or_separator_line(self, line: str) -> bool:
|
|
603
|
+
"""Check if the line is a header or separator line."""
|
|
604
|
+
return any(x in line for x in ("Path", "─────", "┌", "└", "├", "┼", "┤", "┃"))
|
|
605
|
+
|
|
606
|
+
def _extract_complexity_from_parts(self, parts: list[str]) -> int | None:
|
|
607
|
+
"""Extract complexity value from line parts."""
|
|
608
|
+
if len(parts) >= 4:
|
|
609
|
+
with suppress(ValueError, IndexError):
|
|
610
|
+
return int(parts[-1])
|
|
611
|
+
return None
|
|
612
|
+
|
|
613
|
+
def _detect_package_from_output(self, output: str) -> str:
|
|
614
|
+
"""Auto-detect package name from tool output.
|
|
615
|
+
|
|
616
|
+
Looks for common patterns like:
|
|
617
|
+
- Table rows with paths: │ ./package_name/...
|
|
618
|
+
- File paths: package_name/file.py
|
|
619
|
+
|
|
620
|
+
Returns:
|
|
621
|
+
Detected package name, or falls back to pkg_path detection
|
|
622
|
+
"""
|
|
623
|
+
import re
|
|
624
|
+
from collections import Counter
|
|
625
|
+
|
|
626
|
+
# Try to extract from file paths in output (format: ./package_name/file.py)
|
|
627
|
+
path_pattern = r"\./([a-z_][a-z0-9_]*)/[a-z_]"
|
|
628
|
+
matches = re.findall(path_pattern, output, re.IGNORECASE)
|
|
629
|
+
|
|
630
|
+
if matches:
|
|
631
|
+
# Return most common package name found
|
|
632
|
+
return Counter(matches).most_common(1)[0][0]
|
|
633
|
+
|
|
634
|
+
# Fallback to detecting from pyproject.toml (existing logic)
|
|
635
|
+
from crackerjack.config.tool_commands import _detect_package_name_cached
|
|
636
|
+
|
|
637
|
+
return _detect_package_name_cached(str(self.pkg_path))
|
|
638
|
+
|
|
639
|
+
def _should_include_line(self, line: str, package_name: str) -> bool:
|
|
640
|
+
"""Check if the line should be included in the output.
|
|
641
|
+
|
|
642
|
+
Args:
|
|
643
|
+
line: Line from complexipy output
|
|
644
|
+
package_name: Name of the package being scanned
|
|
645
|
+
|
|
646
|
+
Returns:
|
|
647
|
+
True if line contains the package name and is a table row
|
|
648
|
+
"""
|
|
649
|
+
return "│" in line and package_name in line
|
|
650
|
+
|
|
651
|
+
def _parse_complexipy_issues(self, output: str) -> list[str]:
|
|
652
|
+
"""Parse complexipy table output to count actual violations (complexity > 15)."""
|
|
653
|
+
# Auto-detect package name from output
|
|
654
|
+
package_name = self._detect_package_from_output(output)
|
|
655
|
+
|
|
656
|
+
issues = []
|
|
657
|
+
for line in output.split("\n"):
|
|
658
|
+
# Match table rows: │ path │ file │ function │ complexity │
|
|
659
|
+
if self._should_include_line(line, package_name):
|
|
660
|
+
# Skip header/separator rows
|
|
661
|
+
if not self._is_header_or_separator_line(line):
|
|
662
|
+
# Extract complexity value (last column)
|
|
663
|
+
parts = [p.strip() for p in line.split("│") if p.strip()]
|
|
664
|
+
complexity = self._extract_complexity_from_parts(parts)
|
|
665
|
+
# Only count functions exceeding limit (15)
|
|
666
|
+
if complexity is not None and complexity > 15:
|
|
667
|
+
issues.append(line.strip())
|
|
668
|
+
return issues
|
|
669
|
+
|
|
670
|
+
def _parse_refurb_issues(self, output: str) -> list[str]:
|
|
671
|
+
"""Parse refurb output to count actual violations with shortened paths.
|
|
672
|
+
|
|
673
|
+
Refurb output format: "path/to/file.py: line: col [FURB###]: message"
|
|
674
|
+
Returns format: "relative/path.py:line [FURB###] message"
|
|
675
|
+
"""
|
|
676
|
+
import re
|
|
677
|
+
|
|
678
|
+
issues = []
|
|
679
|
+
for line in output.split("\n"):
|
|
680
|
+
if "[FURB" not in line or ":" not in line:
|
|
681
|
+
continue
|
|
682
|
+
|
|
683
|
+
# Match refurb format: path: line: col [FURB###]: message
|
|
684
|
+
# Example: ./crackerjack/core/phase.py: 42: 10 [FURB123]: Use dict.get() instead
|
|
685
|
+
# Note: Allow spaces after colons (": 42: 10" not ":42:10")
|
|
686
|
+
match = re.search(
|
|
687
|
+
r"(.+?):\s*(\d+):\s*\d+\s+\[(\w+)\]:\s*(.+)", line.strip()
|
|
688
|
+
)
|
|
689
|
+
|
|
690
|
+
if match:
|
|
691
|
+
file_path, line_num, error_code, message = match.groups()
|
|
692
|
+
|
|
693
|
+
# Shorten path to be relative to project root
|
|
694
|
+
short_path = self._shorten_path(file_path)
|
|
695
|
+
|
|
696
|
+
# Format: path:line [CODE] message
|
|
697
|
+
formatted = f"{short_path}:{line_num} [{error_code}] {message.strip()}"
|
|
698
|
+
issues.append(formatted)
|
|
699
|
+
else:
|
|
700
|
+
# Fallback: keep original line if parsing fails
|
|
701
|
+
issues.append(line.strip())
|
|
702
|
+
|
|
703
|
+
return issues
|
|
704
|
+
|
|
705
|
+
def _shorten_path(self, path: str) -> str:
|
|
706
|
+
"""Shorten file path to be relative to project root.
|
|
707
|
+
|
|
708
|
+
Args:
|
|
709
|
+
path: Absolute or relative file path
|
|
710
|
+
|
|
711
|
+
Returns:
|
|
712
|
+
Shortened path relative to pkg_path, or basename if outside project
|
|
713
|
+
"""
|
|
714
|
+
try:
|
|
715
|
+
# Convert to Path object
|
|
716
|
+
file_path = Path(path)
|
|
717
|
+
|
|
718
|
+
# Try to make it relative to pkg_path if it's absolute
|
|
719
|
+
if file_path.is_absolute():
|
|
720
|
+
try:
|
|
721
|
+
relative = file_path.relative_to(self.pkg_path)
|
|
722
|
+
return str(relative).replace("\\", "/")
|
|
723
|
+
except ValueError:
|
|
724
|
+
# Path is outside project, just use basename
|
|
725
|
+
return file_path.name
|
|
726
|
+
|
|
727
|
+
# Already relative - clean up by removing leading "./"
|
|
728
|
+
clean_path = str(file_path).lstrip("./")
|
|
729
|
+
return clean_path.replace("\\", "/")
|
|
730
|
+
|
|
731
|
+
except Exception:
|
|
732
|
+
# Fallback: return original path
|
|
733
|
+
return path
|
|
734
|
+
|
|
735
|
+
def _parse_gitleaks_issues(self, output: str) -> list[str]:
|
|
736
|
+
"""Parse gitleaks output - ignore warnings, only count leaks."""
|
|
737
|
+
# Gitleaks outputs "no leaks found" when clean
|
|
738
|
+
if "no leaks found" in output.lower():
|
|
739
|
+
return []
|
|
740
|
+
return [
|
|
741
|
+
line.strip()
|
|
742
|
+
for line in output.split("\n")
|
|
743
|
+
if not (
|
|
744
|
+
"WRN" in line and "Invalid .gitleaksignore" in line
|
|
745
|
+
) # Skip warnings about .gitleaksignore format
|
|
746
|
+
and any(
|
|
747
|
+
x in line.lower() for x in ("leak", "secret", "credential", "api")
|
|
748
|
+
) # Look for actual leak findings
|
|
749
|
+
and "found" not in line.lower() # Skip summary lines
|
|
750
|
+
]
|
|
751
|
+
|
|
752
|
+
def _parse_creosote_issues(self, output: str) -> list[str]:
|
|
753
|
+
"""Parse creosote output - only count unused dependencies."""
|
|
754
|
+
if "No unused dependencies found" in output:
|
|
755
|
+
return []
|
|
756
|
+
issues = []
|
|
757
|
+
parsing_unused = False
|
|
758
|
+
for line in output.split("\n"):
|
|
759
|
+
if "unused" in line.lower() and "dependenc" in line.lower():
|
|
760
|
+
parsing_unused = True
|
|
761
|
+
continue
|
|
762
|
+
if parsing_unused and line.strip() and not line.strip().startswith("["):
|
|
763
|
+
# Dependency names (not ANSI color codes)
|
|
764
|
+
dep_name = line.strip().lstrip("- ")
|
|
765
|
+
if dep_name:
|
|
766
|
+
issues.append(f"Unused dependency: {dep_name}")
|
|
767
|
+
if not line.strip():
|
|
768
|
+
parsing_unused = False
|
|
769
|
+
return issues
|
|
770
|
+
|
|
771
|
+
def _parse_semgrep_issues(self, output: str) -> list[str]:
|
|
772
|
+
"""Parse semgrep JSON output to extract both findings and errors.
|
|
773
|
+
|
|
774
|
+
Semgrep returns JSON with two arrays:
|
|
775
|
+
- "results": Security/code quality findings
|
|
776
|
+
- "errors": Configuration, download, or execution errors
|
|
777
|
+
|
|
778
|
+
Error categorization:
|
|
779
|
+
- CODE_ERROR_TYPES: Actual code issues that should fail the build
|
|
780
|
+
- INFRA_ERROR_TYPES: Infrastructure issues (network, timeouts) that should warn only
|
|
781
|
+
|
|
782
|
+
This method extracts issues from both arrays to provide comprehensive error reporting.
|
|
783
|
+
"""
|
|
784
|
+
import json
|
|
785
|
+
|
|
786
|
+
try:
|
|
787
|
+
# Try to parse as JSON
|
|
788
|
+
json_data = json.loads(output.strip())
|
|
789
|
+
issues = []
|
|
790
|
+
|
|
791
|
+
# Extract findings from results array
|
|
792
|
+
issues.extend(self._extract_semgrep_results(json_data))
|
|
793
|
+
|
|
794
|
+
# Extract errors from errors array with categorization
|
|
795
|
+
issues.extend(self._extract_semgrep_errors(json_data))
|
|
796
|
+
|
|
797
|
+
return issues
|
|
798
|
+
|
|
799
|
+
except json.JSONDecodeError:
|
|
800
|
+
# If JSON parsing fails, return raw output (shouldn't happen with --json flag)
|
|
801
|
+
if output.strip():
|
|
802
|
+
return [line.strip() for line in output.split("\n") if line.strip()][
|
|
803
|
+
:10
|
|
804
|
+
]
|
|
805
|
+
|
|
806
|
+
return []
|
|
807
|
+
|
|
808
|
+
def _extract_semgrep_results(self, json_data: dict) -> list[str]:
|
|
809
|
+
"""Extract findings from semgrep results."""
|
|
810
|
+
issues = []
|
|
811
|
+
for result in json_data.get("results", []):
|
|
812
|
+
# Format: "file.py:line - rule_id: message"
|
|
813
|
+
path = result.get("path", "unknown")
|
|
814
|
+
line_num = result.get("start", {}).get("line", "?")
|
|
815
|
+
rule_id = result.get("check_id", "unknown-rule")
|
|
816
|
+
message = result.get("extra", {}).get("message", "Security issue detected")
|
|
817
|
+
issues.append(f"{path}:{line_num} - {rule_id}: {message}")
|
|
818
|
+
return issues
|
|
819
|
+
|
|
820
|
+
def _extract_semgrep_errors(self, json_data: dict) -> list[str]:
|
|
821
|
+
"""Extract errors from semgrep errors with categorization."""
|
|
822
|
+
issues = []
|
|
823
|
+
INFRA_ERROR_TYPES = {
|
|
824
|
+
"NetworkError",
|
|
825
|
+
"DownloadError",
|
|
826
|
+
"TimeoutError",
|
|
827
|
+
"ConnectionError",
|
|
828
|
+
"HTTPError",
|
|
829
|
+
"SSLError",
|
|
830
|
+
}
|
|
831
|
+
|
|
832
|
+
for error in json_data.get("errors", []):
|
|
833
|
+
error_type = error.get("type", "SemgrepError")
|
|
834
|
+
error_msg = error.get("message", str(error))
|
|
835
|
+
|
|
836
|
+
# Infrastructure errors: warn but don't fail the build
|
|
837
|
+
if error_type in INFRA_ERROR_TYPES:
|
|
838
|
+
self.console.print(
|
|
839
|
+
f"[yellow]Warning: Semgrep infrastructure error: "
|
|
840
|
+
f"{error_type}: {error_msg}[/yellow]"
|
|
841
|
+
)
|
|
842
|
+
else:
|
|
843
|
+
# Code/config errors: add to issues (will fail the build)
|
|
844
|
+
issues.append(f"{error_type}: {error_msg}")
|
|
845
|
+
return issues
|
|
846
|
+
|
|
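For context, the new `_parse_semgrep_issues` path separates findings (`results`) from execution problems (`errors`) and only fails the build on non-infrastructure errors. A minimal standalone sketch of that categorization on a hypothetical `semgrep --json` payload (illustrative only, not part of the packaged code; the rule id and messages are made up):

```python
# Illustration only: hypothetical semgrep --json payload and the same
# results-vs-errors categorization used in the diff above.
import json

INFRA_ERROR_TYPES = {"NetworkError", "DownloadError", "TimeoutError",
                     "ConnectionError", "HTTPError", "SSLError"}

sample = json.dumps({
    "results": [{
        "path": "app.py",
        "start": {"line": 12},
        "check_id": "python.lang.security.audit.exec-detected",
        "extra": {"message": "Detected use of exec()"},
    }],
    "errors": [{"type": "DownloadError", "message": "rules download failed"}],
})

data = json.loads(sample)
issues = [
    f"{r.get('path', 'unknown')}:{r.get('start', {}).get('line', '?')} - "
    f"{r.get('check_id', 'unknown-rule')}: {r.get('extra', {}).get('message', '')}"
    for r in data.get("results", [])
]
warnings = [e for e in data.get("errors", []) if e.get("type") in INFRA_ERROR_TYPES]
failures = [e for e in data.get("errors", []) if e.get("type") not in INFRA_ERROR_TYPES]
print(issues)     # -> ['app.py:12 - python.lang.security.audit.exec-detected: Detected use of exec()']
print(len(warnings), len(failures))  # -> 1 0 (download error warns, does not fail)
```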
     def _create_timeout_result(
         self, hook: HookDefinition, start_time: float
     ) -> HookResult:
@@ -263,7 +854,11 @@ class HookExecutor:
             status="timeout",
             duration=duration,
             issues_found=[f"Hook timed out after {duration: .1f}s"],
+            issues_count=1,  # Timeout counts as 1 issue
             stage=hook.stage.value,
+            exit_code=124,  # Standard timeout exit code
+            error_message=f"Execution exceeded timeout of {duration:.1f}s",
+            is_timeout=True,
         )

     def _create_error_result(
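The timeout path now records richer failure metadata. As a rough illustration only (the real `HookResult` model lives elsewhere in the package; the dataclass below is a hypothetical stand-in, and 124 mirrors the conventional `timeout(1)` exit status):

```python
# Illustration only: a stand-in HookResult carrying the fields the diff populates.
from dataclasses import dataclass, field

@dataclass
class HookResult:  # hypothetical stand-in, not the crackerjack model
    name: str
    status: str
    duration: float
    issues_found: list[str] = field(default_factory=list)
    issues_count: int = 0
    stage: str = ""
    exit_code: int = 0
    error_message: str = ""
    is_timeout: bool = False

duration = 120.0
result = HookResult(
    name="example-hook",
    status="timeout",
    duration=duration,
    issues_found=[f"Hook timed out after {duration:.1f}s"],
    issues_count=1,
    stage="example-stage",
    exit_code=124,
    error_message=f"Execution exceeded timeout of {duration:.1f}s",
    is_timeout=True,
)
print(result.exit_code, result.is_timeout)  # -> 124 True
```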
@@ -276,33 +871,249 @@ class HookExecutor:
             status="error",
             duration=duration,
             issues_found=[str(error)],
+            issues_count=1,  # Error counts as 1 issue
             stage=hook.stage.value,
+            exit_code=1,
+            error_message=str(error),
+            is_timeout=False,
         )

     def _parse_hook_output(
         self,
         result: subprocess.CompletedProcess[str],
+        hook_name: str = "",
     ) -> dict[str, t.Any]:
         output = result.stdout + result.stderr
+
+        # Special handling for semgrep to count files with issues, not total files scanned
+        if hook_name == "semgrep":
+            files_processed = self._parse_semgrep_output(result)
+        else:
+            files_processed = self._parse_generic_hook_output(output)
+
+        return self._create_parse_result(files_processed, result.returncode, output)
+
+    def _is_semgrep_output(self, output: str, args_str: str) -> bool:
+        """Check if the output is from semgrep."""
+        return "semgrep" in output.lower() or "semgrep" in args_str.lower()
+
+    def _create_parse_result(
+        self, files_processed: int, exit_code: int, output: str
+    ) -> dict[str, t.Any]:
+        """Create the parse result dictionary."""
         return {
             "hook_id": None,
-            "exit_code":
-            "files_processed":
+            "exit_code": exit_code,
+            "files_processed": files_processed,
             "issues": [],
             "raw_output": output,
         }

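The hook output parser now dispatches on the hook name (semgrep gets its own file-count path) and funnels everything into a small result dict. A hedged sketch of the shape `_create_parse_result` would produce for a clean run; the field values here are invented for illustration:

```python
# Illustration only: the dict shape assembled by _create_parse_result.
files_processed = 12
exit_code = 0
output = "Checked 12 files\nAll checks passed!"

parse_result = {
    "hook_id": None,
    "exit_code": exit_code,
    "files_processed": files_processed,
    "issues": [],
    "raw_output": output,
}
assert parse_result["files_processed"] == 12
```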
+    def _parse_semgrep_output(
+        self,
+        result: subprocess.CompletedProcess[str],
+    ) -> int:
+        """Parse Semgrep output to count files with issues, not total files scanned."""
+
+        # Try to extract JSON output from semgrep (if available)
+        # Semgrep JSON output contains results with file paths
+        json_files = self._parse_semgrep_json_output(result)
+        if json_files is not None and json_files >= 0:
+            # Successfully parsed JSON - return result (including 0 for no issues)
+            return json_files
+
+        # If we couldn't extract from JSON, try to parse from text output
+        return self._parse_semgrep_text_output(result.stdout + result.stderr)
+
+    def _parse_semgrep_json_output(
+        self,
+        result: subprocess.CompletedProcess[str],
+    ) -> int | None:
+        """Parse Semgrep JSON output to count unique files with issues.
+
+        Returns:
+            int: Number of files with issues if JSON parsed successfully (including 0)
+            None: If JSON parsing failed
+        """
+        # Look for JSON output between potentially mixed text output
+        output = result.stdout + result.stderr
+        return self._process_output_for_json(output)
+
+    def _process_output_for_json(self, output: str) -> int | None:
+        """Process output looking for JSON content.
+
+        Returns:
+            int: Number of files if JSON found (including 0 for no issues)
+            None: If no valid JSON found
+        """
+        lines = output.splitlines()
+        for line in lines:
+            result = self._try_parse_line_json(line)
+            if result is not None:
+                return result
+        return None
+
+    def _try_parse_line_json(self, line: str) -> int | None:
+        """Try to parse a line as JSON, checking both pure JSON and JSON with text.
+
+        Returns:
+            int: Number of files if JSON parsed successfully (including 0)
+            None: If JSON parsing failed
+        """
+        line = line.strip()
+        # Check if it's a pure JSON object
+        if self._is_pure_json(line):
+            result = self._parse_json_line(line)
+            if result is not None:
+                return result
+        # Check if it contains JSON results
+        if self._contains_json_results(line):
+            result = self._parse_json_line(line)
+            if result is not None:
+                return result
+        return None
+
+    def _is_pure_json(self, line: str) -> bool:
+        """Check if a line is a pure JSON object."""
+        return line.startswith("{") and line.endswith("}")
+
+    def _contains_json_results(self, line: str) -> bool:
+        """Check if a line contains JSON results."""
+        return '"results":' in line
+
+    def _parse_json_line(self, line: str) -> int | None:
+        """Parse a single JSON line to extract file count.
+
+        Returns:
+            int: Number of unique files with issues if JSON is valid (including 0)
+            None: If JSON parsing failed
+        """
+        try:
+            json_data = json.loads(line)
+            if "results" in json_data:
+                # Count unique file paths in results
+                file_paths = {
+                    result.get("path") for result in json_data.get("results", [])
+                }
+                return len([p for p in file_paths if p])  # Filter out None values
+        except json.JSONDecodeError:
+            pass
+        return None
+
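The key detail in this chain is that findings are deduplicated by file path, so a file with several hits counts once. A small self-contained sketch of that counting step on a made-up JSON line:

```python
# Illustration only: counting unique files with findings from one semgrep
# JSON line, mirroring the dedup-by-path logic above.
import json

line = json.dumps({
    "results": [
        {"path": "a.py", "check_id": "rule-1"},
        {"path": "a.py", "check_id": "rule-2"},
        {"path": "b.py", "check_id": "rule-1"},
    ]
})

data = json.loads(line)
file_paths = {r.get("path") for r in data.get("results", [])}
files_with_issues = len([p for p in file_paths if p])
print(files_with_issues)  # -> 2 (three findings, but only two distinct files)
```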
+    def _parse_semgrep_text_output(self, output: str) -> int:
+        """Parse Semgrep text output to extract file count."""
+        import re
+
+        # Look for patterns in Semgrep output that indicate findings
+        # Example: "found 3 issues in 2 files" or "found no issues"
+        semgrep_patterns = [
+            r"found\s+(\d+)\s+issues?\s+in\s+(\d+)\s+files?",
+            r"found\s+no\s+issues",
+            r"scanning\s+(\d+)\s+files?",
+        ]
+
+        for pattern in semgrep_patterns:
+            matches = re.findall(pattern, output, re.IGNORECASE)
+            if matches:
+                result = self._process_matches(matches, output)
+                if result != -1:  # -1 means "continue to next pattern"
+                    return result
+
+        return 0
+
+    def _process_matches(self, matches: list, output: str) -> int:
+        """Process regex matches to extract file count."""
+        for match in matches:
+            if isinstance(match, tuple):
+                if len(match) == 2:  # "found X issues in Y files" pattern
+                    return self._handle_issues_in_files_match(match)
+                elif len(match) == 1 and "no issues" not in output.lower():
+                    # This would be from "scanning X files" - we don't want this for the files_processed
+                    continue  # Return -1 to indicate continue
+            elif "no issues" in output.lower():
+                return 0
+        return -1  # Indicates to continue to next pattern
+
+    def _handle_issues_in_files_match(self, match: tuple) -> int:
+        """Handle the 'found X issues in Y files' match."""
+        issue_count, file_count = int(match[0]), int(match[1])
+        # Use the number of files with issues, not total files scanned
+        return file_count if issue_count > 0 else 0
+
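This text fallback deliberately reports the files that contain findings, not the total scanned. A minimal sketch of the "found X issues in Y files" branch on a hypothetical line of tool output:

```python
# Illustration only: the text fallback keeps the file count from
# "found X issues in Y files", ignoring the larger scanned-file total.
import re

output = "Ran rules on 48 files: found 3 issues in 2 files"  # hypothetical output line
files_processed = 0
match = re.search(r"found\s+(\d+)\s+issues?\s+in\s+(\d+)\s+files?", output, re.IGNORECASE)
if match:
    issue_count, file_count = int(match.group(1)), int(match.group(2))
    files_processed = file_count if issue_count > 0 else 0
print(files_processed)  # -> 2, the files with findings, not the 48 scanned
```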
+    def _parse_generic_hook_output(self, output: str) -> int:
+        """Parse output from other hooks (non-semgrep) to extract file count."""
+        files_processed = 0
+
+        # Check for common patterns in hook output (for other tools)
+        if "files" in output.lower():
+            files_processed = self._extract_file_count_from_patterns(output)
+
+        # Special handling for ruff and other common tools
+        if not files_processed and "ruff" in output.lower():
+            # Look for patterns like "All checks passed!" with files processed elsewhere
+            files_processed = self._extract_file_count_for_ruff_like_tools(output)
+
+        return files_processed
+
+    def _extract_file_count_from_patterns(self, output: str) -> int:
+        """Extract file counts from common patterns in hook output."""
+        import re
+
+        # Pattern for "N file(s)" in output - return the highest found number
+        all_matches = []
+        file_count_patterns = [
+            r"(\d+)\s+files?\s+would\s+be",  # "X files would be reformatted"
+            r"(\d+)\s+files?\s+already\s+formatted",  # "X files already formatted"
+            r"(\d+)\s+files?\s+processed",  # "X files processed"
+            r"(\d+)\s+files?\s+checked",  # "X files checked"
+            r"(\d+)\s+files?\s+analyzed",  # "X files analyzed"
+            r"Checking\s+(\d+)\s+files?",  # "Checking 5 files"
+            r"Found\s+(\d+)\s+files?",  # "Found 5 files"
+            r"(\d+)\s+files?",  # "5 files" or "1 file" (general pattern)
+        ]
+        for pattern in file_count_patterns:
+            matches = re.findall(pattern, output, re.IGNORECASE)
+            if matches:
+                # Convert all matches to integers and add to list
+                all_matches.extend([int(m) for m in matches if m.isdigit()])
+
+        # Use the highest value found
+        if all_matches:
+            return max(all_matches)
+
+        return 0
+
+    def _extract_file_count_for_ruff_like_tools(self, output: str) -> int:
+        """Extract file counts for ruff-like tools that don't report files when all pass."""
+        import re
+
+        # Look for patterns like "All checks passed!" with files processed elsewhere
+        all_passed_match = re.search(r"All\s+checks?\s+passed!", output, re.IGNORECASE)
+        if all_passed_match:
+            # For all-checks-passed scenarios, try to find other mentions of file counts
+            other_matches = re.findall(r"(\d+)\s+files?", output, re.IGNORECASE)
+            if other_matches:
+                all_matches = [int(m) for m in other_matches if m.isdigit()]
+                if all_matches:
+                    return max(all_matches)  # Use highest value found
+
+        return 0
+
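For non-semgrep hooks the heuristic is simply "take the largest file count mentioned anywhere in the output". A short sketch with a made-up formatter transcript showing why the maximum is the safe pick:

```python
# Illustration only: picking the highest file count mentioned in tool output,
# as the generic pattern extraction above does (sample output is invented).
import re

output = "Found 3 files\n2 files reformatted, 14 files already formatted"
counts: list[int] = []
for pattern in (
    r"(\d+)\s+files?\s+already\s+formatted",
    r"Found\s+(\d+)\s+files?",
    r"(\d+)\s+files?",
):
    counts.extend(int(m) for m in re.findall(pattern, output, re.IGNORECASE))
print(max(counts) if counts else 0)  # -> 14, the broadest count in the transcript
```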
     def _display_hook_result(self, result: HookResult) -> None:
+        if self.quiet:
+            return
         status_icon = "✅" if result.status == "passed" else "❌"

-        max_width =
+        max_width = get_console_width()
+        content_width = max_width - 4  # Adjusted for icon and padding

-        if len(result.name) >
-        line = result.name[:
+        if len(result.name) > content_width:
+            line = result.name[: content_width - 3] + "..."
         else:
-            dots_needed =
+            dots_needed = max(0, content_width - len(result.name))
             line = result.name + ("." * dots_needed)

+        # Real-time inline hook status (dotted-line format)
         self.console.print(f"{line} {status_icon}")

     def _handle_retries(
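A quick sketch of the dotted-line format this produces, assuming an 80-column console and a hook named "ruff-check" (both assumptions; the real width comes from `get_console_width()`):

```python
# Illustration only: padding a hook name with dots to the content width.
name = "ruff-check"
content_width = 80 - 4  # assumed 80-column console, minus icon and padding
line = name + "." * max(0, content_width - len(name))
print(f"{line} ✅")  # -> "ruff-check....(dots padded to 76 chars)... ✅"
```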
@@ -321,6 +1132,17 @@ class HookExecutor:
         strategy: HookStrategy,
         results: list[HookResult],
     ) -> list[HookResult]:
+        formatting_hooks_failed = self._find_failed_formatting_hooks(strategy, results)
+
+        if not formatting_hooks_failed:
+            return results
+
+        return self._retry_all_formatting_hooks(strategy, results)
+
+    def _find_failed_formatting_hooks(
+        self, strategy: HookStrategy, results: list[HookResult]
+    ) -> set[str]:
+        """Find the names of formatting hooks that failed."""
         formatting_hooks_failed: set[str] = set()

         for i, result in enumerate(results):
@@ -328,9 +1150,12 @@ class HookExecutor:
             if hook.is_formatting and result.status == "failed":
                 formatting_hooks_failed.add(hook.name)

-
-        return results
+        return formatting_hooks_failed

+    def _retry_all_formatting_hooks(
+        self, strategy: HookStrategy, results: list[HookResult]
+    ) -> list[HookResult]:
+        """Retry all formatting hooks."""
         updated_results: list[HookResult] = []
         for i, hook in enumerate(strategy.hooks):
             prev_result = results[i]
@@ -347,51 +1172,50 @@ class HookExecutor:
         strategy: HookStrategy,
         results: list[HookResult],
     ) -> list[HookResult]:
-        failed_hooks =
+        failed_hooks = self._find_failed_hooks(results)

         if not failed_hooks:
             return results

+        return self._retry_failed_hooks(strategy, results, failed_hooks)
+
+    def _find_failed_hooks(self, results: list[HookResult]) -> list[int]:
+        """Find the indices of hooks that failed."""
+        return [i for i, r in enumerate(results) if r.status == "failed"]
+
+    def _retry_failed_hooks(
+        self, strategy: HookStrategy, results: list[HookResult], failed_hooks: list[int]
+    ) -> list[HookResult]:
+        """Retry the failed hooks."""
         updated_results: list[HookResult] = results.copy()
         for i in failed_hooks:
-
-
-            new_result = self.execute_single_hook(hook)
+            self._retry_single_hook(strategy, results, updated_results, i)
+        return updated_results

-
-
-
+    def _retry_single_hook(
+        self,
+        strategy: HookStrategy,
+        results: list[HookResult],
+        updated_results: list[HookResult],
+        hook_idx: int,
+    ) -> None:
+        """Retry a single hook."""
+        hook = strategy.hooks[hook_idx]
+        prev_result = results[hook_idx]
+        new_result = self.execute_single_hook(hook)

-
+        new_result.duration += prev_result.duration
+        updated_results[hook_idx] = new_result
+        self._display_hook_result(new_result)

     def _get_clean_environment(self) -> dict[str, str]:
-        clean_env =
-            "HOME": os.environ.get("HOME", ""),
-            "USER": os.environ.get("USER", ""),
-            "SHELL": os.environ.get("SHELL", "/bin/bash"),
-            "LANG": os.environ.get("LANG", "en_US.UTF-8"),
-            "LC_ALL": os.environ.get("LC_ALL", ""),
-            "TERM": os.environ.get("TERM", "xterm-256color"),
-        }
+        clean_env = self._get_base_environment()

-
-        if system_path:
-            venv_bin = str(Path(self.pkg_path) / ".venv" / "bin")
-            path_parts = [p for p in system_path.split(": ") if p != venv_bin]
-            clean_env["PATH"] = ": ".join(path_parts)
-
-        python_vars_to_exclude = {
-            "VIRTUAL_ENV",
-            "PYTHONPATH",
-            "PYTHON_PATH",
-            "PIP_CONFIG_FILE",
-            "PYTHONHOME",
-            "CONDA_DEFAULT_ENV",
-            "PIPENV_ACTIVE",
-            "POETRY_ACTIVE",
-        }
+        self._update_path(clean_env)

         security_logger = get_security_logger()
+        python_vars_to_exclude = self._get_python_vars_to_exclude()
+
         original_count = len(os.environ)
         filtered_count = 0

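One behavioural detail in the retry refactor worth noting: the retried result keeps the combined duration of both attempts before replacing the failed entry. A hedged sketch with stand-in objects (not the real `HookResult` type):

```python
# Illustration only: a retried hook's duration accumulates across attempts
# and the new result replaces the failed one in the results list.
from dataclasses import dataclass

@dataclass
class Result:  # hypothetical stand-in for HookResult
    name: str
    status: str
    duration: float

results = [Result("example-hook", "failed", 4.0), Result("other-hook", "passed", 1.0)]
updated = results.copy()

retried = Result("example-hook", "passed", 3.5)  # outcome of the retry attempt
retried.duration += results[0].duration          # cumulative time across both runs
updated[0] = retried
print(updated[0])  # -> Result(name='example-hook', status='passed', duration=7.5)
```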
@@ -421,10 +1245,51 @@ class HookExecutor:

         return clean_env

+    def _get_base_environment(self) -> dict[str, str]:
+        """Get the base environment variables."""
+        return {
+            "HOME": os.environ.get("HOME", ""),
+            "USER": os.environ.get("USER", ""),
+            "SHELL": os.environ.get("SHELL", "/bin/bash"),
+            "LANG": os.environ.get("LANG", "en_US.UTF-8"),
+            "LC_ALL": os.environ.get("LC_ALL", ""),
+            "TERM": os.environ.get("TERM", "xterm-256color"),
+        }
+
+    def _update_path(self, clean_env: dict[str, str]) -> None:
+        """Update the PATH environment variable."""
+        system_path = os.environ.get("PATH", "")
+        if system_path:
+            venv_bin = str(Path(self.pkg_path) / ".venv" / "bin")
+            path_parts = [p for p in system_path.split(os.pathsep) if p != venv_bin]
+            clean_env["PATH"] = os.pathsep.join(path_parts)
+
+    def _get_python_vars_to_exclude(self) -> set[str]:
+        """Get the set of Python variables to exclude."""
+        return {
+            "VIRTUAL_ENV",
+            "PYTHONPATH",
+            "PYTHON_PATH",
+            "PIP_CONFIG_FILE",
+            "PYTHONHOME",
+            "CONDA_DEFAULT_ENV",
+            "PIPENV_ACTIVE",
+            "POETRY_ACTIVE",
+        }
+
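The PATH cleanup now uses `os.pathsep` rather than a hard-coded separator, dropping the project's own `.venv/bin` entry. A small standalone sketch of the same filtering with hypothetical paths:

```python
# Illustration only: stripping a project's .venv/bin from PATH with os.pathsep,
# mirroring _update_path above (all paths here are hypothetical).
import os
from pathlib import Path

pkg_path = "/work/myproject"  # hypothetical project root
venv_bin = str(Path(pkg_path) / ".venv" / "bin")
system_path = os.pathsep.join([venv_bin, "/usr/local/bin", "/usr/bin"])

clean_path = os.pathsep.join(
    p for p in system_path.split(os.pathsep) if p != venv_bin
)
print(clean_path)  # -> "/usr/local/bin:/usr/bin" on POSIX systems
```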
1279
|
+
|
|
424
1280
|
def _print_summary(
|
|
425
1281
|
self,
|
|
426
1282
|
strategy: HookStrategy,
|
|
427
1283
|
results: list[HookResult],
|
|
428
1284
|
success: bool,
|
|
1285
|
+
performance_gain: float,
|
|
429
1286
|
) -> None:
|
|
430
|
-
|
|
1287
|
+
if success:
|
|
1288
|
+
mode = "async" if self.is_concurrent(strategy) else "sequential"
|
|
1289
|
+
self.console.print(
|
|
1290
|
+
f"[green]✅[/green] {strategy.name.title()} hooks passed: {len(results)} / {len(results)} "
|
|
1291
|
+
f"({mode}, {performance_gain:.1f}% faster)",
|
|
1292
|
+
)
|
|
1293
|
+
|
|
1294
|
+
def is_concurrent(self, strategy: HookStrategy) -> bool:
|
|
1295
|
+
return strategy.parallel and len(strategy.hooks) > 1
|
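For a sense of what the new summary line looks like, a minimal sketch assuming a Rich console, a strategy named "fast" with five parallel hooks, and a measured 42.0% speed-up (all assumed values):

```python
# Illustration only: rendering the summary line with assumed values.
from rich.console import Console

console = Console()
name, results_count, mode, performance_gain = "fast", 5, "async", 42.0
console.print(
    f"[green]✅[/green] {name.title()} hooks passed: {results_count} / {results_count} "
    f"({mode}, {performance_gain:.1f}% faster)",
)
# -> ✅ Fast hooks passed: 5 / 5 (async, 42.0% faster)
```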