crackerjack 0.18.2__py3-none-any.whl → 0.45.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- crackerjack/README.md +19 -0
- crackerjack/__init__.py +96 -2
- crackerjack/__main__.py +637 -138
- crackerjack/adapters/README.md +18 -0
- crackerjack/adapters/__init__.py +39 -0
- crackerjack/adapters/_output_paths.py +167 -0
- crackerjack/adapters/_qa_adapter_base.py +309 -0
- crackerjack/adapters/_tool_adapter_base.py +706 -0
- crackerjack/adapters/ai/README.md +65 -0
- crackerjack/adapters/ai/__init__.py +5 -0
- crackerjack/adapters/ai/claude.py +853 -0
- crackerjack/adapters/complexity/README.md +53 -0
- crackerjack/adapters/complexity/__init__.py +10 -0
- crackerjack/adapters/complexity/complexipy.py +641 -0
- crackerjack/adapters/dependency/__init__.py +22 -0
- crackerjack/adapters/dependency/pip_audit.py +418 -0
- crackerjack/adapters/format/README.md +72 -0
- crackerjack/adapters/format/__init__.py +11 -0
- crackerjack/adapters/format/mdformat.py +313 -0
- crackerjack/adapters/format/ruff.py +516 -0
- crackerjack/adapters/lint/README.md +47 -0
- crackerjack/adapters/lint/__init__.py +11 -0
- crackerjack/adapters/lint/codespell.py +273 -0
- crackerjack/adapters/lsp/README.md +49 -0
- crackerjack/adapters/lsp/__init__.py +27 -0
- crackerjack/adapters/lsp/_base.py +194 -0
- crackerjack/adapters/lsp/_client.py +358 -0
- crackerjack/adapters/lsp/_manager.py +193 -0
- crackerjack/adapters/lsp/skylos.py +283 -0
- crackerjack/adapters/lsp/zuban.py +557 -0
- crackerjack/adapters/refactor/README.md +59 -0
- crackerjack/adapters/refactor/__init__.py +12 -0
- crackerjack/adapters/refactor/creosote.py +318 -0
- crackerjack/adapters/refactor/refurb.py +406 -0
- crackerjack/adapters/refactor/skylos.py +494 -0
- crackerjack/adapters/sast/README.md +132 -0
- crackerjack/adapters/sast/__init__.py +32 -0
- crackerjack/adapters/sast/_base.py +201 -0
- crackerjack/adapters/sast/bandit.py +423 -0
- crackerjack/adapters/sast/pyscn.py +405 -0
- crackerjack/adapters/sast/semgrep.py +241 -0
- crackerjack/adapters/security/README.md +111 -0
- crackerjack/adapters/security/__init__.py +17 -0
- crackerjack/adapters/security/gitleaks.py +339 -0
- crackerjack/adapters/type/README.md +52 -0
- crackerjack/adapters/type/__init__.py +12 -0
- crackerjack/adapters/type/pyrefly.py +402 -0
- crackerjack/adapters/type/ty.py +402 -0
- crackerjack/adapters/type/zuban.py +522 -0
- crackerjack/adapters/utility/README.md +51 -0
- crackerjack/adapters/utility/__init__.py +10 -0
- crackerjack/adapters/utility/checks.py +884 -0
- crackerjack/agents/README.md +264 -0
- crackerjack/agents/__init__.py +66 -0
- crackerjack/agents/architect_agent.py +238 -0
- crackerjack/agents/base.py +167 -0
- crackerjack/agents/claude_code_bridge.py +641 -0
- crackerjack/agents/coordinator.py +600 -0
- crackerjack/agents/documentation_agent.py +520 -0
- crackerjack/agents/dry_agent.py +585 -0
- crackerjack/agents/enhanced_coordinator.py +279 -0
- crackerjack/agents/enhanced_proactive_agent.py +185 -0
- crackerjack/agents/error_middleware.py +53 -0
- crackerjack/agents/formatting_agent.py +230 -0
- crackerjack/agents/helpers/__init__.py +9 -0
- crackerjack/agents/helpers/performance/__init__.py +22 -0
- crackerjack/agents/helpers/performance/performance_ast_analyzer.py +357 -0
- crackerjack/agents/helpers/performance/performance_pattern_detector.py +909 -0
- crackerjack/agents/helpers/performance/performance_recommender.py +572 -0
- crackerjack/agents/helpers/refactoring/__init__.py +22 -0
- crackerjack/agents/helpers/refactoring/code_transformer.py +536 -0
- crackerjack/agents/helpers/refactoring/complexity_analyzer.py +344 -0
- crackerjack/agents/helpers/refactoring/dead_code_detector.py +437 -0
- crackerjack/agents/helpers/test_creation/__init__.py +19 -0
- crackerjack/agents/helpers/test_creation/test_ast_analyzer.py +216 -0
- crackerjack/agents/helpers/test_creation/test_coverage_analyzer.py +643 -0
- crackerjack/agents/helpers/test_creation/test_template_generator.py +1031 -0
- crackerjack/agents/import_optimization_agent.py +1181 -0
- crackerjack/agents/performance_agent.py +325 -0
- crackerjack/agents/performance_helpers.py +205 -0
- crackerjack/agents/proactive_agent.py +55 -0
- crackerjack/agents/refactoring_agent.py +511 -0
- crackerjack/agents/refactoring_helpers.py +247 -0
- crackerjack/agents/security_agent.py +793 -0
- crackerjack/agents/semantic_agent.py +479 -0
- crackerjack/agents/semantic_helpers.py +356 -0
- crackerjack/agents/test_creation_agent.py +570 -0
- crackerjack/agents/test_specialist_agent.py +526 -0
- crackerjack/agents/tracker.py +110 -0
- crackerjack/api.py +647 -0
- crackerjack/cli/README.md +394 -0
- crackerjack/cli/__init__.py +24 -0
- crackerjack/cli/cache_handlers.py +209 -0
- crackerjack/cli/cache_handlers_enhanced.py +680 -0
- crackerjack/cli/facade.py +162 -0
- crackerjack/cli/formatting.py +13 -0
- crackerjack/cli/handlers/__init__.py +85 -0
- crackerjack/cli/handlers/advanced.py +103 -0
- crackerjack/cli/handlers/ai_features.py +62 -0
- crackerjack/cli/handlers/analytics.py +479 -0
- crackerjack/cli/handlers/changelog.py +271 -0
- crackerjack/cli/handlers/config_handlers.py +16 -0
- crackerjack/cli/handlers/coverage.py +84 -0
- crackerjack/cli/handlers/documentation.py +280 -0
- crackerjack/cli/handlers/main_handlers.py +497 -0
- crackerjack/cli/handlers/monitoring.py +371 -0
- crackerjack/cli/handlers.py +700 -0
- crackerjack/cli/interactive.py +488 -0
- crackerjack/cli/options.py +1216 -0
- crackerjack/cli/semantic_handlers.py +292 -0
- crackerjack/cli/utils.py +19 -0
- crackerjack/cli/version.py +19 -0
- crackerjack/code_cleaner.py +1307 -0
- crackerjack/config/README.md +472 -0
- crackerjack/config/__init__.py +275 -0
- crackerjack/config/global_lock_config.py +207 -0
- crackerjack/config/hooks.py +390 -0
- crackerjack/config/loader.py +239 -0
- crackerjack/config/settings.py +141 -0
- crackerjack/config/tool_commands.py +331 -0
- crackerjack/core/README.md +393 -0
- crackerjack/core/__init__.py +0 -0
- crackerjack/core/async_workflow_orchestrator.py +738 -0
- crackerjack/core/autofix_coordinator.py +282 -0
- crackerjack/core/container.py +105 -0
- crackerjack/core/enhanced_container.py +583 -0
- crackerjack/core/file_lifecycle.py +472 -0
- crackerjack/core/performance.py +244 -0
- crackerjack/core/performance_monitor.py +357 -0
- crackerjack/core/phase_coordinator.py +1227 -0
- crackerjack/core/proactive_workflow.py +267 -0
- crackerjack/core/resource_manager.py +425 -0
- crackerjack/core/retry.py +275 -0
- crackerjack/core/service_watchdog.py +601 -0
- crackerjack/core/session_coordinator.py +239 -0
- crackerjack/core/timeout_manager.py +563 -0
- crackerjack/core/websocket_lifecycle.py +410 -0
- crackerjack/core/workflow/__init__.py +21 -0
- crackerjack/core/workflow/workflow_ai_coordinator.py +863 -0
- crackerjack/core/workflow/workflow_event_orchestrator.py +1107 -0
- crackerjack/core/workflow/workflow_issue_parser.py +714 -0
- crackerjack/core/workflow/workflow_phase_executor.py +1158 -0
- crackerjack/core/workflow/workflow_security_gates.py +400 -0
- crackerjack/core/workflow_orchestrator.py +2243 -0
- crackerjack/data/README.md +11 -0
- crackerjack/data/__init__.py +8 -0
- crackerjack/data/models.py +79 -0
- crackerjack/data/repository.py +210 -0
- crackerjack/decorators/README.md +180 -0
- crackerjack/decorators/__init__.py +35 -0
- crackerjack/decorators/error_handling.py +649 -0
- crackerjack/decorators/error_handling_decorators.py +334 -0
- crackerjack/decorators/helpers.py +58 -0
- crackerjack/decorators/patterns.py +281 -0
- crackerjack/decorators/utils.py +58 -0
- crackerjack/docs/INDEX.md +11 -0
- crackerjack/docs/README.md +11 -0
- crackerjack/docs/generated/api/API_REFERENCE.md +10895 -0
- crackerjack/docs/generated/api/CLI_REFERENCE.md +109 -0
- crackerjack/docs/generated/api/CROSS_REFERENCES.md +1755 -0
- crackerjack/docs/generated/api/PROTOCOLS.md +3 -0
- crackerjack/docs/generated/api/SERVICES.md +1252 -0
- crackerjack/documentation/README.md +11 -0
- crackerjack/documentation/__init__.py +31 -0
- crackerjack/documentation/ai_templates.py +756 -0
- crackerjack/documentation/dual_output_generator.py +767 -0
- crackerjack/documentation/mkdocs_integration.py +518 -0
- crackerjack/documentation/reference_generator.py +1065 -0
- crackerjack/dynamic_config.py +678 -0
- crackerjack/errors.py +378 -0
- crackerjack/events/README.md +11 -0
- crackerjack/events/__init__.py +16 -0
- crackerjack/events/telemetry.py +175 -0
- crackerjack/events/workflow_bus.py +346 -0
- crackerjack/exceptions/README.md +301 -0
- crackerjack/exceptions/__init__.py +5 -0
- crackerjack/exceptions/config.py +4 -0
- crackerjack/exceptions/tool_execution_error.py +245 -0
- crackerjack/executors/README.md +591 -0
- crackerjack/executors/__init__.py +13 -0
- crackerjack/executors/async_hook_executor.py +938 -0
- crackerjack/executors/cached_hook_executor.py +316 -0
- crackerjack/executors/hook_executor.py +1295 -0
- crackerjack/executors/hook_lock_manager.py +708 -0
- crackerjack/executors/individual_hook_executor.py +739 -0
- crackerjack/executors/lsp_aware_hook_executor.py +349 -0
- crackerjack/executors/progress_hook_executor.py +282 -0
- crackerjack/executors/tool_proxy.py +433 -0
- crackerjack/hooks/README.md +485 -0
- crackerjack/hooks/lsp_hook.py +93 -0
- crackerjack/intelligence/README.md +557 -0
- crackerjack/intelligence/__init__.py +37 -0
- crackerjack/intelligence/adaptive_learning.py +693 -0
- crackerjack/intelligence/agent_orchestrator.py +485 -0
- crackerjack/intelligence/agent_registry.py +377 -0
- crackerjack/intelligence/agent_selector.py +439 -0
- crackerjack/intelligence/integration.py +250 -0
- crackerjack/interactive.py +719 -0
- crackerjack/managers/README.md +369 -0
- crackerjack/managers/__init__.py +11 -0
- crackerjack/managers/async_hook_manager.py +135 -0
- crackerjack/managers/hook_manager.py +585 -0
- crackerjack/managers/publish_manager.py +631 -0
- crackerjack/managers/test_command_builder.py +391 -0
- crackerjack/managers/test_executor.py +474 -0
- crackerjack/managers/test_manager.py +1357 -0
- crackerjack/managers/test_progress.py +187 -0
- crackerjack/mcp/README.md +374 -0
- crackerjack/mcp/__init__.py +0 -0
- crackerjack/mcp/cache.py +352 -0
- crackerjack/mcp/client_runner.py +121 -0
- crackerjack/mcp/context.py +802 -0
- crackerjack/mcp/dashboard.py +657 -0
- crackerjack/mcp/enhanced_progress_monitor.py +493 -0
- crackerjack/mcp/file_monitor.py +394 -0
- crackerjack/mcp/progress_components.py +607 -0
- crackerjack/mcp/progress_monitor.py +1016 -0
- crackerjack/mcp/rate_limiter.py +336 -0
- crackerjack/mcp/server.py +24 -0
- crackerjack/mcp/server_core.py +526 -0
- crackerjack/mcp/service_watchdog.py +505 -0
- crackerjack/mcp/state.py +407 -0
- crackerjack/mcp/task_manager.py +259 -0
- crackerjack/mcp/tools/README.md +27 -0
- crackerjack/mcp/tools/__init__.py +19 -0
- crackerjack/mcp/tools/core_tools.py +469 -0
- crackerjack/mcp/tools/error_analyzer.py +283 -0
- crackerjack/mcp/tools/execution_tools.py +384 -0
- crackerjack/mcp/tools/intelligence_tool_registry.py +46 -0
- crackerjack/mcp/tools/intelligence_tools.py +264 -0
- crackerjack/mcp/tools/monitoring_tools.py +628 -0
- crackerjack/mcp/tools/proactive_tools.py +367 -0
- crackerjack/mcp/tools/progress_tools.py +222 -0
- crackerjack/mcp/tools/semantic_tools.py +584 -0
- crackerjack/mcp/tools/utility_tools.py +358 -0
- crackerjack/mcp/tools/workflow_executor.py +699 -0
- crackerjack/mcp/websocket/README.md +31 -0
- crackerjack/mcp/websocket/__init__.py +14 -0
- crackerjack/mcp/websocket/app.py +54 -0
- crackerjack/mcp/websocket/endpoints.py +492 -0
- crackerjack/mcp/websocket/event_bridge.py +188 -0
- crackerjack/mcp/websocket/jobs.py +406 -0
- crackerjack/mcp/websocket/monitoring/__init__.py +25 -0
- crackerjack/mcp/websocket/monitoring/api/__init__.py +19 -0
- crackerjack/mcp/websocket/monitoring/api/dependencies.py +141 -0
- crackerjack/mcp/websocket/monitoring/api/heatmap.py +154 -0
- crackerjack/mcp/websocket/monitoring/api/intelligence.py +199 -0
- crackerjack/mcp/websocket/monitoring/api/metrics.py +203 -0
- crackerjack/mcp/websocket/monitoring/api/telemetry.py +101 -0
- crackerjack/mcp/websocket/monitoring/dashboard.py +18 -0
- crackerjack/mcp/websocket/monitoring/factory.py +109 -0
- crackerjack/mcp/websocket/monitoring/filters.py +10 -0
- crackerjack/mcp/websocket/monitoring/metrics.py +64 -0
- crackerjack/mcp/websocket/monitoring/models.py +90 -0
- crackerjack/mcp/websocket/monitoring/utils.py +171 -0
- crackerjack/mcp/websocket/monitoring/websocket_manager.py +78 -0
- crackerjack/mcp/websocket/monitoring/websockets/__init__.py +17 -0
- crackerjack/mcp/websocket/monitoring/websockets/dependencies.py +126 -0
- crackerjack/mcp/websocket/monitoring/websockets/heatmap.py +176 -0
- crackerjack/mcp/websocket/monitoring/websockets/intelligence.py +291 -0
- crackerjack/mcp/websocket/monitoring/websockets/metrics.py +291 -0
- crackerjack/mcp/websocket/monitoring_endpoints.py +21 -0
- crackerjack/mcp/websocket/server.py +174 -0
- crackerjack/mcp/websocket/websocket_handler.py +276 -0
- crackerjack/mcp/websocket_server.py +10 -0
- crackerjack/models/README.md +308 -0
- crackerjack/models/__init__.py +40 -0
- crackerjack/models/config.py +730 -0
- crackerjack/models/config_adapter.py +265 -0
- crackerjack/models/protocols.py +1535 -0
- crackerjack/models/pydantic_models.py +320 -0
- crackerjack/models/qa_config.py +145 -0
- crackerjack/models/qa_results.py +134 -0
- crackerjack/models/resource_protocols.py +299 -0
- crackerjack/models/results.py +35 -0
- crackerjack/models/semantic_models.py +258 -0
- crackerjack/models/task.py +173 -0
- crackerjack/models/test_models.py +60 -0
- crackerjack/monitoring/README.md +11 -0
- crackerjack/monitoring/__init__.py +0 -0
- crackerjack/monitoring/ai_agent_watchdog.py +405 -0
- crackerjack/monitoring/metrics_collector.py +427 -0
- crackerjack/monitoring/regression_prevention.py +580 -0
- crackerjack/monitoring/websocket_server.py +406 -0
- crackerjack/orchestration/README.md +340 -0
- crackerjack/orchestration/__init__.py +43 -0
- crackerjack/orchestration/advanced_orchestrator.py +894 -0
- crackerjack/orchestration/cache/README.md +312 -0
- crackerjack/orchestration/cache/__init__.py +37 -0
- crackerjack/orchestration/cache/memory_cache.py +338 -0
- crackerjack/orchestration/cache/tool_proxy_cache.py +340 -0
- crackerjack/orchestration/config.py +297 -0
- crackerjack/orchestration/coverage_improvement.py +180 -0
- crackerjack/orchestration/execution_strategies.py +361 -0
- crackerjack/orchestration/hook_orchestrator.py +1398 -0
- crackerjack/orchestration/strategies/README.md +401 -0
- crackerjack/orchestration/strategies/__init__.py +39 -0
- crackerjack/orchestration/strategies/adaptive_strategy.py +630 -0
- crackerjack/orchestration/strategies/parallel_strategy.py +237 -0
- crackerjack/orchestration/strategies/sequential_strategy.py +299 -0
- crackerjack/orchestration/test_progress_streamer.py +647 -0
- crackerjack/plugins/README.md +11 -0
- crackerjack/plugins/__init__.py +15 -0
- crackerjack/plugins/base.py +200 -0
- crackerjack/plugins/hooks.py +254 -0
- crackerjack/plugins/loader.py +335 -0
- crackerjack/plugins/managers.py +264 -0
- crackerjack/py313.py +191 -0
- crackerjack/security/README.md +11 -0
- crackerjack/security/__init__.py +0 -0
- crackerjack/security/audit.py +197 -0
- crackerjack/services/README.md +374 -0
- crackerjack/services/__init__.py +9 -0
- crackerjack/services/ai/README.md +295 -0
- crackerjack/services/ai/__init__.py +7 -0
- crackerjack/services/ai/advanced_optimizer.py +878 -0
- crackerjack/services/ai/contextual_ai_assistant.py +542 -0
- crackerjack/services/ai/embeddings.py +444 -0
- crackerjack/services/ai/intelligent_commit.py +328 -0
- crackerjack/services/ai/predictive_analytics.py +510 -0
- crackerjack/services/anomaly_detector.py +392 -0
- crackerjack/services/api_extractor.py +617 -0
- crackerjack/services/backup_service.py +467 -0
- crackerjack/services/bounded_status_operations.py +530 -0
- crackerjack/services/cache.py +369 -0
- crackerjack/services/changelog_automation.py +399 -0
- crackerjack/services/command_execution_service.py +305 -0
- crackerjack/services/config_integrity.py +132 -0
- crackerjack/services/config_merge.py +546 -0
- crackerjack/services/config_service.py +198 -0
- crackerjack/services/config_template.py +493 -0
- crackerjack/services/coverage_badge_service.py +173 -0
- crackerjack/services/coverage_ratchet.py +381 -0
- crackerjack/services/debug.py +733 -0
- crackerjack/services/dependency_analyzer.py +460 -0
- crackerjack/services/dependency_monitor.py +622 -0
- crackerjack/services/documentation_generator.py +493 -0
- crackerjack/services/documentation_service.py +704 -0
- crackerjack/services/enhanced_filesystem.py +497 -0
- crackerjack/services/enterprise_optimizer.py +865 -0
- crackerjack/services/error_pattern_analyzer.py +676 -0
- crackerjack/services/file_filter.py +221 -0
- crackerjack/services/file_hasher.py +149 -0
- crackerjack/services/file_io_service.py +361 -0
- crackerjack/services/file_modifier.py +615 -0
- crackerjack/services/filesystem.py +381 -0
- crackerjack/services/git.py +422 -0
- crackerjack/services/health_metrics.py +615 -0
- crackerjack/services/heatmap_generator.py +744 -0
- crackerjack/services/incremental_executor.py +380 -0
- crackerjack/services/initialization.py +823 -0
- crackerjack/services/input_validator.py +668 -0
- crackerjack/services/intelligent_commit.py +327 -0
- crackerjack/services/log_manager.py +289 -0
- crackerjack/services/logging.py +228 -0
- crackerjack/services/lsp_client.py +628 -0
- crackerjack/services/memory_optimizer.py +414 -0
- crackerjack/services/metrics.py +587 -0
- crackerjack/services/monitoring/README.md +30 -0
- crackerjack/services/monitoring/__init__.py +9 -0
- crackerjack/services/monitoring/dependency_monitor.py +678 -0
- crackerjack/services/monitoring/error_pattern_analyzer.py +676 -0
- crackerjack/services/monitoring/health_metrics.py +716 -0
- crackerjack/services/monitoring/metrics.py +587 -0
- crackerjack/services/monitoring/performance_benchmarks.py +410 -0
- crackerjack/services/monitoring/performance_cache.py +388 -0
- crackerjack/services/monitoring/performance_monitor.py +569 -0
- crackerjack/services/parallel_executor.py +527 -0
- crackerjack/services/pattern_cache.py +333 -0
- crackerjack/services/pattern_detector.py +478 -0
- crackerjack/services/patterns/__init__.py +142 -0
- crackerjack/services/patterns/agents.py +107 -0
- crackerjack/services/patterns/code/__init__.py +15 -0
- crackerjack/services/patterns/code/detection.py +118 -0
- crackerjack/services/patterns/code/imports.py +107 -0
- crackerjack/services/patterns/code/paths.py +159 -0
- crackerjack/services/patterns/code/performance.py +119 -0
- crackerjack/services/patterns/code/replacement.py +36 -0
- crackerjack/services/patterns/core.py +212 -0
- crackerjack/services/patterns/documentation/__init__.py +14 -0
- crackerjack/services/patterns/documentation/badges_markdown.py +96 -0
- crackerjack/services/patterns/documentation/comments_blocks.py +83 -0
- crackerjack/services/patterns/documentation/docstrings.py +89 -0
- crackerjack/services/patterns/formatting.py +226 -0
- crackerjack/services/patterns/operations.py +339 -0
- crackerjack/services/patterns/security/__init__.py +23 -0
- crackerjack/services/patterns/security/code_injection.py +122 -0
- crackerjack/services/patterns/security/credentials.py +190 -0
- crackerjack/services/patterns/security/path_traversal.py +221 -0
- crackerjack/services/patterns/security/unsafe_operations.py +216 -0
- crackerjack/services/patterns/templates.py +62 -0
- crackerjack/services/patterns/testing/__init__.py +18 -0
- crackerjack/services/patterns/testing/error_patterns.py +107 -0
- crackerjack/services/patterns/testing/pytest_output.py +126 -0
- crackerjack/services/patterns/tool_output/__init__.py +16 -0
- crackerjack/services/patterns/tool_output/bandit.py +72 -0
- crackerjack/services/patterns/tool_output/other.py +97 -0
- crackerjack/services/patterns/tool_output/pyright.py +67 -0
- crackerjack/services/patterns/tool_output/ruff.py +44 -0
- crackerjack/services/patterns/url_sanitization.py +114 -0
- crackerjack/services/patterns/utilities.py +42 -0
- crackerjack/services/patterns/utils.py +339 -0
- crackerjack/services/patterns/validation.py +46 -0
- crackerjack/services/patterns/versioning.py +62 -0
- crackerjack/services/predictive_analytics.py +523 -0
- crackerjack/services/profiler.py +280 -0
- crackerjack/services/quality/README.md +415 -0
- crackerjack/services/quality/__init__.py +11 -0
- crackerjack/services/quality/anomaly_detector.py +392 -0
- crackerjack/services/quality/pattern_cache.py +333 -0
- crackerjack/services/quality/pattern_detector.py +479 -0
- crackerjack/services/quality/qa_orchestrator.py +491 -0
- crackerjack/services/quality/quality_baseline.py +395 -0
- crackerjack/services/quality/quality_baseline_enhanced.py +649 -0
- crackerjack/services/quality/quality_intelligence.py +949 -0
- crackerjack/services/regex_patterns.py +58 -0
- crackerjack/services/regex_utils.py +483 -0
- crackerjack/services/secure_path_utils.py +524 -0
- crackerjack/services/secure_status_formatter.py +450 -0
- crackerjack/services/secure_subprocess.py +635 -0
- crackerjack/services/security.py +239 -0
- crackerjack/services/security_logger.py +495 -0
- crackerjack/services/server_manager.py +411 -0
- crackerjack/services/smart_scheduling.py +167 -0
- crackerjack/services/status_authentication.py +460 -0
- crackerjack/services/status_security_manager.py +315 -0
- crackerjack/services/terminal_utils.py +0 -0
- crackerjack/services/thread_safe_status_collector.py +441 -0
- crackerjack/services/tool_filter.py +368 -0
- crackerjack/services/tool_version_service.py +43 -0
- crackerjack/services/unified_config.py +115 -0
- crackerjack/services/validation_rate_limiter.py +220 -0
- crackerjack/services/vector_store.py +689 -0
- crackerjack/services/version_analyzer.py +461 -0
- crackerjack/services/version_checker.py +223 -0
- crackerjack/services/websocket_resource_limiter.py +438 -0
- crackerjack/services/zuban_lsp_service.py +391 -0
- crackerjack/slash_commands/README.md +11 -0
- crackerjack/slash_commands/__init__.py +59 -0
- crackerjack/slash_commands/init.md +112 -0
- crackerjack/slash_commands/run.md +197 -0
- crackerjack/slash_commands/status.md +127 -0
- crackerjack/tools/README.md +11 -0
- crackerjack/tools/__init__.py +30 -0
- crackerjack/tools/_git_utils.py +105 -0
- crackerjack/tools/check_added_large_files.py +139 -0
- crackerjack/tools/check_ast.py +105 -0
- crackerjack/tools/check_json.py +103 -0
- crackerjack/tools/check_jsonschema.py +297 -0
- crackerjack/tools/check_toml.py +103 -0
- crackerjack/tools/check_yaml.py +110 -0
- crackerjack/tools/codespell_wrapper.py +72 -0
- crackerjack/tools/end_of_file_fixer.py +202 -0
- crackerjack/tools/format_json.py +128 -0
- crackerjack/tools/mdformat_wrapper.py +114 -0
- crackerjack/tools/trailing_whitespace.py +198 -0
- crackerjack/tools/validate_input_validator_patterns.py +236 -0
- crackerjack/tools/validate_regex_patterns.py +188 -0
- crackerjack/ui/README.md +11 -0
- crackerjack/ui/__init__.py +1 -0
- crackerjack/ui/dashboard_renderer.py +28 -0
- crackerjack/ui/templates/README.md +11 -0
- crackerjack/utils/console_utils.py +13 -0
- crackerjack/utils/dependency_guard.py +230 -0
- crackerjack/utils/retry_utils.py +275 -0
- crackerjack/workflows/README.md +590 -0
- crackerjack/workflows/__init__.py +46 -0
- crackerjack/workflows/actions.py +811 -0
- crackerjack/workflows/auto_fix.py +444 -0
- crackerjack/workflows/container_builder.py +499 -0
- crackerjack/workflows/definitions.py +443 -0
- crackerjack/workflows/engine.py +177 -0
- crackerjack/workflows/event_bridge.py +242 -0
- crackerjack-0.45.2.dist-info/METADATA +1678 -0
- crackerjack-0.45.2.dist-info/RECORD +478 -0
- {crackerjack-0.18.2.dist-info → crackerjack-0.45.2.dist-info}/WHEEL +1 -1
- crackerjack-0.45.2.dist-info/entry_points.txt +2 -0
- crackerjack/.gitignore +0 -14
- crackerjack/.libcst.codemod.yaml +0 -18
- crackerjack/.pdm.toml +0 -1
- crackerjack/.pre-commit-config.yaml +0 -91
- crackerjack/.pytest_cache/.gitignore +0 -2
- crackerjack/.pytest_cache/CACHEDIR.TAG +0 -4
- crackerjack/.pytest_cache/README.md +0 -8
- crackerjack/.pytest_cache/v/cache/nodeids +0 -1
- crackerjack/.pytest_cache/v/cache/stepwise +0 -1
- crackerjack/.ruff_cache/.gitignore +0 -1
- crackerjack/.ruff_cache/0.1.11/3256171999636029978 +0 -0
- crackerjack/.ruff_cache/0.1.14/602324811142551221 +0 -0
- crackerjack/.ruff_cache/0.1.4/10355199064880463147 +0 -0
- crackerjack/.ruff_cache/0.1.6/15140459877605758699 +0 -0
- crackerjack/.ruff_cache/0.1.7/1790508110482614856 +0 -0
- crackerjack/.ruff_cache/0.1.9/17041001205004563469 +0 -0
- crackerjack/.ruff_cache/0.11.2/4070660268492669020 +0 -0
- crackerjack/.ruff_cache/0.11.3/9818742842212983150 +0 -0
- crackerjack/.ruff_cache/0.11.4/9818742842212983150 +0 -0
- crackerjack/.ruff_cache/0.11.6/3557596832929915217 +0 -0
- crackerjack/.ruff_cache/0.11.7/10386934055395314831 +0 -0
- crackerjack/.ruff_cache/0.11.7/3557596832929915217 +0 -0
- crackerjack/.ruff_cache/0.11.8/530407680854991027 +0 -0
- crackerjack/.ruff_cache/0.2.0/10047773857155985907 +0 -0
- crackerjack/.ruff_cache/0.2.1/8522267973936635051 +0 -0
- crackerjack/.ruff_cache/0.2.2/18053836298936336950 +0 -0
- crackerjack/.ruff_cache/0.3.0/12548816621480535786 +0 -0
- crackerjack/.ruff_cache/0.3.3/11081883392474770722 +0 -0
- crackerjack/.ruff_cache/0.3.4/676973378459347183 +0 -0
- crackerjack/.ruff_cache/0.3.5/16311176246009842383 +0 -0
- crackerjack/.ruff_cache/0.5.7/1493622539551733492 +0 -0
- crackerjack/.ruff_cache/0.5.7/6231957614044513175 +0 -0
- crackerjack/.ruff_cache/0.5.7/9932762556785938009 +0 -0
- crackerjack/.ruff_cache/0.6.0/11982804814124138945 +0 -0
- crackerjack/.ruff_cache/0.6.0/12055761203849489982 +0 -0
- crackerjack/.ruff_cache/0.6.2/1206147804896221174 +0 -0
- crackerjack/.ruff_cache/0.6.4/1206147804896221174 +0 -0
- crackerjack/.ruff_cache/0.6.5/1206147804896221174 +0 -0
- crackerjack/.ruff_cache/0.6.7/3657366982708166874 +0 -0
- crackerjack/.ruff_cache/0.6.9/285614542852677309 +0 -0
- crackerjack/.ruff_cache/0.7.1/1024065805990144819 +0 -0
- crackerjack/.ruff_cache/0.7.1/285614542852677309 +0 -0
- crackerjack/.ruff_cache/0.7.3/16061516852537040135 +0 -0
- crackerjack/.ruff_cache/0.8.4/16354268377385700367 +0 -0
- crackerjack/.ruff_cache/0.9.10/12813592349865671909 +0 -0
- crackerjack/.ruff_cache/0.9.10/923908772239632759 +0 -0
- crackerjack/.ruff_cache/0.9.3/13948373885254993391 +0 -0
- crackerjack/.ruff_cache/0.9.9/12813592349865671909 +0 -0
- crackerjack/.ruff_cache/0.9.9/8843823720003377982 +0 -0
- crackerjack/.ruff_cache/CACHEDIR.TAG +0 -1
- crackerjack/crackerjack.py +0 -855
- crackerjack/pyproject.toml +0 -214
- crackerjack-0.18.2.dist-info/METADATA +0 -420
- crackerjack-0.18.2.dist-info/RECORD +0 -59
- crackerjack-0.18.2.dist-info/entry_points.txt +0 -4
- {crackerjack-0.18.2.dist-info → crackerjack-0.45.2.dist-info}/licenses/LICENSE +0 -0
crackerjack/executors/hook_executor.py
@@ -0,0 +1,1295 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import os
|
|
3
|
+
import subprocess
|
|
4
|
+
import time
|
|
5
|
+
import typing as t
|
|
6
|
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
|
7
|
+
from contextlib import suppress
|
|
8
|
+
from dataclasses import dataclass
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
|
|
11
|
+
from acb.console import Console
|
|
12
|
+
|
|
13
|
+
from crackerjack.config import get_console_width
|
|
14
|
+
from crackerjack.config.hooks import HookDefinition, HookStrategy, RetryPolicy
|
|
15
|
+
from crackerjack.models.task import HookResult
|
|
16
|
+
from crackerjack.services.security_logger import get_security_logger
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
@dataclass
|
|
20
|
+
class HookExecutionResult:
|
|
21
|
+
strategy_name: str
|
|
22
|
+
results: list[HookResult]
|
|
23
|
+
total_duration: float
|
|
24
|
+
success: bool
|
|
25
|
+
concurrent_execution: bool = False
|
|
26
|
+
cache_hits: int = 0
|
|
27
|
+
cache_misses: int = 0
|
|
28
|
+
performance_gain: float = 0.0
|
|
29
|
+
|
|
30
|
+
@property
|
|
31
|
+
def failed_count(self) -> int:
|
|
32
|
+
return sum(1 for r in self.results if r.status == "failed")
|
|
33
|
+
|
|
34
|
+
@property
|
|
35
|
+
def passed_count(self) -> int:
|
|
36
|
+
return sum(1 for r in self.results if r.status == "passed")
|
|
37
|
+
|
|
38
|
+
@property
|
|
39
|
+
def cache_hit_rate(self) -> float:
|
|
40
|
+
total_requests = self.cache_hits + self.cache_misses
|
|
41
|
+
return (self.cache_hits / total_requests * 100) if total_requests > 0 else 0.0
|
|
42
|
+
|
|
43
|
+
@property
|
|
44
|
+
def performance_summary(self) -> dict[str, t.Any]:
|
|
45
|
+
return {
|
|
46
|
+
"total_hooks": len(self.results),
|
|
47
|
+
"passed": self.passed_count,
|
|
48
|
+
"failed": self.failed_count,
|
|
49
|
+
"duration_seconds": round(self.total_duration, 2),
|
|
50
|
+
"concurrent": self.concurrent_execution,
|
|
51
|
+
"cache_hits": self.cache_hits,
|
|
52
|
+
"cache_misses": self.cache_misses,
|
|
53
|
+
"cache_hit_rate_percent": round(self.cache_hit_rate, 1),
|
|
54
|
+
}
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
class HookExecutor:
|
|
58
|
+
def __init__(
|
|
59
|
+
self,
|
|
60
|
+
console: Console,
|
|
61
|
+
pkg_path: Path,
|
|
62
|
+
verbose: bool = False,
|
|
63
|
+
quiet: bool = False,
|
|
64
|
+
debug: bool = False,
|
|
65
|
+
use_incremental: bool = False,
|
|
66
|
+
git_service: t.Any | None = None,
|
|
67
|
+
) -> None:
|
|
68
|
+
self.console = console
|
|
69
|
+
self.pkg_path = pkg_path
|
|
70
|
+
self.verbose = verbose
|
|
71
|
+
self.quiet = quiet
|
|
72
|
+
self.debug = debug
|
|
73
|
+
self.use_incremental = use_incremental
|
|
74
|
+
self.git_service = git_service
|
|
75
|
+
# Optional progress callbacks used when orchestration is disabled
|
|
76
|
+
self._progress_callback: t.Callable[[int, int], None] | None = None
|
|
77
|
+
self._progress_start_callback: t.Callable[[int, int], None] | None = None
|
|
78
|
+
self._total_hooks: int = 0
|
|
79
|
+
self._started_hooks: int = 0
|
|
80
|
+
self._completed_hooks: int = 0
|
|
81
|
+
|
|
82
|
+
def set_progress_callbacks(
|
|
83
|
+
self,
|
|
84
|
+
*,
|
|
85
|
+
started_cb: t.Callable[[int, int], None] | None = None,
|
|
86
|
+
completed_cb: t.Callable[[int, int], None] | None = None,
|
|
87
|
+
total: int | None = None,
|
|
88
|
+
) -> None:
|
|
89
|
+
"""Set optional progress callbacks for legacy execution.
|
|
90
|
+
|
|
91
|
+
Args:
|
|
92
|
+
started_cb: Called when a hook starts with (started, total)
|
|
93
|
+
completed_cb: Called when a hook completes with (completed, total)
|
|
94
|
+
total: Total number of hooks (defaults to len(strategy.hooks))
|
|
95
|
+
"""
|
|
96
|
+
self._progress_start_callback = started_cb
|
|
97
|
+
self._progress_callback = completed_cb
|
|
98
|
+
self._total_hooks = int(total or 0)
|
|
99
|
+
self._started_hooks = 0
|
|
100
|
+
self._completed_hooks = 0
|
|
101
|
+
|
|
102
|
+
def execute_strategy(self, strategy: HookStrategy) -> HookExecutionResult:
|
|
103
|
+
start_time = time.time()
|
|
104
|
+
|
|
105
|
+
# Header is displayed by PhaseCoordinator; suppress here to avoid duplicates
|
|
106
|
+
|
|
107
|
+
results = self._execute_hooks(strategy)
|
|
108
|
+
|
|
109
|
+
results = self._apply_retries_if_needed(strategy, results)
|
|
110
|
+
|
|
111
|
+
return self._create_execution_result(strategy, results, start_time)
|
|
112
|
+
|
|
113
|
+
def _execute_hooks(self, strategy: HookStrategy) -> list[HookResult]:
|
|
114
|
+
"""Execute hooks based on strategy configuration."""
|
|
115
|
+
if strategy.parallel and len(strategy.hooks) > 1:
|
|
116
|
+
return self._execute_parallel(strategy)
|
|
117
|
+
return self._execute_sequential(strategy)
|
|
118
|
+
|
|
119
|
+
def _apply_retries_if_needed(
|
|
120
|
+
self, strategy: HookStrategy, results: list[HookResult]
|
|
121
|
+
) -> list[HookResult]:
|
|
122
|
+
"""Apply retries if the strategy requires it."""
|
|
123
|
+
if strategy.retry_policy != RetryPolicy.NONE:
|
|
124
|
+
return self._handle_retries(strategy, results)
|
|
125
|
+
return results
|
|
126
|
+
|
|
127
|
+
def _create_execution_result(
|
|
128
|
+
self, strategy: HookStrategy, results: list[HookResult], start_time: float
|
|
129
|
+
) -> HookExecutionResult:
|
|
130
|
+
"""Create the final execution result with performance metrics."""
|
|
131
|
+
total_duration = time.time() - start_time
|
|
132
|
+
success = all(r.status == "passed" for r in results)
|
|
133
|
+
|
|
134
|
+
performance_gain = self._calculate_performance_gain(
|
|
135
|
+
strategy, results, total_duration
|
|
136
|
+
)
|
|
137
|
+
|
|
138
|
+
if not self.quiet:
|
|
139
|
+
self._print_summary(strategy, results, success, performance_gain)
|
|
140
|
+
|
|
141
|
+
return HookExecutionResult(
|
|
142
|
+
strategy_name=strategy.name,
|
|
143
|
+
results=results,
|
|
144
|
+
total_duration=total_duration,
|
|
145
|
+
success=success,
|
|
146
|
+
performance_gain=performance_gain,
|
|
147
|
+
)
|
|
148
|
+
|
|
149
|
+
def _calculate_performance_gain(
|
|
150
|
+
self, strategy: HookStrategy, results: list[HookResult], total_duration: float
|
|
151
|
+
) -> float:
|
|
152
|
+
"""Calculate the performance gain from parallel execution."""
|
|
153
|
+
estimated_sequential = sum(
|
|
154
|
+
getattr(hook, "timeout", 30) for hook in strategy.hooks
|
|
155
|
+
)
|
|
156
|
+
return (
|
|
157
|
+
max(
|
|
158
|
+
0,
|
|
159
|
+
((estimated_sequential - total_duration) / estimated_sequential) * 100,
|
|
160
|
+
)
|
|
161
|
+
if estimated_sequential > 0
|
|
162
|
+
else 0.0
|
|
163
|
+
)
|
|
164
|
+
|
|
165
|
+
def _print_strategy_header(self, strategy: HookStrategy) -> None:
|
|
166
|
+
# Intentionally no-op: PhaseCoordinator controls stage headers
|
|
167
|
+
return None
|
|
168
|
+
|
|
169
|
+
def _execute_sequential(self, strategy: HookStrategy) -> list[HookResult]:
|
|
170
|
+
results: list[HookResult] = []
|
|
171
|
+
total_hooks = len(strategy.hooks)
|
|
172
|
+
|
|
173
|
+
for hook in strategy.hooks:
|
|
174
|
+
self._handle_progress_start(total_hooks)
|
|
175
|
+
result = self.execute_single_hook(hook)
|
|
176
|
+
results.append(result)
|
|
177
|
+
self._display_hook_result(result)
|
|
178
|
+
self._handle_progress_completion(total_hooks)
|
|
179
|
+
return results
|
|
180
|
+
|
|
181
|
+
def _handle_progress_start(self, total_hooks: int) -> None:
|
|
182
|
+
"""Handle progress start callback."""
|
|
183
|
+
if self._progress_start_callback:
|
|
184
|
+
with suppress(Exception):
|
|
185
|
+
self._started_hooks += 1
|
|
186
|
+
total = self._total_hooks or total_hooks
|
|
187
|
+
self._progress_start_callback(self._started_hooks, total)
|
|
188
|
+
|
|
189
|
+
def _handle_progress_completion(self, total_hooks: int) -> None:
|
|
190
|
+
"""Handle progress completion callback."""
|
|
191
|
+
if self._progress_callback:
|
|
192
|
+
with suppress(Exception):
|
|
193
|
+
self._completed_hooks += 1
|
|
194
|
+
total = self._total_hooks or total_hooks
|
|
195
|
+
self._progress_callback(self._completed_hooks, total)
|
|
196
|
+
|
|
197
|
+
def _execute_parallel(self, strategy: HookStrategy) -> list[HookResult]:
|
|
198
|
+
results: list[HookResult] = []
|
|
199
|
+
|
|
200
|
+
formatting_hooks = [h for h in strategy.hooks if h.is_formatting]
|
|
201
|
+
other_hooks = [h for h in strategy.hooks if not h.is_formatting]
|
|
202
|
+
|
|
203
|
+
# Execute formatting hooks sequentially first
|
|
204
|
+
for hook in formatting_hooks:
|
|
205
|
+
self._execute_single_hook_with_progress(hook, results)
|
|
206
|
+
|
|
207
|
+
# Execute other hooks in parallel
|
|
208
|
+
if other_hooks:
|
|
209
|
+
self._execute_parallel_hooks(other_hooks, strategy, results)
|
|
210
|
+
|
|
211
|
+
return results
|
|
212
|
+
|
|
213
|
+
def _execute_single_hook_with_progress(
|
|
214
|
+
self, hook: HookDefinition, results: list[HookResult]
|
|
215
|
+
) -> None:
|
|
216
|
+
"""Execute a single hook and update progress callbacks."""
|
|
217
|
+
if self._progress_start_callback:
|
|
218
|
+
with suppress(Exception):
|
|
219
|
+
self._started_hooks += 1
|
|
220
|
+
total = self._total_hooks or len(results) + 1 # Approximate total
|
|
221
|
+
self._progress_start_callback(self._started_hooks, total)
|
|
222
|
+
|
|
223
|
+
result = self.execute_single_hook(hook)
|
|
224
|
+
results.append(result)
|
|
225
|
+
self._display_hook_result(result)
|
|
226
|
+
|
|
227
|
+
if self._progress_callback:
|
|
228
|
+
with suppress(Exception):
|
|
229
|
+
self._completed_hooks += 1
|
|
230
|
+
total = self._total_hooks or len(results)
|
|
231
|
+
self._progress_callback(self._completed_hooks, total)
|
|
232
|
+
|
|
233
|
+
def _execute_parallel_hooks(
|
|
234
|
+
self,
|
|
235
|
+
other_hooks: list[HookDefinition],
|
|
236
|
+
strategy: HookStrategy,
|
|
237
|
+
results: list[HookResult],
|
|
238
|
+
) -> None:
|
|
239
|
+
"""Execute non-formatting hooks in parallel."""
|
|
240
|
+
|
|
241
|
+
# Use helper function to run hooks with progress tracking
|
|
242
|
+
run_hook_func = self._create_run_hook_func(results, other_hooks)
|
|
243
|
+
|
|
244
|
+
with ThreadPoolExecutor(max_workers=strategy.max_workers) as executor:
|
|
245
|
+
future_to_hook = {
|
|
246
|
+
executor.submit(run_hook_func, hook): hook for hook in other_hooks
|
|
247
|
+
}
|
|
248
|
+
|
|
249
|
+
for future in as_completed(future_to_hook):
|
|
250
|
+
self._handle_future_result(future, future_to_hook, results)
|
|
251
|
+
|
|
252
|
+
def _create_run_hook_func(
|
|
253
|
+
self, results: list[HookResult], other_hooks: list[HookDefinition]
|
|
254
|
+
) -> t.Callable[[HookDefinition], HookResult]:
|
|
255
|
+
"""Create a function that runs a hook with progress tracking."""
|
|
256
|
+
|
|
257
|
+
def _run_with_start(h: HookDefinition) -> HookResult:
|
|
258
|
+
if self._progress_start_callback:
|
|
259
|
+
with suppress(Exception):
|
|
260
|
+
self._started_hooks += 1
|
|
261
|
+
total_local = self._total_hooks or len(results) + len(other_hooks)
|
|
262
|
+
self._progress_start_callback(self._started_hooks, total_local)
|
|
263
|
+
return self.execute_single_hook(h)
|
|
264
|
+
|
|
265
|
+
return _run_with_start
|
|
266
|
+
|
|
267
|
+
def _handle_future_result(
|
|
268
|
+
self, future, future_to_hook: dict, results: list[HookResult]
|
|
269
|
+
) -> None:
|
|
270
|
+
"""Handle the result of a completed future from thread pool execution."""
|
|
271
|
+
try:
|
|
272
|
+
result = future.result()
|
|
273
|
+
results.append(result)
|
|
274
|
+
self._display_hook_result(result)
|
|
275
|
+
self._update_progress_on_completion()
|
|
276
|
+
except Exception as e:
|
|
277
|
+
hook = future_to_hook[future]
|
|
278
|
+
error_result = HookResult(
|
|
279
|
+
id=hook.name,
|
|
280
|
+
name=hook.name,
|
|
281
|
+
status="error",
|
|
282
|
+
duration=0.0,
|
|
283
|
+
issues_found=[str(e)],
|
|
284
|
+
issues_count=1, # Error counts as 1 issue
|
|
285
|
+
stage=hook.stage.value,
|
|
286
|
+
exit_code=1,
|
|
287
|
+
error_message=str(e),
|
|
288
|
+
is_timeout=False,
|
|
289
|
+
)
|
|
290
|
+
results.append(error_result)
|
|
291
|
+
self._display_hook_result(error_result)
|
|
292
|
+
self._update_progress_on_completion()
|
|
293
|
+
|
|
294
|
+
def _update_progress_on_completion(self) -> None:
|
|
295
|
+
"""Update progress callback when a hook completes."""
|
|
296
|
+
if self._progress_callback:
|
|
297
|
+
with suppress(Exception):
|
|
298
|
+
self._completed_hooks += 1
|
|
299
|
+
total = self._total_hooks or self._completed_hooks # Approximate total
|
|
300
|
+
self._progress_callback(self._completed_hooks, total)
|
|
301
|
+
|
|
302
|
+
def execute_single_hook(self, hook: HookDefinition) -> HookResult:
|
|
303
|
+
start_time = time.time()
|
|
304
|
+
|
|
305
|
+
try:
|
|
306
|
+
result = self._run_hook_subprocess(hook)
|
|
307
|
+
duration = time.time() - start_time
|
|
308
|
+
|
|
309
|
+
self._display_hook_output_if_needed(result, hook.name)
|
|
310
|
+
return self._create_hook_result_from_process(hook, result, duration)
|
|
311
|
+
|
|
312
|
+
except subprocess.TimeoutExpired:
|
|
313
|
+
return self._create_timeout_result(hook, start_time)
|
|
314
|
+
|
|
315
|
+
except Exception as e:
|
|
316
|
+
return self._create_error_result(hook, start_time, e)
|
|
317
|
+
|
|
318
|
+
def _get_changed_files_for_hook(self, hook: HookDefinition) -> list[Path] | None:
|
|
319
|
+
"""Get changed files for incremental execution if supported.
|
|
320
|
+
|
|
321
|
+
Returns:
|
|
322
|
+
List of changed files if incremental mode enabled and hook supports it,
|
|
323
|
+
None if full scan should be used (no changes or hook doesn't support files)
|
|
324
|
+
"""
|
|
325
|
+
if not self.use_incremental or not hook.accepts_file_paths:
|
|
326
|
+
return None
|
|
327
|
+
|
|
328
|
+
if not self.git_service:
|
|
329
|
+
return None
|
|
330
|
+
|
|
331
|
+
# Map hook names to file extensions
|
|
332
|
+
extension_map = {
|
|
333
|
+
"ruff-check": [".py"],
|
|
334
|
+
"ruff-format": [".py"],
|
|
335
|
+
"mdformat": [".md"],
|
|
336
|
+
"refurb": [".py"],
|
|
337
|
+
"skylos": [".py"],
|
|
338
|
+
"complexipy": [".py"],
|
|
339
|
+
"semgrep": [".py"],
|
|
340
|
+
"check-yaml": [".yaml", ".yml"],
|
|
341
|
+
"check-toml": [".toml"],
|
|
342
|
+
"check-json": [".json"],
|
|
343
|
+
"check-ast": [".py"],
|
|
344
|
+
"format-json": [".json"],
|
|
345
|
+
"codespell": [".py", ".md", ".txt", ".rst"],
|
|
346
|
+
"check-jsonschema": [".json", ".yaml", ".yml"],
|
|
347
|
+
"trailing-whitespace": [""], # All files
|
|
348
|
+
"end-of-file-fixer": [""], # All files
|
|
349
|
+
}
|
|
350
|
+
|
|
351
|
+
extensions = extension_map.get(hook.name)
|
|
352
|
+
if not extensions:
|
|
353
|
+
return None
|
|
354
|
+
|
|
355
|
+
changed_files = self.git_service.get_changed_files_by_extension(extensions)
|
|
356
|
+
|
|
357
|
+
# If no files changed, return None to skip the hook entirely
|
|
358
|
+
# (or run full scan depending on configuration)
|
|
359
|
+
return changed_files or None
|
|
360
|
+
|
|
361
|
+
def _run_hook_subprocess(
|
|
362
|
+
self, hook: HookDefinition
|
|
363
|
+
) -> subprocess.CompletedProcess[str]:
|
|
364
|
+
clean_env = self._get_clean_environment()
|
|
365
|
+
|
|
366
|
+
try:
|
|
367
|
+
repo_root = self.pkg_path
|
|
368
|
+
|
|
369
|
+
# Get changed files for incremental execution
|
|
370
|
+
changed_files = self._get_changed_files_for_hook(hook)
|
|
371
|
+
|
|
372
|
+
# Use build_command with files if incremental, otherwise get_command
|
|
373
|
+
command = (
|
|
374
|
+
hook.build_command(changed_files)
|
|
375
|
+
if changed_files
|
|
376
|
+
else hook.get_command()
|
|
377
|
+
)
|
|
378
|
+
|
|
379
|
+
return subprocess.run(
|
|
380
|
+
command,
|
|
381
|
+
cwd=repo_root,
|
|
382
|
+
env=clean_env,
|
|
383
|
+
timeout=hook.timeout,
|
|
384
|
+
capture_output=True,
|
|
385
|
+
text=True,
|
|
386
|
+
check=False,
|
|
387
|
+
)
|
|
388
|
+
except Exception as e:
|
|
389
|
+
security_logger = get_security_logger()
|
|
390
|
+
security_logger.log_subprocess_failure(
|
|
391
|
+
command=hook.get_command(),
|
|
392
|
+
exit_code=-1,
|
|
393
|
+
error_output=str(e),
|
|
394
|
+
)
|
|
395
|
+
|
|
396
|
+
return subprocess.CompletedProcess(
|
|
397
|
+
args=hook.get_command(), returncode=1, stdout="", stderr=str(e)
|
|
398
|
+
)
|
|
399
|
+
|
|
400
|
+
def _display_hook_output_if_needed(
|
|
401
|
+
self, result: subprocess.CompletedProcess[str], hook_name: str = ""
|
|
402
|
+
) -> None:
|
|
403
|
+
# For complexipy, only show output when --debug flag is set
|
|
404
|
+
if hook_name == "complexipy" and not self.debug:
|
|
405
|
+
return
|
|
406
|
+
|
|
407
|
+
if result.returncode == 0 or not self.verbose:
|
|
408
|
+
return
|
|
409
|
+
|
|
410
|
+
if result.stdout:
|
|
411
|
+
self.console.print(result.stdout)
|
|
412
|
+
if result.stderr:
|
|
413
|
+
self.console.print(result.stderr)
|
|
414
|
+
|
|
415
|
+
def _create_hook_result_from_process(
|
|
416
|
+
self,
|
|
417
|
+
hook: HookDefinition,
|
|
418
|
+
result: subprocess.CompletedProcess[str],
|
|
419
|
+
duration: float,
|
|
420
|
+
) -> HookResult:
|
|
421
|
+
# Determine initial status
|
|
422
|
+
status = self._determine_initial_status(hook, result)
|
|
423
|
+
|
|
424
|
+
# Extract issues
|
|
425
|
+
issues_found = self._extract_issues_from_process_output(hook, result, status)
|
|
426
|
+
|
|
427
|
+
# Update status for reporting tools
|
|
428
|
+
status = self._update_status_for_reporting_tools(
|
|
429
|
+
hook, status, issues_found, result
|
|
430
|
+
)
|
|
431
|
+
|
|
432
|
+
# Parse hook output to extract file count
|
|
433
|
+
parsed_output = self._parse_hook_output(result, hook.name)
|
|
434
|
+
|
|
435
|
+
# Determine exit code and error message
|
|
436
|
+
exit_code, error_message = self._determine_exit_code_and_error(status, result)
|
|
437
|
+
|
|
438
|
+
# Handle case where hook failed but has no parsed issues
|
|
439
|
+
issues_found = self._handle_no_issues_for_failed_hook(
|
|
440
|
+
status, issues_found, result
|
|
441
|
+
)
|
|
442
|
+
|
|
443
|
+
# Calculate issue count
|
|
444
|
+
issues_count = self._calculate_issues_count(status, issues_found)
|
|
445
|
+
|
|
446
|
+
return HookResult(
|
|
447
|
+
id=hook.name,
|
|
448
|
+
name=hook.name,
|
|
449
|
+
status=status,
|
|
450
|
+
duration=duration,
|
|
451
|
+
files_processed=parsed_output["files_processed"],
|
|
452
|
+
issues_found=issues_found,
|
|
453
|
+
issues_count=issues_count,
|
|
454
|
+
stage=hook.stage.value,
|
|
455
|
+
exit_code=exit_code,
|
|
456
|
+
error_message=error_message,
|
|
457
|
+
is_timeout=False, # Set by timeout handler if applicable
|
|
458
|
+
)
|
|
459
|
+
|
|
460
|
+
def _determine_initial_status(
|
|
461
|
+
self, hook: HookDefinition, result: subprocess.CompletedProcess[str]
|
|
462
|
+
) -> str:
|
|
463
|
+
"""Determine the initial status of the hook."""
|
|
464
|
+
reporting_tools = {"complexipy", "refurb", "gitleaks", "creosote"}
|
|
465
|
+
|
|
466
|
+
if self.debug and hook.name in reporting_tools:
|
|
467
|
+
self.console.print(
|
|
468
|
+
f"[yellow]DEBUG _create_hook_result_from_process: hook={hook.name}, "
|
|
469
|
+
f"returncode={result.returncode}[/yellow]"
|
|
470
|
+
)
|
|
471
|
+
|
|
472
|
+
if hook.is_formatting and result.returncode == 1:
|
|
473
|
+
output_text = result.stdout + result.stderr
|
|
474
|
+
if "files were modified by this hook" in output_text:
|
|
475
|
+
return "passed"
|
|
476
|
+
else:
|
|
477
|
+
return "failed"
|
|
478
|
+
else:
|
|
479
|
+
# Initial status based on exit code
|
|
480
|
+
return "passed" if result.returncode == 0 else "failed"
|
|
481
|
+
|
|
482
|
+
def _update_status_for_reporting_tools(
|
|
483
|
+
self,
|
|
484
|
+
hook: HookDefinition,
|
|
485
|
+
status: str,
|
|
486
|
+
issues_found: list[str],
|
|
487
|
+
result: subprocess.CompletedProcess[str] | None = None,
|
|
488
|
+
) -> str:
|
|
489
|
+
"""Update status for reporting tools if there are issues."""
|
|
490
|
+
reporting_tools = {"complexipy", "refurb", "gitleaks", "creosote"}
|
|
491
|
+
|
|
492
|
+
if hook.name in reporting_tools and issues_found:
|
|
493
|
+
status = "failed"
|
|
494
|
+
|
|
495
|
+
# Debug: Log status for reporting tools
|
|
496
|
+
if hook.name in reporting_tools and self.debug and result:
|
|
497
|
+
self.console.print(
|
|
498
|
+
f"[yellow]DEBUG {hook.name}: returncode={result.returncode}, "
|
|
499
|
+
f"issues={len(issues_found)}, status={status}[/yellow]"
|
|
500
|
+
)
|
|
501
|
+
|
|
502
|
+
return status
|
|
503
|
+
|
|
504
|
+
def _determine_exit_code_and_error(
|
|
505
|
+
self, status: str, result: subprocess.CompletedProcess[str]
|
|
506
|
+
) -> tuple[int | None, str | None]:
|
|
507
|
+
"""Determine exit code and error message."""
|
|
508
|
+
exit_code = result.returncode if status == "failed" else None
|
|
509
|
+
error_message = None
|
|
510
|
+
if status == "failed" and result.stderr.strip():
|
|
511
|
+
# Capture stderr for failed hooks (truncate if very long)
|
|
512
|
+
error_message = result.stderr.strip()[:500]
|
|
513
|
+
return exit_code, error_message
|
|
514
|
+
|
|
515
|
+
def _handle_no_issues_for_failed_hook(
|
|
516
|
+
self,
|
|
517
|
+
status: str,
|
|
518
|
+
issues_found: list[str],
|
|
519
|
+
result: subprocess.CompletedProcess[str],
|
|
520
|
+
) -> list[str]:
|
|
521
|
+
"""Handle the case where a hook failed but has no parsed issues."""
|
|
522
|
+
if status == "failed" and not issues_found:
|
|
523
|
+
output_text = (result.stdout + result.stderr).strip()
|
|
524
|
+
if output_text:
|
|
525
|
+
# Split output into lines and take first 10 non-empty lines as issues
|
|
526
|
+
error_lines = [
|
|
527
|
+
line.strip() for line in output_text.split("\n") if line.strip()
|
|
528
|
+
][:10]
|
|
529
|
+
issues_found = error_lines or ["Hook failed with non-zero exit code"]
|
|
530
|
+
return issues_found
|
|
531
|
+
|
|
532
|
+
def _calculate_issues_count(self, status: str, issues_found: list[str]) -> int:
|
|
533
|
+
"""Calculate the number of issues."""
|
|
534
|
+
return max(len(issues_found), 1 if status == "failed" else 0)
|
|
535
|
+
|
|
536
|
+
def _extract_issues_from_process_output(
|
|
537
|
+
self,
|
|
538
|
+
hook: HookDefinition,
|
|
539
|
+
result: subprocess.CompletedProcess[str],
|
|
540
|
+
status: str,
|
|
541
|
+
) -> list[str]:
|
|
542
|
+
error_output = (result.stdout + result.stderr).strip()
|
|
543
|
+
|
|
544
|
+
# These tools are reporting/analysis tools that return exit code 0 even when finding issues
|
|
545
|
+
# They need special parsing regardless of exit code status
|
|
546
|
+
reporting_tools = {"complexipy", "refurb", "gitleaks", "creosote"}
|
|
547
|
+
|
|
548
|
+
if self.debug and hook.name in reporting_tools:
|
|
549
|
+
self.console.print(
|
|
550
|
+
f"[yellow]DEBUG _extract_issues: hook={hook.name}, status={status}, "
|
|
551
|
+
f"output_len={len(error_output)}[/yellow]"
|
|
552
|
+
)
|
|
553
|
+
|
|
554
|
+
# Handle special parsing tools first
|
|
555
|
+
if hook.name == "semgrep":
|
|
556
|
+
return self._parse_semgrep_issues(error_output)
|
|
557
|
+
|
|
558
|
+
# Handle reporting tools that always need parsing
|
|
559
|
+
if hook.name in reporting_tools:
|
|
560
|
+
return self._extract_issues_for_reporting_tools(hook, error_output)
|
|
561
|
+
|
|
562
|
+
# For non-reporting tools, only parse output if they failed
|
|
563
|
+
return self._extract_issues_for_regular_tools(
|
|
564
|
+
hook, error_output, status, result
|
|
565
|
+
)
|
|
566
|
+
|
|
567
|
+
def _extract_issues_for_reporting_tools(
|
|
568
|
+
self, hook: HookDefinition, error_output: str
|
|
569
|
+
) -> list[str]:
|
|
570
|
+
"""Extract issues from reporting tools."""
|
|
571
|
+
# Always parse output for reporting tools (they exit 0 even with findings)
|
|
572
|
+
if hook.name == "complexipy":
|
|
573
|
+
return self._parse_complexipy_issues(error_output)
|
|
574
|
+
if hook.name == "refurb":
|
|
575
|
+
return self._parse_refurb_issues(error_output)
|
|
576
|
+
if hook.name == "gitleaks":
|
|
577
|
+
return self._parse_gitleaks_issues(error_output)
|
|
578
|
+
if hook.name == "creosote":
|
|
579
|
+
return self._parse_creosote_issues(error_output)
|
|
580
|
+
return []
|
|
581
|
+
|
|
582
|
+
def _extract_issues_for_regular_tools(
|
|
583
|
+
self,
|
|
584
|
+
hook: HookDefinition,
|
|
585
|
+
error_output: str,
|
|
586
|
+
status: str,
|
|
587
|
+
result: subprocess.CompletedProcess[str],
|
|
588
|
+
) -> list[str]:
|
|
589
|
+
"""Extract issues from regular tools."""
|
|
590
|
+
# For non-reporting tools, only parse output if they failed
|
|
591
|
+
if status == "passed":
|
|
592
|
+
return []
|
|
593
|
+
|
|
594
|
+
if hook.is_formatting and "files were modified by this hook" in error_output:
|
|
595
|
+
return []
|
|
596
|
+
|
|
597
|
+
if error_output:
|
|
598
|
+
return [line.strip() for line in error_output.split("\n") if line.strip()]
|
|
599
|
+
|
|
600
|
+
return [f"Hook failed with code {result.returncode}"]
|
|
601
|
+
|
|
602
|
+
def _is_header_or_separator_line(self, line: str) -> bool:
|
|
603
|
+
"""Check if the line is a header or separator line."""
|
|
604
|
+
return any(x in line for x in ("Path", "─────", "┌", "└", "├", "┼", "┤", "┃"))
|
|
605
|
+
|
|
606
|
+
def _extract_complexity_from_parts(self, parts: list[str]) -> int | None:
|
|
607
|
+
"""Extract complexity value from line parts."""
|
|
608
|
+
if len(parts) >= 4:
|
|
609
|
+
with suppress(ValueError, IndexError):
|
|
610
|
+
return int(parts[-1])
|
|
611
|
+
return None
|
|
612
|
+
|
|
613
|
+
def _detect_package_from_output(self, output: str) -> str:
|
|
614
|
+
"""Auto-detect package name from tool output.
|
|
615
|
+
|
|
616
|
+
Looks for common patterns like:
|
|
617
|
+
- Table rows with paths: │ ./package_name/...
|
|
618
|
+
- File paths: package_name/file.py
|
|
619
|
+
|
|
620
|
+
Returns:
|
|
621
|
+
Detected package name, or falls back to pkg_path detection
|
|
622
|
+
"""
|
|
623
|
+
import re
|
|
624
|
+
from collections import Counter
|
|
625
|
+
|
|
626
|
+
# Try to extract from file paths in output (format: ./package_name/file.py)
|
|
627
|
+
path_pattern = r"\./([a-z_][a-z0-9_]*)/[a-z_]"
|
|
628
|
+
matches = re.findall(path_pattern, output, re.IGNORECASE)
|
|
629
|
+
|
|
630
|
+
if matches:
|
|
631
|
+
# Return most common package name found
|
|
632
|
+
return Counter(matches).most_common(1)[0][0]
|
|
633
|
+
|
|
634
|
+
# Fallback to detecting from pyproject.toml (existing logic)
|
|
635
|
+
from crackerjack.config.tool_commands import _detect_package_name_cached
|
|
636
|
+
|
|
637
|
+
return _detect_package_name_cached(str(self.pkg_path))
|
|
638
|
+
|
|
639
|
+
def _should_include_line(self, line: str, package_name: str) -> bool:
|
|
640
|
+
"""Check if the line should be included in the output.
|
|
641
|
+
|
|
642
|
+
Args:
|
|
643
|
+
line: Line from complexipy output
|
|
644
|
+
package_name: Name of the package being scanned
|
|
645
|
+
|
|
646
|
+
Returns:
|
|
647
|
+
True if line contains the package name and is a table row
|
|
648
|
+
"""
|
|
649
|
+
return "│" in line and package_name in line
|
|
650
|
+
|
|
651
|
+
def _parse_complexipy_issues(self, output: str) -> list[str]:
|
|
652
|
+
"""Parse complexipy table output to count actual violations (complexity > 15)."""
|
|
653
|
+
# Auto-detect package name from output
|
|
654
|
+
package_name = self._detect_package_from_output(output)
|
|
655
|
+
|
|
656
|
+
issues = []
|
|
657
|
+
for line in output.split("\n"):
|
|
658
|
+
# Match table rows: │ path │ file │ function │ complexity │
|
|
659
|
+
if self._should_include_line(line, package_name):
|
|
660
|
+
# Skip header/separator rows
|
|
661
|
+
if not self._is_header_or_separator_line(line):
|
|
662
|
+
# Extract complexity value (last column)
|
|
663
|
+
parts = [p.strip() for p in line.split("│") if p.strip()]
|
|
664
|
+
complexity = self._extract_complexity_from_parts(parts)
|
|
665
|
+
# Only count functions exceeding limit (15)
|
|
666
|
+
if complexity is not None and complexity > 15:
|
|
667
|
+
issues.append(line.strip())
|
|
668
|
+
return issues
|
|
669
|
+
|
|
670
|
+
def _parse_refurb_issues(self, output: str) -> list[str]:
|
|
671
|
+
"""Parse refurb output to count actual violations with shortened paths.
|
|
672
|
+
|
|
673
|
+
Refurb output format: "path/to/file.py: line: col [FURB###]: message"
|
|
674
|
+
Returns format: "relative/path.py:line [FURB###] message"
|
|
675
|
+
"""
|
|
676
|
+
import re
|
|
677
|
+
|
|
678
|
+
issues = []
|
|
679
|
+
for line in output.split("\n"):
|
|
680
|
+
if "[FURB" not in line or ":" not in line:
|
|
681
|
+
continue
|
|
682
|
+
|
|
683
|
+
# Match refurb format: path: line: col [FURB###]: message
|
|
684
|
+
# Example: ./crackerjack/core/phase.py: 42: 10 [FURB123]: Use dict.get() instead
|
|
685
|
+
# Note: Allow spaces after colons (": 42: 10" not ":42:10")
|
|
686
|
+
match = re.search(
|
|
687
|
+
r"(.+?):\s*(\d+):\s*\d+\s+\[(\w+)\]:\s*(.+)", line.strip()
|
|
688
|
+
)
|
|
689
|
+
|
|
690
|
+
if match:
|
|
691
|
+
file_path, line_num, error_code, message = match.groups()
|
|
692
|
+
|
|
693
|
+
# Shorten path to be relative to project root
|
|
694
|
+
short_path = self._shorten_path(file_path)
|
|
695
|
+
|
|
696
|
+
# Format: path:line [CODE] message
|
|
697
|
+
formatted = f"{short_path}:{line_num} [{error_code}] {message.strip()}"
|
|
698
|
+
issues.append(formatted)
|
|
699
|
+
else:
|
|
700
|
+
# Fallback: keep original line if parsing fails
|
|
701
|
+
issues.append(line.strip())
|
|
702
|
+
|
|
703
|
+
return issues

    def _shorten_path(self, path: str) -> str:
        """Shorten file path to be relative to project root.

        Args:
            path: Absolute or relative file path

        Returns:
            Shortened path relative to pkg_path, or basename if outside project
        """
        try:
            # Convert to Path object
            file_path = Path(path)

            # Try to make it relative to pkg_path if it's absolute
            if file_path.is_absolute():
                try:
                    relative = file_path.relative_to(self.pkg_path)
                    return str(relative).replace("\\", "/")
                except ValueError:
                    # Path is outside project, just use basename
                    return file_path.name

            # Already relative - clean up by removing leading "./"
            clean_path = str(file_path).lstrip("./")
            return clean_path.replace("\\", "/")

        except Exception:
            # Fallback: return original path
            return path

    def _parse_gitleaks_issues(self, output: str) -> list[str]:
        """Parse gitleaks output - ignore warnings, only count leaks."""
        # Gitleaks outputs "no leaks found" when clean
        if "no leaks found" in output.lower():
            return []
        return [
            line.strip()
            for line in output.split("\n")
            if not (
                "WRN" in line and "Invalid .gitleaksignore" in line
            )  # Skip warnings about .gitleaksignore format
            and any(
                x in line.lower() for x in ("leak", "secret", "credential", "api")
            )  # Look for actual leak findings
            and "found" not in line.lower()  # Skip summary lines
        ]

    def _parse_creosote_issues(self, output: str) -> list[str]:
        """Parse creosote output - only count unused dependencies."""
        if "No unused dependencies found" in output:
            return []
        issues = []
        parsing_unused = False
        for line in output.split("\n"):
            if "unused" in line.lower() and "dependenc" in line.lower():
                parsing_unused = True
                continue
            if parsing_unused and line.strip() and not line.strip().startswith("["):
                # Dependency names (not ANSI color codes)
                dep_name = line.strip().lstrip("- ")
                if dep_name:
                    issues.append(f"Unused dependency: {dep_name}")
            if not line.strip():
                parsing_unused = False
        return issues

    def _parse_semgrep_issues(self, output: str) -> list[str]:
        """Parse semgrep JSON output to extract both findings and errors.

        Semgrep returns JSON with two arrays:
        - "results": Security/code quality findings
        - "errors": Configuration, download, or execution errors

        Error categorization:
        - CODE_ERROR_TYPES: Actual code issues that should fail the build
        - INFRA_ERROR_TYPES: Infrastructure issues (network, timeouts) that should warn only

        This method extracts issues from both arrays to provide comprehensive error reporting.
        """
        import json

        try:
            # Try to parse as JSON
            json_data = json.loads(output.strip())
            issues = []

            # Extract findings from results array
            issues.extend(self._extract_semgrep_results(json_data))

            # Extract errors from errors array with categorization
            issues.extend(self._extract_semgrep_errors(json_data))

            return issues

        except json.JSONDecodeError:
            # If JSON parsing fails, return raw output (shouldn't happen with --json flag)
            if output.strip():
                return [line.strip() for line in output.split("\n") if line.strip()][
                    :10
                ]

            return []

    def _extract_semgrep_results(self, json_data: dict) -> list[str]:
        """Extract findings from semgrep results."""
        issues = []
        for result in json_data.get("results", []):
            # Format: "file.py:line - rule_id: message"
            path = result.get("path", "unknown")
            line_num = result.get("start", {}).get("line", "?")
            rule_id = result.get("check_id", "unknown-rule")
            message = result.get("extra", {}).get("message", "Security issue detected")
            issues.append(f"{path}:{line_num} - {rule_id}: {message}")
        return issues

    def _extract_semgrep_errors(self, json_data: dict) -> list[str]:
        """Extract errors from semgrep errors with categorization."""
        issues = []
        INFRA_ERROR_TYPES = {
            "NetworkError",
            "DownloadError",
            "TimeoutError",
            "ConnectionError",
            "HTTPError",
            "SSLError",
        }

        for error in json_data.get("errors", []):
            error_type = error.get("type", "SemgrepError")
            error_msg = error.get("message", str(error))

            # Infrastructure errors: warn but don't fail the build
            if error_type in INFRA_ERROR_TYPES:
                self.console.print(
                    f"[yellow]Warning: Semgrep infrastructure error: "
                    f"{error_type}: {error_msg}[/yellow]"
                )
            else:
                # Code/config errors: add to issues (will fail the build)
                issues.append(f"{error_type}: {error_msg}")
        return issues
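
# Illustrative sketch (not part of this diff): a fabricated payload shaped like
# the fields the two extractors above read (path, start.line, check_id,
# extra.message, errors[].type); the rule id and message are invented.
_payload = {
    "results": [
        {
            "path": "crackerjack/__main__.py",
            "start": {"line": 12},
            "check_id": "python.lang.security.example-rule",
            "extra": {"message": "Avoid eval()"},
        }
    ],
    "errors": [{"type": "NetworkError", "message": "registry unreachable"}],
}
_findings = [
    f"{r.get('path', 'unknown')}:{r.get('start', {}).get('line', '?')}"
    f" - {r.get('check_id', 'unknown-rule')}: {r.get('extra', {}).get('message', '')}"
    for r in _payload.get("results", [])
]
print(_findings)
# ['crackerjack/__main__.py:12 - python.lang.security.example-rule: Avoid eval()']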

    def _create_timeout_result(
        self, hook: HookDefinition, start_time: float
    ) -> HookResult:
        duration = time.time() - start_time
        return HookResult(
            id=hook.name,
            name=hook.name,
            status="timeout",
            duration=duration,
            issues_found=[f"Hook timed out after {duration:.1f}s"],
            issues_count=1,  # Timeout counts as 1 issue
            stage=hook.stage.value,
            exit_code=124,  # Standard timeout exit code
            error_message=f"Execution exceeded timeout of {duration:.1f}s",
            is_timeout=True,
        )

    def _create_error_result(
        self, hook: HookDefinition, start_time: float, error: Exception
    ) -> HookResult:
        duration = time.time() - start_time
        return HookResult(
            id=hook.name,
            name=hook.name,
            status="error",
            duration=duration,
            issues_found=[str(error)],
            issues_count=1,  # Error counts as 1 issue
            stage=hook.stage.value,
            exit_code=1,
            error_message=str(error),
            is_timeout=False,
        )

    def _parse_hook_output(
        self,
        result: subprocess.CompletedProcess[str],
        hook_name: str = "",
    ) -> dict[str, t.Any]:
        output = result.stdout + result.stderr

        # Special handling for semgrep to count files with issues, not total files scanned
        if hook_name == "semgrep":
            files_processed = self._parse_semgrep_output(result)
        else:
            files_processed = self._parse_generic_hook_output(output)

        return self._create_parse_result(files_processed, result.returncode, output)

    def _is_semgrep_output(self, output: str, args_str: str) -> bool:
        """Check if the output is from semgrep."""
        return "semgrep" in output.lower() or "semgrep" in args_str.lower()

    def _create_parse_result(
        self, files_processed: int, exit_code: int, output: str
    ) -> dict[str, t.Any]:
        """Create the parse result dictionary."""
        return {
            "hook_id": None,
            "exit_code": exit_code,
            "files_processed": files_processed,
            "issues": [],
            "raw_output": output,
        }

    def _parse_semgrep_output(
        self,
        result: subprocess.CompletedProcess[str],
    ) -> int:
        """Parse Semgrep output to count files with issues, not total files scanned."""

        # Try to extract JSON output from semgrep (if available)
        # Semgrep JSON output contains results with file paths
        json_files = self._parse_semgrep_json_output(result)
        if json_files is not None and json_files >= 0:
            # Successfully parsed JSON - return result (including 0 for no issues)
            return json_files

        # If we couldn't extract from JSON, try to parse from text output
        return self._parse_semgrep_text_output(result.stdout + result.stderr)

    def _parse_semgrep_json_output(
        self,
        result: subprocess.CompletedProcess[str],
    ) -> int | None:
        """Parse Semgrep JSON output to count unique files with issues.

        Returns:
            int: Number of files with issues if JSON parsed successfully (including 0)
            None: If JSON parsing failed
        """
        # Look for JSON output between potentially mixed text output
        output = result.stdout + result.stderr
        return self._process_output_for_json(output)

    def _process_output_for_json(self, output: str) -> int | None:
        """Process output looking for JSON content.

        Returns:
            int: Number of files if JSON found (including 0 for no issues)
            None: If no valid JSON found
        """
        lines = output.splitlines()
        for line in lines:
            result = self._try_parse_line_json(line)
            if result is not None:
                return result
        return None

    def _try_parse_line_json(self, line: str) -> int | None:
        """Try to parse a line as JSON, checking both pure JSON and JSON with text.

        Returns:
            int: Number of files if JSON parsed successfully (including 0)
            None: If JSON parsing failed
        """
        line = line.strip()
        # Check if it's a pure JSON object
        if self._is_pure_json(line):
            result = self._parse_json_line(line)
            if result is not None:
                return result
        # Check if it contains JSON results
        if self._contains_json_results(line):
            result = self._parse_json_line(line)
            if result is not None:
                return result
        return None

    def _is_pure_json(self, line: str) -> bool:
        """Check if a line is a pure JSON object."""
        return line.startswith("{") and line.endswith("}")

    def _contains_json_results(self, line: str) -> bool:
        """Check if a line contains JSON results."""
        return '"results":' in line

    def _parse_json_line(self, line: str) -> int | None:
        """Parse a single JSON line to extract file count.

        Returns:
            int: Number of unique files with issues if JSON is valid (including 0)
            None: If JSON parsing failed
        """
        try:
            json_data = json.loads(line)
            if "results" in json_data:
                # Count unique file paths in results
                file_paths = {
                    result.get("path") for result in json_data.get("results", [])
                }
                return len([p for p in file_paths if p])  # Filter out None values
        except json.JSONDecodeError:
            pass
        return None
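
# Illustrative sketch (not part of this diff): the set comprehension above
# dedupes by file path, so several findings in one file count once; shown here
# on a fabricated "results" list.
import json as _json

_line = _json.dumps(
    {
        "results": [
            {"path": "crackerjack/a.py"},
            {"path": "crackerjack/a.py"},
            {"path": "crackerjack/b.py"},
        ]
    }
)
_data = _json.loads(_line)
_paths = {r.get("path") for r in _data.get("results", [])}
print(len([p for p in _paths if p]))  # 2 files with issues, not 3 findings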

    def _parse_semgrep_text_output(self, output: str) -> int:
        """Parse Semgrep text output to extract file count."""
        import re

        # Look for patterns in Semgrep output that indicate findings
        # Example: "found 3 issues in 2 files" or "found no issues"
        semgrep_patterns = [
            r"found\s+(\d+)\s+issues?\s+in\s+(\d+)\s+files?",
            r"found\s+no\s+issues",
            r"scanning\s+(\d+)\s+files?",
        ]

        for pattern in semgrep_patterns:
            matches = re.findall(pattern, output, re.IGNORECASE)
            if matches:
                result = self._process_matches(matches, output)
                if result != -1:  # -1 means "continue to next pattern"
                    return result

        return 0

    def _process_matches(self, matches: list, output: str) -> int:
        """Process regex matches to extract file count."""
        for match in matches:
            if isinstance(match, tuple):
                if len(match) == 2:  # "found X issues in Y files" pattern
                    return self._handle_issues_in_files_match(match)
                elif len(match) == 1 and "no issues" not in output.lower():
                    # This would be from "scanning X files" - we don't want this for the files_processed
                    continue  # Return -1 to indicate continue
            elif "no issues" in output.lower():
                return 0
        return -1  # Indicates to continue to next pattern

    def _handle_issues_in_files_match(self, match: tuple) -> int:
        """Handle the 'found X issues in Y files' match."""
        issue_count, file_count = int(match[0]), int(match[1])
        # Use the number of files with issues, not total files scanned
        return file_count if issue_count > 0 else 0
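
# Illustrative sketch (not part of this diff): the tuple check above works
# because re.findall returns 2-tuples when a pattern has two capture groups;
# the sample text below is invented.
import re

_text = "found 3 issues in 2 files"
_matches = re.findall(
    r"found\s+(\d+)\s+issues?\s+in\s+(\d+)\s+files?", _text, re.IGNORECASE
)
print(_matches)  # [('3', '2')]
_issues, _files = int(_matches[0][0]), int(_matches[0][1])
print(_files if _issues > 0 else 0)  # 2 -> files with issues, not files scanned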

    def _parse_generic_hook_output(self, output: str) -> int:
        """Parse output from other hooks (non-semgrep) to extract file count."""
        files_processed = 0

        # Check for common patterns in hook output (for other tools)
        if "files" in output.lower():
            files_processed = self._extract_file_count_from_patterns(output)

        # Special handling for ruff and other common tools
        if not files_processed and "ruff" in output.lower():
            # Look for patterns like "All checks passed!" with files processed elsewhere
            files_processed = self._extract_file_count_for_ruff_like_tools(output)

        return files_processed

    def _extract_file_count_from_patterns(self, output: str) -> int:
        """Extract file counts from common patterns in hook output."""
        import re

        # Pattern for "N file(s)" in output - return the highest found number
        all_matches = []
        file_count_patterns = [
            r"(\d+)\s+files?\s+would\s+be",  # "X files would be reformatted"
            r"(\d+)\s+files?\s+already\s+formatted",  # "X files already formatted"
            r"(\d+)\s+files?\s+processed",  # "X files processed"
            r"(\d+)\s+files?\s+checked",  # "X files checked"
            r"(\d+)\s+files?\s+analyzed",  # "X files analyzed"
            r"Checking\s+(\d+)\s+files?",  # "Checking 5 files"
            r"Found\s+(\d+)\s+files?",  # "Found 5 files"
            r"(\d+)\s+files?",  # "5 files" or "1 file" (general pattern)
        ]
        for pattern in file_count_patterns:
            matches = re.findall(pattern, output, re.IGNORECASE)
            if matches:
                # Convert all matches to integers and add to list
                all_matches.extend([int(m) for m in matches if m.isdigit()])

        # Use the highest value found
        if all_matches:
            return max(all_matches)

        return 0
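
# Illustrative sketch (not part of this diff): the "take the highest number"
# heuristic above, applied to a fabricated formatter transcript.
import re

_output = "Checking 12 files\n2 files would be reformatted, 10 files already formatted"
_counts: list[int] = []
for _pattern in (
    r"(\d+)\s+files?\s+would\s+be",
    r"Checking\s+(\d+)\s+files?",
    r"(\d+)\s+files?",
):
    _counts.extend(int(m) for m in re.findall(_pattern, _output, re.IGNORECASE) if m.isdigit())
print(max(_counts) if _counts else 0)  # 12 - the highest count wins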

    def _extract_file_count_for_ruff_like_tools(self, output: str) -> int:
        """Extract file counts for ruff-like tools that don't report files when all pass."""
        import re

        # Look for patterns like "All checks passed!" with files processed elsewhere
        all_passed_match = re.search(r"All\s+checks?\s+passed!", output, re.IGNORECASE)
        if all_passed_match:
            # For all-checks-passed scenarios, try to find other mentions of file counts
            other_matches = re.findall(r"(\d+)\s+files?", output, re.IGNORECASE)
            if other_matches:
                all_matches = [int(m) for m in other_matches if m.isdigit()]
                if all_matches:
                    return max(all_matches)  # Use highest value found

        return 0

    def _display_hook_result(self, result: HookResult) -> None:
        if self.quiet:
            return
        status_icon = "✅" if result.status == "passed" else "❌"

        max_width = get_console_width()
        content_width = max_width - 4  # Adjusted for icon and padding

        if len(result.name) > content_width:
            line = result.name[: content_width - 3] + "..."
        else:
            dots_needed = max(0, content_width - len(result.name))
            line = result.name + ("." * dots_needed)

        # Real-time inline hook status (dotted-line format)
        self.console.print(f"{line} {status_icon}")

    def _handle_retries(
        self,
        strategy: HookStrategy,
        results: list[HookResult],
    ) -> list[HookResult]:
        if strategy.retry_policy == RetryPolicy.FORMATTING_ONLY:
            return self._retry_formatting_hooks(strategy, results)
        if strategy.retry_policy == RetryPolicy.ALL_HOOKS:
            return self._retry_all_hooks(strategy, results)
        return results

    def _retry_formatting_hooks(
        self,
        strategy: HookStrategy,
        results: list[HookResult],
    ) -> list[HookResult]:
        formatting_hooks_failed = self._find_failed_formatting_hooks(strategy, results)

        if not formatting_hooks_failed:
            return results

        return self._retry_all_formatting_hooks(strategy, results)

    def _find_failed_formatting_hooks(
        self, strategy: HookStrategy, results: list[HookResult]
    ) -> set[str]:
        """Find the names of formatting hooks that failed."""
        formatting_hooks_failed: set[str] = set()

        for i, result in enumerate(results):
            hook = strategy.hooks[i]
            if hook.is_formatting and result.status == "failed":
                formatting_hooks_failed.add(hook.name)

        return formatting_hooks_failed

    def _retry_all_formatting_hooks(
        self, strategy: HookStrategy, results: list[HookResult]
    ) -> list[HookResult]:
        """Retry all formatting hooks."""
        updated_results: list[HookResult] = []
        for i, hook in enumerate(strategy.hooks):
            prev_result = results[i]
            new_result = self.execute_single_hook(hook)

            new_result.duration += prev_result.duration
            updated_results.append(new_result)
            self._display_hook_result(new_result)

        return updated_results

    def _retry_all_hooks(
        self,
        strategy: HookStrategy,
        results: list[HookResult],
    ) -> list[HookResult]:
        failed_hooks = self._find_failed_hooks(results)

        if not failed_hooks:
            return results

        return self._retry_failed_hooks(strategy, results, failed_hooks)

    def _find_failed_hooks(self, results: list[HookResult]) -> list[int]:
        """Find the indices of hooks that failed."""
        return [i for i, r in enumerate(results) if r.status == "failed"]

    def _retry_failed_hooks(
        self, strategy: HookStrategy, results: list[HookResult], failed_hooks: list[int]
    ) -> list[HookResult]:
        """Retry the failed hooks."""
        updated_results: list[HookResult] = results.copy()
        for i in failed_hooks:
            self._retry_single_hook(strategy, results, updated_results, i)
        return updated_results

    def _retry_single_hook(
        self,
        strategy: HookStrategy,
        results: list[HookResult],
        updated_results: list[HookResult],
        hook_idx: int,
    ) -> None:
        """Retry a single hook."""
        hook = strategy.hooks[hook_idx]
        prev_result = results[hook_idx]
        new_result = self.execute_single_hook(hook)

        new_result.duration += prev_result.duration
        updated_results[hook_idx] = new_result
        self._display_hook_result(new_result)

    def _get_clean_environment(self) -> dict[str, str]:
        clean_env = self._get_base_environment()

        self._update_path(clean_env)

        security_logger = get_security_logger()
        python_vars_to_exclude = self._get_python_vars_to_exclude()

        original_count = len(os.environ)
        filtered_count = 0

        for key, value in os.environ.items():
            if key not in python_vars_to_exclude and key not in clean_env:
                if not key.startswith(
                    ("PYTHON", "PIP_", "CONDA_", "VIRTUAL_", "__PYVENV")
                ):
                    if key not in {"LD_PRELOAD", "DYLD_INSERT_LIBRARIES", "IFS", "PS4"}:
                        clean_env[key] = value
                    else:
                        filtered_count += 1
                        security_logger.log_environment_variable_filtered(
                            variable_name=key,
                            reason="dangerous environment variable",
                            value_preview=(value[:50] if value else "")[:50],
                        )
                else:
                    filtered_count += 1

        if filtered_count > 5:
            security_logger.log_subprocess_environment_sanitized(
                original_count=original_count,
                sanitized_count=len(clean_env),
                filtered_vars=[],
            )

        return clean_env

    def _get_base_environment(self) -> dict[str, str]:
        """Get the base environment variables."""
        return {
            "HOME": os.environ.get("HOME", ""),
            "USER": os.environ.get("USER", ""),
            "SHELL": os.environ.get("SHELL", "/bin/bash"),
            "LANG": os.environ.get("LANG", "en_US.UTF-8"),
            "LC_ALL": os.environ.get("LC_ALL", ""),
            "TERM": os.environ.get("TERM", "xterm-256color"),
        }

    def _update_path(self, clean_env: dict[str, str]) -> None:
        """Update the PATH environment variable."""
        system_path = os.environ.get("PATH", "")
        if system_path:
            venv_bin = str(Path(self.pkg_path) / ".venv" / "bin")
            path_parts = [p for p in system_path.split(os.pathsep) if p != venv_bin]
            clean_env["PATH"] = os.pathsep.join(path_parts)

    def _get_python_vars_to_exclude(self) -> set[str]:
        """Get the set of Python variables to exclude."""
        return {
            "VIRTUAL_ENV",
            "PYTHONPATH",
            "PYTHON_PATH",
            "PIP_CONFIG_FILE",
            "PYTHONHOME",
            "CONDA_DEFAULT_ENV",
            "PIPENV_ACTIVE",
            "POETRY_ACTIVE",
        }
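
# Illustrative sketch (not part of this diff): a condensed, standalone version
# of the sanitization policy implemented by the three helpers above - small
# allowlisted base, drop Python/virtualenv variables and a few dangerous ones.
# The variable names are simplified and the security logger is omitted.
import os

_EXCLUDE = {"VIRTUAL_ENV", "PYTHONPATH", "PYTHONHOME", "PIP_CONFIG_FILE"}
_DANGEROUS = {"LD_PRELOAD", "DYLD_INSERT_LIBRARIES", "IFS", "PS4"}
_PREFIXES = ("PYTHON", "PIP_", "CONDA_", "VIRTUAL_", "__PYVENV")

_clean = {"HOME": os.environ.get("HOME", ""), "PATH": os.environ.get("PATH", "")}
for _key, _value in os.environ.items():
    if _key in _clean or _key in _EXCLUDE or _key in _DANGEROUS:
        continue
    if _key.startswith(_PREFIXES):
        continue
    _clean[_key] = _value
print(sorted(k for k in os.environ if k not in _clean))  # variables that were filtered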

    def _print_summary(
        self,
        strategy: HookStrategy,
        results: list[HookResult],
        success: bool,
        performance_gain: float,
    ) -> None:
        if success:
            mode = "async" if self.is_concurrent(strategy) else "sequential"
            self.console.print(
                f"[green]✅[/green] {strategy.name.title()} hooks passed: {len(results)} / {len(results)} "
                f"({mode}, {performance_gain:.1f}% faster)",
            )

    def is_concurrent(self, strategy: HookStrategy) -> bool:
        return strategy.parallel and len(strategy.hooks) > 1