crackerjack 0.18.2__py3-none-any.whl → 0.45.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- crackerjack/README.md +19 -0
- crackerjack/__init__.py +96 -2
- crackerjack/__main__.py +637 -138
- crackerjack/adapters/README.md +18 -0
- crackerjack/adapters/__init__.py +39 -0
- crackerjack/adapters/_output_paths.py +167 -0
- crackerjack/adapters/_qa_adapter_base.py +309 -0
- crackerjack/adapters/_tool_adapter_base.py +706 -0
- crackerjack/adapters/ai/README.md +65 -0
- crackerjack/adapters/ai/__init__.py +5 -0
- crackerjack/adapters/ai/claude.py +853 -0
- crackerjack/adapters/complexity/README.md +53 -0
- crackerjack/adapters/complexity/__init__.py +10 -0
- crackerjack/adapters/complexity/complexipy.py +641 -0
- crackerjack/adapters/dependency/__init__.py +22 -0
- crackerjack/adapters/dependency/pip_audit.py +418 -0
- crackerjack/adapters/format/README.md +72 -0
- crackerjack/adapters/format/__init__.py +11 -0
- crackerjack/adapters/format/mdformat.py +313 -0
- crackerjack/adapters/format/ruff.py +516 -0
- crackerjack/adapters/lint/README.md +47 -0
- crackerjack/adapters/lint/__init__.py +11 -0
- crackerjack/adapters/lint/codespell.py +273 -0
- crackerjack/adapters/lsp/README.md +49 -0
- crackerjack/adapters/lsp/__init__.py +27 -0
- crackerjack/adapters/lsp/_base.py +194 -0
- crackerjack/adapters/lsp/_client.py +358 -0
- crackerjack/adapters/lsp/_manager.py +193 -0
- crackerjack/adapters/lsp/skylos.py +283 -0
- crackerjack/adapters/lsp/zuban.py +557 -0
- crackerjack/adapters/refactor/README.md +59 -0
- crackerjack/adapters/refactor/__init__.py +12 -0
- crackerjack/adapters/refactor/creosote.py +318 -0
- crackerjack/adapters/refactor/refurb.py +406 -0
- crackerjack/adapters/refactor/skylos.py +494 -0
- crackerjack/adapters/sast/README.md +132 -0
- crackerjack/adapters/sast/__init__.py +32 -0
- crackerjack/adapters/sast/_base.py +201 -0
- crackerjack/adapters/sast/bandit.py +423 -0
- crackerjack/adapters/sast/pyscn.py +405 -0
- crackerjack/adapters/sast/semgrep.py +241 -0
- crackerjack/adapters/security/README.md +111 -0
- crackerjack/adapters/security/__init__.py +17 -0
- crackerjack/adapters/security/gitleaks.py +339 -0
- crackerjack/adapters/type/README.md +52 -0
- crackerjack/adapters/type/__init__.py +12 -0
- crackerjack/adapters/type/pyrefly.py +402 -0
- crackerjack/adapters/type/ty.py +402 -0
- crackerjack/adapters/type/zuban.py +522 -0
- crackerjack/adapters/utility/README.md +51 -0
- crackerjack/adapters/utility/__init__.py +10 -0
- crackerjack/adapters/utility/checks.py +884 -0
- crackerjack/agents/README.md +264 -0
- crackerjack/agents/__init__.py +66 -0
- crackerjack/agents/architect_agent.py +238 -0
- crackerjack/agents/base.py +167 -0
- crackerjack/agents/claude_code_bridge.py +641 -0
- crackerjack/agents/coordinator.py +600 -0
- crackerjack/agents/documentation_agent.py +520 -0
- crackerjack/agents/dry_agent.py +585 -0
- crackerjack/agents/enhanced_coordinator.py +279 -0
- crackerjack/agents/enhanced_proactive_agent.py +185 -0
- crackerjack/agents/error_middleware.py +53 -0
- crackerjack/agents/formatting_agent.py +230 -0
- crackerjack/agents/helpers/__init__.py +9 -0
- crackerjack/agents/helpers/performance/__init__.py +22 -0
- crackerjack/agents/helpers/performance/performance_ast_analyzer.py +357 -0
- crackerjack/agents/helpers/performance/performance_pattern_detector.py +909 -0
- crackerjack/agents/helpers/performance/performance_recommender.py +572 -0
- crackerjack/agents/helpers/refactoring/__init__.py +22 -0
- crackerjack/agents/helpers/refactoring/code_transformer.py +536 -0
- crackerjack/agents/helpers/refactoring/complexity_analyzer.py +344 -0
- crackerjack/agents/helpers/refactoring/dead_code_detector.py +437 -0
- crackerjack/agents/helpers/test_creation/__init__.py +19 -0
- crackerjack/agents/helpers/test_creation/test_ast_analyzer.py +216 -0
- crackerjack/agents/helpers/test_creation/test_coverage_analyzer.py +643 -0
- crackerjack/agents/helpers/test_creation/test_template_generator.py +1031 -0
- crackerjack/agents/import_optimization_agent.py +1181 -0
- crackerjack/agents/performance_agent.py +325 -0
- crackerjack/agents/performance_helpers.py +205 -0
- crackerjack/agents/proactive_agent.py +55 -0
- crackerjack/agents/refactoring_agent.py +511 -0
- crackerjack/agents/refactoring_helpers.py +247 -0
- crackerjack/agents/security_agent.py +793 -0
- crackerjack/agents/semantic_agent.py +479 -0
- crackerjack/agents/semantic_helpers.py +356 -0
- crackerjack/agents/test_creation_agent.py +570 -0
- crackerjack/agents/test_specialist_agent.py +526 -0
- crackerjack/agents/tracker.py +110 -0
- crackerjack/api.py +647 -0
- crackerjack/cli/README.md +394 -0
- crackerjack/cli/__init__.py +24 -0
- crackerjack/cli/cache_handlers.py +209 -0
- crackerjack/cli/cache_handlers_enhanced.py +680 -0
- crackerjack/cli/facade.py +162 -0
- crackerjack/cli/formatting.py +13 -0
- crackerjack/cli/handlers/__init__.py +85 -0
- crackerjack/cli/handlers/advanced.py +103 -0
- crackerjack/cli/handlers/ai_features.py +62 -0
- crackerjack/cli/handlers/analytics.py +479 -0
- crackerjack/cli/handlers/changelog.py +271 -0
- crackerjack/cli/handlers/config_handlers.py +16 -0
- crackerjack/cli/handlers/coverage.py +84 -0
- crackerjack/cli/handlers/documentation.py +280 -0
- crackerjack/cli/handlers/main_handlers.py +497 -0
- crackerjack/cli/handlers/monitoring.py +371 -0
- crackerjack/cli/handlers.py +700 -0
- crackerjack/cli/interactive.py +488 -0
- crackerjack/cli/options.py +1216 -0
- crackerjack/cli/semantic_handlers.py +292 -0
- crackerjack/cli/utils.py +19 -0
- crackerjack/cli/version.py +19 -0
- crackerjack/code_cleaner.py +1307 -0
- crackerjack/config/README.md +472 -0
- crackerjack/config/__init__.py +275 -0
- crackerjack/config/global_lock_config.py +207 -0
- crackerjack/config/hooks.py +390 -0
- crackerjack/config/loader.py +239 -0
- crackerjack/config/settings.py +141 -0
- crackerjack/config/tool_commands.py +331 -0
- crackerjack/core/README.md +393 -0
- crackerjack/core/__init__.py +0 -0
- crackerjack/core/async_workflow_orchestrator.py +738 -0
- crackerjack/core/autofix_coordinator.py +282 -0
- crackerjack/core/container.py +105 -0
- crackerjack/core/enhanced_container.py +583 -0
- crackerjack/core/file_lifecycle.py +472 -0
- crackerjack/core/performance.py +244 -0
- crackerjack/core/performance_monitor.py +357 -0
- crackerjack/core/phase_coordinator.py +1227 -0
- crackerjack/core/proactive_workflow.py +267 -0
- crackerjack/core/resource_manager.py +425 -0
- crackerjack/core/retry.py +275 -0
- crackerjack/core/service_watchdog.py +601 -0
- crackerjack/core/session_coordinator.py +239 -0
- crackerjack/core/timeout_manager.py +563 -0
- crackerjack/core/websocket_lifecycle.py +410 -0
- crackerjack/core/workflow/__init__.py +21 -0
- crackerjack/core/workflow/workflow_ai_coordinator.py +863 -0
- crackerjack/core/workflow/workflow_event_orchestrator.py +1107 -0
- crackerjack/core/workflow/workflow_issue_parser.py +714 -0
- crackerjack/core/workflow/workflow_phase_executor.py +1158 -0
- crackerjack/core/workflow/workflow_security_gates.py +400 -0
- crackerjack/core/workflow_orchestrator.py +2243 -0
- crackerjack/data/README.md +11 -0
- crackerjack/data/__init__.py +8 -0
- crackerjack/data/models.py +79 -0
- crackerjack/data/repository.py +210 -0
- crackerjack/decorators/README.md +180 -0
- crackerjack/decorators/__init__.py +35 -0
- crackerjack/decorators/error_handling.py +649 -0
- crackerjack/decorators/error_handling_decorators.py +334 -0
- crackerjack/decorators/helpers.py +58 -0
- crackerjack/decorators/patterns.py +281 -0
- crackerjack/decorators/utils.py +58 -0
- crackerjack/docs/INDEX.md +11 -0
- crackerjack/docs/README.md +11 -0
- crackerjack/docs/generated/api/API_REFERENCE.md +10895 -0
- crackerjack/docs/generated/api/CLI_REFERENCE.md +109 -0
- crackerjack/docs/generated/api/CROSS_REFERENCES.md +1755 -0
- crackerjack/docs/generated/api/PROTOCOLS.md +3 -0
- crackerjack/docs/generated/api/SERVICES.md +1252 -0
- crackerjack/documentation/README.md +11 -0
- crackerjack/documentation/__init__.py +31 -0
- crackerjack/documentation/ai_templates.py +756 -0
- crackerjack/documentation/dual_output_generator.py +767 -0
- crackerjack/documentation/mkdocs_integration.py +518 -0
- crackerjack/documentation/reference_generator.py +1065 -0
- crackerjack/dynamic_config.py +678 -0
- crackerjack/errors.py +378 -0
- crackerjack/events/README.md +11 -0
- crackerjack/events/__init__.py +16 -0
- crackerjack/events/telemetry.py +175 -0
- crackerjack/events/workflow_bus.py +346 -0
- crackerjack/exceptions/README.md +301 -0
- crackerjack/exceptions/__init__.py +5 -0
- crackerjack/exceptions/config.py +4 -0
- crackerjack/exceptions/tool_execution_error.py +245 -0
- crackerjack/executors/README.md +591 -0
- crackerjack/executors/__init__.py +13 -0
- crackerjack/executors/async_hook_executor.py +938 -0
- crackerjack/executors/cached_hook_executor.py +316 -0
- crackerjack/executors/hook_executor.py +1295 -0
- crackerjack/executors/hook_lock_manager.py +708 -0
- crackerjack/executors/individual_hook_executor.py +739 -0
- crackerjack/executors/lsp_aware_hook_executor.py +349 -0
- crackerjack/executors/progress_hook_executor.py +282 -0
- crackerjack/executors/tool_proxy.py +433 -0
- crackerjack/hooks/README.md +485 -0
- crackerjack/hooks/lsp_hook.py +93 -0
- crackerjack/intelligence/README.md +557 -0
- crackerjack/intelligence/__init__.py +37 -0
- crackerjack/intelligence/adaptive_learning.py +693 -0
- crackerjack/intelligence/agent_orchestrator.py +485 -0
- crackerjack/intelligence/agent_registry.py +377 -0
- crackerjack/intelligence/agent_selector.py +439 -0
- crackerjack/intelligence/integration.py +250 -0
- crackerjack/interactive.py +719 -0
- crackerjack/managers/README.md +369 -0
- crackerjack/managers/__init__.py +11 -0
- crackerjack/managers/async_hook_manager.py +135 -0
- crackerjack/managers/hook_manager.py +585 -0
- crackerjack/managers/publish_manager.py +631 -0
- crackerjack/managers/test_command_builder.py +391 -0
- crackerjack/managers/test_executor.py +474 -0
- crackerjack/managers/test_manager.py +1357 -0
- crackerjack/managers/test_progress.py +187 -0
- crackerjack/mcp/README.md +374 -0
- crackerjack/mcp/__init__.py +0 -0
- crackerjack/mcp/cache.py +352 -0
- crackerjack/mcp/client_runner.py +121 -0
- crackerjack/mcp/context.py +802 -0
- crackerjack/mcp/dashboard.py +657 -0
- crackerjack/mcp/enhanced_progress_monitor.py +493 -0
- crackerjack/mcp/file_monitor.py +394 -0
- crackerjack/mcp/progress_components.py +607 -0
- crackerjack/mcp/progress_monitor.py +1016 -0
- crackerjack/mcp/rate_limiter.py +336 -0
- crackerjack/mcp/server.py +24 -0
- crackerjack/mcp/server_core.py +526 -0
- crackerjack/mcp/service_watchdog.py +505 -0
- crackerjack/mcp/state.py +407 -0
- crackerjack/mcp/task_manager.py +259 -0
- crackerjack/mcp/tools/README.md +27 -0
- crackerjack/mcp/tools/__init__.py +19 -0
- crackerjack/mcp/tools/core_tools.py +469 -0
- crackerjack/mcp/tools/error_analyzer.py +283 -0
- crackerjack/mcp/tools/execution_tools.py +384 -0
- crackerjack/mcp/tools/intelligence_tool_registry.py +46 -0
- crackerjack/mcp/tools/intelligence_tools.py +264 -0
- crackerjack/mcp/tools/monitoring_tools.py +628 -0
- crackerjack/mcp/tools/proactive_tools.py +367 -0
- crackerjack/mcp/tools/progress_tools.py +222 -0
- crackerjack/mcp/tools/semantic_tools.py +584 -0
- crackerjack/mcp/tools/utility_tools.py +358 -0
- crackerjack/mcp/tools/workflow_executor.py +699 -0
- crackerjack/mcp/websocket/README.md +31 -0
- crackerjack/mcp/websocket/__init__.py +14 -0
- crackerjack/mcp/websocket/app.py +54 -0
- crackerjack/mcp/websocket/endpoints.py +492 -0
- crackerjack/mcp/websocket/event_bridge.py +188 -0
- crackerjack/mcp/websocket/jobs.py +406 -0
- crackerjack/mcp/websocket/monitoring/__init__.py +25 -0
- crackerjack/mcp/websocket/monitoring/api/__init__.py +19 -0
- crackerjack/mcp/websocket/monitoring/api/dependencies.py +141 -0
- crackerjack/mcp/websocket/monitoring/api/heatmap.py +154 -0
- crackerjack/mcp/websocket/monitoring/api/intelligence.py +199 -0
- crackerjack/mcp/websocket/monitoring/api/metrics.py +203 -0
- crackerjack/mcp/websocket/monitoring/api/telemetry.py +101 -0
- crackerjack/mcp/websocket/monitoring/dashboard.py +18 -0
- crackerjack/mcp/websocket/monitoring/factory.py +109 -0
- crackerjack/mcp/websocket/monitoring/filters.py +10 -0
- crackerjack/mcp/websocket/monitoring/metrics.py +64 -0
- crackerjack/mcp/websocket/monitoring/models.py +90 -0
- crackerjack/mcp/websocket/monitoring/utils.py +171 -0
- crackerjack/mcp/websocket/monitoring/websocket_manager.py +78 -0
- crackerjack/mcp/websocket/monitoring/websockets/__init__.py +17 -0
- crackerjack/mcp/websocket/monitoring/websockets/dependencies.py +126 -0
- crackerjack/mcp/websocket/monitoring/websockets/heatmap.py +176 -0
- crackerjack/mcp/websocket/monitoring/websockets/intelligence.py +291 -0
- crackerjack/mcp/websocket/monitoring/websockets/metrics.py +291 -0
- crackerjack/mcp/websocket/monitoring_endpoints.py +21 -0
- crackerjack/mcp/websocket/server.py +174 -0
- crackerjack/mcp/websocket/websocket_handler.py +276 -0
- crackerjack/mcp/websocket_server.py +10 -0
- crackerjack/models/README.md +308 -0
- crackerjack/models/__init__.py +40 -0
- crackerjack/models/config.py +730 -0
- crackerjack/models/config_adapter.py +265 -0
- crackerjack/models/protocols.py +1535 -0
- crackerjack/models/pydantic_models.py +320 -0
- crackerjack/models/qa_config.py +145 -0
- crackerjack/models/qa_results.py +134 -0
- crackerjack/models/resource_protocols.py +299 -0
- crackerjack/models/results.py +35 -0
- crackerjack/models/semantic_models.py +258 -0
- crackerjack/models/task.py +173 -0
- crackerjack/models/test_models.py +60 -0
- crackerjack/monitoring/README.md +11 -0
- crackerjack/monitoring/__init__.py +0 -0
- crackerjack/monitoring/ai_agent_watchdog.py +405 -0
- crackerjack/monitoring/metrics_collector.py +427 -0
- crackerjack/monitoring/regression_prevention.py +580 -0
- crackerjack/monitoring/websocket_server.py +406 -0
- crackerjack/orchestration/README.md +340 -0
- crackerjack/orchestration/__init__.py +43 -0
- crackerjack/orchestration/advanced_orchestrator.py +894 -0
- crackerjack/orchestration/cache/README.md +312 -0
- crackerjack/orchestration/cache/__init__.py +37 -0
- crackerjack/orchestration/cache/memory_cache.py +338 -0
- crackerjack/orchestration/cache/tool_proxy_cache.py +340 -0
- crackerjack/orchestration/config.py +297 -0
- crackerjack/orchestration/coverage_improvement.py +180 -0
- crackerjack/orchestration/execution_strategies.py +361 -0
- crackerjack/orchestration/hook_orchestrator.py +1398 -0
- crackerjack/orchestration/strategies/README.md +401 -0
- crackerjack/orchestration/strategies/__init__.py +39 -0
- crackerjack/orchestration/strategies/adaptive_strategy.py +630 -0
- crackerjack/orchestration/strategies/parallel_strategy.py +237 -0
- crackerjack/orchestration/strategies/sequential_strategy.py +299 -0
- crackerjack/orchestration/test_progress_streamer.py +647 -0
- crackerjack/plugins/README.md +11 -0
- crackerjack/plugins/__init__.py +15 -0
- crackerjack/plugins/base.py +200 -0
- crackerjack/plugins/hooks.py +254 -0
- crackerjack/plugins/loader.py +335 -0
- crackerjack/plugins/managers.py +264 -0
- crackerjack/py313.py +191 -0
- crackerjack/security/README.md +11 -0
- crackerjack/security/__init__.py +0 -0
- crackerjack/security/audit.py +197 -0
- crackerjack/services/README.md +374 -0
- crackerjack/services/__init__.py +9 -0
- crackerjack/services/ai/README.md +295 -0
- crackerjack/services/ai/__init__.py +7 -0
- crackerjack/services/ai/advanced_optimizer.py +878 -0
- crackerjack/services/ai/contextual_ai_assistant.py +542 -0
- crackerjack/services/ai/embeddings.py +444 -0
- crackerjack/services/ai/intelligent_commit.py +328 -0
- crackerjack/services/ai/predictive_analytics.py +510 -0
- crackerjack/services/anomaly_detector.py +392 -0
- crackerjack/services/api_extractor.py +617 -0
- crackerjack/services/backup_service.py +467 -0
- crackerjack/services/bounded_status_operations.py +530 -0
- crackerjack/services/cache.py +369 -0
- crackerjack/services/changelog_automation.py +399 -0
- crackerjack/services/command_execution_service.py +305 -0
- crackerjack/services/config_integrity.py +132 -0
- crackerjack/services/config_merge.py +546 -0
- crackerjack/services/config_service.py +198 -0
- crackerjack/services/config_template.py +493 -0
- crackerjack/services/coverage_badge_service.py +173 -0
- crackerjack/services/coverage_ratchet.py +381 -0
- crackerjack/services/debug.py +733 -0
- crackerjack/services/dependency_analyzer.py +460 -0
- crackerjack/services/dependency_monitor.py +622 -0
- crackerjack/services/documentation_generator.py +493 -0
- crackerjack/services/documentation_service.py +704 -0
- crackerjack/services/enhanced_filesystem.py +497 -0
- crackerjack/services/enterprise_optimizer.py +865 -0
- crackerjack/services/error_pattern_analyzer.py +676 -0
- crackerjack/services/file_filter.py +221 -0
- crackerjack/services/file_hasher.py +149 -0
- crackerjack/services/file_io_service.py +361 -0
- crackerjack/services/file_modifier.py +615 -0
- crackerjack/services/filesystem.py +381 -0
- crackerjack/services/git.py +422 -0
- crackerjack/services/health_metrics.py +615 -0
- crackerjack/services/heatmap_generator.py +744 -0
- crackerjack/services/incremental_executor.py +380 -0
- crackerjack/services/initialization.py +823 -0
- crackerjack/services/input_validator.py +668 -0
- crackerjack/services/intelligent_commit.py +327 -0
- crackerjack/services/log_manager.py +289 -0
- crackerjack/services/logging.py +228 -0
- crackerjack/services/lsp_client.py +628 -0
- crackerjack/services/memory_optimizer.py +414 -0
- crackerjack/services/metrics.py +587 -0
- crackerjack/services/monitoring/README.md +30 -0
- crackerjack/services/monitoring/__init__.py +9 -0
- crackerjack/services/monitoring/dependency_monitor.py +678 -0
- crackerjack/services/monitoring/error_pattern_analyzer.py +676 -0
- crackerjack/services/monitoring/health_metrics.py +716 -0
- crackerjack/services/monitoring/metrics.py +587 -0
- crackerjack/services/monitoring/performance_benchmarks.py +410 -0
- crackerjack/services/monitoring/performance_cache.py +388 -0
- crackerjack/services/monitoring/performance_monitor.py +569 -0
- crackerjack/services/parallel_executor.py +527 -0
- crackerjack/services/pattern_cache.py +333 -0
- crackerjack/services/pattern_detector.py +478 -0
- crackerjack/services/patterns/__init__.py +142 -0
- crackerjack/services/patterns/agents.py +107 -0
- crackerjack/services/patterns/code/__init__.py +15 -0
- crackerjack/services/patterns/code/detection.py +118 -0
- crackerjack/services/patterns/code/imports.py +107 -0
- crackerjack/services/patterns/code/paths.py +159 -0
- crackerjack/services/patterns/code/performance.py +119 -0
- crackerjack/services/patterns/code/replacement.py +36 -0
- crackerjack/services/patterns/core.py +212 -0
- crackerjack/services/patterns/documentation/__init__.py +14 -0
- crackerjack/services/patterns/documentation/badges_markdown.py +96 -0
- crackerjack/services/patterns/documentation/comments_blocks.py +83 -0
- crackerjack/services/patterns/documentation/docstrings.py +89 -0
- crackerjack/services/patterns/formatting.py +226 -0
- crackerjack/services/patterns/operations.py +339 -0
- crackerjack/services/patterns/security/__init__.py +23 -0
- crackerjack/services/patterns/security/code_injection.py +122 -0
- crackerjack/services/patterns/security/credentials.py +190 -0
- crackerjack/services/patterns/security/path_traversal.py +221 -0
- crackerjack/services/patterns/security/unsafe_operations.py +216 -0
- crackerjack/services/patterns/templates.py +62 -0
- crackerjack/services/patterns/testing/__init__.py +18 -0
- crackerjack/services/patterns/testing/error_patterns.py +107 -0
- crackerjack/services/patterns/testing/pytest_output.py +126 -0
- crackerjack/services/patterns/tool_output/__init__.py +16 -0
- crackerjack/services/patterns/tool_output/bandit.py +72 -0
- crackerjack/services/patterns/tool_output/other.py +97 -0
- crackerjack/services/patterns/tool_output/pyright.py +67 -0
- crackerjack/services/patterns/tool_output/ruff.py +44 -0
- crackerjack/services/patterns/url_sanitization.py +114 -0
- crackerjack/services/patterns/utilities.py +42 -0
- crackerjack/services/patterns/utils.py +339 -0
- crackerjack/services/patterns/validation.py +46 -0
- crackerjack/services/patterns/versioning.py +62 -0
- crackerjack/services/predictive_analytics.py +523 -0
- crackerjack/services/profiler.py +280 -0
- crackerjack/services/quality/README.md +415 -0
- crackerjack/services/quality/__init__.py +11 -0
- crackerjack/services/quality/anomaly_detector.py +392 -0
- crackerjack/services/quality/pattern_cache.py +333 -0
- crackerjack/services/quality/pattern_detector.py +479 -0
- crackerjack/services/quality/qa_orchestrator.py +491 -0
- crackerjack/services/quality/quality_baseline.py +395 -0
- crackerjack/services/quality/quality_baseline_enhanced.py +649 -0
- crackerjack/services/quality/quality_intelligence.py +949 -0
- crackerjack/services/regex_patterns.py +58 -0
- crackerjack/services/regex_utils.py +483 -0
- crackerjack/services/secure_path_utils.py +524 -0
- crackerjack/services/secure_status_formatter.py +450 -0
- crackerjack/services/secure_subprocess.py +635 -0
- crackerjack/services/security.py +239 -0
- crackerjack/services/security_logger.py +495 -0
- crackerjack/services/server_manager.py +411 -0
- crackerjack/services/smart_scheduling.py +167 -0
- crackerjack/services/status_authentication.py +460 -0
- crackerjack/services/status_security_manager.py +315 -0
- crackerjack/services/terminal_utils.py +0 -0
- crackerjack/services/thread_safe_status_collector.py +441 -0
- crackerjack/services/tool_filter.py +368 -0
- crackerjack/services/tool_version_service.py +43 -0
- crackerjack/services/unified_config.py +115 -0
- crackerjack/services/validation_rate_limiter.py +220 -0
- crackerjack/services/vector_store.py +689 -0
- crackerjack/services/version_analyzer.py +461 -0
- crackerjack/services/version_checker.py +223 -0
- crackerjack/services/websocket_resource_limiter.py +438 -0
- crackerjack/services/zuban_lsp_service.py +391 -0
- crackerjack/slash_commands/README.md +11 -0
- crackerjack/slash_commands/__init__.py +59 -0
- crackerjack/slash_commands/init.md +112 -0
- crackerjack/slash_commands/run.md +197 -0
- crackerjack/slash_commands/status.md +127 -0
- crackerjack/tools/README.md +11 -0
- crackerjack/tools/__init__.py +30 -0
- crackerjack/tools/_git_utils.py +105 -0
- crackerjack/tools/check_added_large_files.py +139 -0
- crackerjack/tools/check_ast.py +105 -0
- crackerjack/tools/check_json.py +103 -0
- crackerjack/tools/check_jsonschema.py +297 -0
- crackerjack/tools/check_toml.py +103 -0
- crackerjack/tools/check_yaml.py +110 -0
- crackerjack/tools/codespell_wrapper.py +72 -0
- crackerjack/tools/end_of_file_fixer.py +202 -0
- crackerjack/tools/format_json.py +128 -0
- crackerjack/tools/mdformat_wrapper.py +114 -0
- crackerjack/tools/trailing_whitespace.py +198 -0
- crackerjack/tools/validate_input_validator_patterns.py +236 -0
- crackerjack/tools/validate_regex_patterns.py +188 -0
- crackerjack/ui/README.md +11 -0
- crackerjack/ui/__init__.py +1 -0
- crackerjack/ui/dashboard_renderer.py +28 -0
- crackerjack/ui/templates/README.md +11 -0
- crackerjack/utils/console_utils.py +13 -0
- crackerjack/utils/dependency_guard.py +230 -0
- crackerjack/utils/retry_utils.py +275 -0
- crackerjack/workflows/README.md +590 -0
- crackerjack/workflows/__init__.py +46 -0
- crackerjack/workflows/actions.py +811 -0
- crackerjack/workflows/auto_fix.py +444 -0
- crackerjack/workflows/container_builder.py +499 -0
- crackerjack/workflows/definitions.py +443 -0
- crackerjack/workflows/engine.py +177 -0
- crackerjack/workflows/event_bridge.py +242 -0
- crackerjack-0.45.2.dist-info/METADATA +1678 -0
- crackerjack-0.45.2.dist-info/RECORD +478 -0
- {crackerjack-0.18.2.dist-info → crackerjack-0.45.2.dist-info}/WHEEL +1 -1
- crackerjack-0.45.2.dist-info/entry_points.txt +2 -0
- crackerjack/.gitignore +0 -14
- crackerjack/.libcst.codemod.yaml +0 -18
- crackerjack/.pdm.toml +0 -1
- crackerjack/.pre-commit-config.yaml +0 -91
- crackerjack/.pytest_cache/.gitignore +0 -2
- crackerjack/.pytest_cache/CACHEDIR.TAG +0 -4
- crackerjack/.pytest_cache/README.md +0 -8
- crackerjack/.pytest_cache/v/cache/nodeids +0 -1
- crackerjack/.pytest_cache/v/cache/stepwise +0 -1
- crackerjack/.ruff_cache/.gitignore +0 -1
- crackerjack/.ruff_cache/0.1.11/3256171999636029978 +0 -0
- crackerjack/.ruff_cache/0.1.14/602324811142551221 +0 -0
- crackerjack/.ruff_cache/0.1.4/10355199064880463147 +0 -0
- crackerjack/.ruff_cache/0.1.6/15140459877605758699 +0 -0
- crackerjack/.ruff_cache/0.1.7/1790508110482614856 +0 -0
- crackerjack/.ruff_cache/0.1.9/17041001205004563469 +0 -0
- crackerjack/.ruff_cache/0.11.2/4070660268492669020 +0 -0
- crackerjack/.ruff_cache/0.11.3/9818742842212983150 +0 -0
- crackerjack/.ruff_cache/0.11.4/9818742842212983150 +0 -0
- crackerjack/.ruff_cache/0.11.6/3557596832929915217 +0 -0
- crackerjack/.ruff_cache/0.11.7/10386934055395314831 +0 -0
- crackerjack/.ruff_cache/0.11.7/3557596832929915217 +0 -0
- crackerjack/.ruff_cache/0.11.8/530407680854991027 +0 -0
- crackerjack/.ruff_cache/0.2.0/10047773857155985907 +0 -0
- crackerjack/.ruff_cache/0.2.1/8522267973936635051 +0 -0
- crackerjack/.ruff_cache/0.2.2/18053836298936336950 +0 -0
- crackerjack/.ruff_cache/0.3.0/12548816621480535786 +0 -0
- crackerjack/.ruff_cache/0.3.3/11081883392474770722 +0 -0
- crackerjack/.ruff_cache/0.3.4/676973378459347183 +0 -0
- crackerjack/.ruff_cache/0.3.5/16311176246009842383 +0 -0
- crackerjack/.ruff_cache/0.5.7/1493622539551733492 +0 -0
- crackerjack/.ruff_cache/0.5.7/6231957614044513175 +0 -0
- crackerjack/.ruff_cache/0.5.7/9932762556785938009 +0 -0
- crackerjack/.ruff_cache/0.6.0/11982804814124138945 +0 -0
- crackerjack/.ruff_cache/0.6.0/12055761203849489982 +0 -0
- crackerjack/.ruff_cache/0.6.2/1206147804896221174 +0 -0
- crackerjack/.ruff_cache/0.6.4/1206147804896221174 +0 -0
- crackerjack/.ruff_cache/0.6.5/1206147804896221174 +0 -0
- crackerjack/.ruff_cache/0.6.7/3657366982708166874 +0 -0
- crackerjack/.ruff_cache/0.6.9/285614542852677309 +0 -0
- crackerjack/.ruff_cache/0.7.1/1024065805990144819 +0 -0
- crackerjack/.ruff_cache/0.7.1/285614542852677309 +0 -0
- crackerjack/.ruff_cache/0.7.3/16061516852537040135 +0 -0
- crackerjack/.ruff_cache/0.8.4/16354268377385700367 +0 -0
- crackerjack/.ruff_cache/0.9.10/12813592349865671909 +0 -0
- crackerjack/.ruff_cache/0.9.10/923908772239632759 +0 -0
- crackerjack/.ruff_cache/0.9.3/13948373885254993391 +0 -0
- crackerjack/.ruff_cache/0.9.9/12813592349865671909 +0 -0
- crackerjack/.ruff_cache/0.9.9/8843823720003377982 +0 -0
- crackerjack/.ruff_cache/CACHEDIR.TAG +0 -1
- crackerjack/crackerjack.py +0 -855
- crackerjack/pyproject.toml +0 -214
- crackerjack-0.18.2.dist-info/METADATA +0 -420
- crackerjack-0.18.2.dist-info/RECORD +0 -59
- crackerjack-0.18.2.dist-info/entry_points.txt +0 -4
- {crackerjack-0.18.2.dist-info → crackerjack-0.45.2.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,1398 @@
|
|
|
1
|
+
"""Hook Orchestrator for ACB integration.
|
|
2
|
+
|
|
3
|
+
ACB-powered orchestration layer managing hook lifecycle, dependency resolution,
|
|
4
|
+
and execution strategies. Supports dual execution modes for gradual migration.
|
|
5
|
+
|
|
6
|
+
ACB Patterns:
|
|
7
|
+
- MODULE_ID and MODULE_STATUS at module level
|
|
8
|
+
- depends.set() registration after class definition
|
|
9
|
+
- Structured logging with context fields
|
|
10
|
+
- Protocol-based interfaces
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
from __future__ import annotations
|
|
14
|
+
|
|
15
|
+
import asyncio
|
|
16
|
+
import logging
|
|
17
|
+
import typing as t
|
|
18
|
+
from collections import Counter
|
|
19
|
+
from contextlib import suppress
|
|
20
|
+
from typing import cast
|
|
21
|
+
from uuid import UUID
|
|
22
|
+
|
|
23
|
+
from acb.depends import depends
|
|
24
|
+
from pydantic import BaseModel, Field
|
|
25
|
+
|
|
26
|
+
from crackerjack.config.hooks import HookDefinition, HookStrategy
|
|
27
|
+
from crackerjack.events import WorkflowEvent, WorkflowEventBus
|
|
28
|
+
from crackerjack.models.qa_results import QAResultStatus
|
|
29
|
+
from crackerjack.models.task import HookResult
|
|
30
|
+
|
|
31
|
+
if t.TYPE_CHECKING:
|
|
32
|
+
from crackerjack.executors.hook_executor import HookExecutor
|
|
33
|
+
from crackerjack.orchestration.cache.memory_cache import MemoryCacheAdapter
|
|
34
|
+
from crackerjack.orchestration.cache.tool_proxy_cache import ToolProxyCacheAdapter
|
|
35
|
+
from crackerjack.orchestration.execution_strategies import ExecutionContext
|
|
36
|
+
|
|
37
|
+
# ACB module registration (required by the ACB framework).
# The UUID is a static UUID7 so the module identity is reproducible across runs.
MODULE_ID = UUID("01937d86-ace0-7000-8000-000000000003")
MODULE_STATUS = "stable"

# Module-level logger used for structured logging throughout this module.
logger = logging.getLogger(__name__)
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
class HookOrchestratorSettings(BaseModel):
    """Configuration model governing hook orchestration behavior."""

    # Concurrency and timing limits.
    max_parallel_hooks: int = Field(default=3, ge=1, le=10)
    default_timeout: int = Field(default=300, ge=30, le=1800)

    # Feature toggles.
    enable_caching: bool = True
    enable_dependency_resolution: bool = True
    retry_on_failure: bool = False

    # Backend and mode selection, validated against closed sets of choices.
    cache_backend: str = Field(
        default="tool_proxy", pattern="^(tool_proxy|redis|memory)$"
    )
    execution_mode: str = Field(default="acb", pattern="^(legacy|acb)$")

    # Phase 5-7: Triple parallelism settings
    enable_adaptive_execution: bool = True  # Use adaptive strategy (dependency-aware)
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
class HookOrchestratorAdapter:
|
|
64
|
+
"""ACB-powered hook orchestration layer.
|
|
65
|
+
|
|
66
|
+
Manages hook lifecycle, dependency resolution, and execution strategies.
|
|
67
|
+
Supports dual execution mode: pre-commit CLI (legacy) and direct adapters (ACB).
|
|
68
|
+
|
|
69
|
+
Features:
|
|
70
|
+
- Async parallel execution with resource limits
|
|
71
|
+
- Dependency resolution between hooks
|
|
72
|
+
- Content-based caching integration
|
|
73
|
+
- Dual execution mode for gradual migration
|
|
74
|
+
- Comprehensive structured logging
|
|
75
|
+
|
|
76
|
+
Architecture:
|
|
77
|
+
- Legacy mode (Phase 3-7): Delegates to existing HookExecutor
|
|
78
|
+
- ACB mode (Phase 8+): Direct adapter.check() calls via depends.get()
|
|
79
|
+
|
|
80
|
+
Example:
|
|
81
|
+
```python
|
|
82
|
+
from acb.depends import depends
|
|
83
|
+
from crackerjack.orchestration import HookOrchestratorAdapter
|
|
84
|
+
|
|
85
|
+
# Initialize orchestrator
|
|
86
|
+
orchestrator = await depends.get(HookOrchestratorAdapter)
|
|
87
|
+
await orchestrator.init()
|
|
88
|
+
|
|
89
|
+
# Execute strategy (legacy mode during Phase 3-7)
|
|
90
|
+
results = await orchestrator.execute_strategy(
|
|
91
|
+
strategy=fast_strategy, execution_mode="legacy"
|
|
92
|
+
)
|
|
93
|
+
```
|
|
94
|
+
"""
|
|
95
|
+
|
|
96
|
+
    def __init__(
        self,
        settings: HookOrchestratorSettings | None = None,
        hook_executor: HookExecutor | None = None,
        cache_adapter: ToolProxyCacheAdapter | MemoryCacheAdapter | None = None,
        event_bus: WorkflowEventBus | None = None,
        execution_context: ExecutionContext | None = None,
    ) -> None:
        """Initialize Hook Orchestrator.

        Args:
            settings: Optional settings override (defaults to HookOrchestratorSettings())
            hook_executor: Optional HookExecutor for legacy mode delegation
            cache_adapter: Optional cache adapter (auto-selected from settings.cache_backend if not provided)
            event_bus: Optional workflow event bus; resolved via DI when omitted
            execution_context: Optional execution context for adapters that need it
        """
        self.settings = settings or HookOrchestratorSettings()
        self._hook_executor = hook_executor
        self._cache_adapter = cache_adapter
        # Hook name -> list of prerequisite hook names; populated by init().
        self._dependency_graph: dict[str, list[str]] = {}
        self.execution_context = execution_context
        # Guard so the async init() work only runs once.
        self._initialized = False
        # Cache telemetry counters, updated by _try_get_cached().
        self._cache_hits = 0
        self._cache_misses = 0
        # Fall back to a DI lookup when no event bus was injected explicitly.
        self._event_bus = event_bus or self._resolve_event_bus()

        logger.debug(
            "HookOrchestratorAdapter initialized",
            extra={
                "has_settings": settings is not None,
                "has_executor": hook_executor is not None,
                "has_cache": cache_adapter is not None,
            },
        )
|
|
130
|
+
|
|
131
|
+
@staticmethod
|
|
132
|
+
def _resolve_event_bus() -> WorkflowEventBus | None:
|
|
133
|
+
"""Resolve workflow event bus from dependency injection."""
|
|
134
|
+
try:
|
|
135
|
+
return cast(WorkflowEventBus, depends.get_sync(WorkflowEventBus))
|
|
136
|
+
except Exception:
|
|
137
|
+
logger.debug("Workflow event bus not available during orchestrator setup")
|
|
138
|
+
return None
|
|
139
|
+
|
|
140
|
+
    async def init(self) -> None:
        """Initialize orchestrator and build dependency graph.

        Idempotent: a second call returns immediately. Builds the hook
        dependency graph, lazily constructs a cache adapter from
        ``settings.cache_backend`` when caching is enabled and no adapter was
        injected, then initializes whichever cache adapter is configured.
        """
        if self._initialized:
            logger.debug("HookOrchestratorAdapter already initialized")
            return

        # Build dependency graph for hook execution order
        self._build_dependency_graph()

        # Initialize cache adapter if caching enabled
        if self.settings.enable_caching and not self._cache_adapter:
            logger.debug(
                "Initializing cache adapter",
                extra={"cache_backend": self.settings.cache_backend},
            )

            # Auto-select cache backend (imports are local to avoid paying the
            # import cost when caching is disabled or an adapter was injected)
            if self.settings.cache_backend == "tool_proxy":
                from crackerjack.orchestration.cache.tool_proxy_cache import (
                    ToolProxyCacheAdapter,
                )

                self._cache_adapter = ToolProxyCacheAdapter()
            elif self.settings.cache_backend == "memory":
                from crackerjack.orchestration.cache.memory_cache import (
                    MemoryCacheAdapter,
                )

                self._cache_adapter = MemoryCacheAdapter()
            else:
                # NOTE(review): "redis" passes the settings pattern check but is
                # not handled here, so it falls through and disables caching.
                logger.warning(
                    f"Unknown cache backend: {self.settings.cache_backend}, disabling caching"
                )
                self.settings.enable_caching = False

        # Initialize cache if provided
        if self._cache_adapter:
            await self._cache_adapter.init()
            logger.debug("Cache adapter initialized")

        self._initialized = True
        logger.info(
            "HookOrchestratorAdapter initialization complete",
            extra={
                "max_parallel_hooks": self.settings.max_parallel_hooks,
                "enable_caching": self.settings.enable_caching,
                "enable_dependency_resolution": self.settings.enable_dependency_resolution,
                "execution_mode": self.settings.execution_mode,
                "dependency_count": len(self._dependency_graph),
                "cache_backend": self.settings.cache_backend
                if self.settings.enable_caching
                else "disabled",
            },
        )
|
|
194
|
+
|
|
195
|
+
@property
|
|
196
|
+
def module_id(self) -> UUID:
|
|
197
|
+
"""Reference to module-level MODULE_ID."""
|
|
198
|
+
return MODULE_ID
|
|
199
|
+
|
|
200
|
+
@property
|
|
201
|
+
def adapter_name(self) -> str:
|
|
202
|
+
"""Human-readable adapter name."""
|
|
203
|
+
return "Hook Orchestrator"
|
|
204
|
+
|
|
205
|
+
def _build_dependency_graph(self) -> None:
|
|
206
|
+
"""Build dependency graph for hook execution order.
|
|
207
|
+
|
|
208
|
+
Dependency rules:
|
|
209
|
+
- gitleaks must run before bandit (secrets before security)
|
|
210
|
+
- zuban must run before refurb (types before refactoring)
|
|
211
|
+
- formatting hooks run first (ruff-format, mdformat)
|
|
212
|
+
- validation hooks run early (check-yaml, check-toml)
|
|
213
|
+
"""
|
|
214
|
+
self._dependency_graph = {
|
|
215
|
+
# Gitleaks before security analysis
|
|
216
|
+
"bandit": ["gitleaks"],
|
|
217
|
+
"skylos": ["gitleaks"],
|
|
218
|
+
# Type checking before refactoring
|
|
219
|
+
"refurb": ["zuban"],
|
|
220
|
+
"creosote": ["zuban"],
|
|
221
|
+
# Formatting before linting
|
|
222
|
+
"ruff-check": ["ruff-format"],
|
|
223
|
+
"codespell": ["ruff-format", "mdformat"],
|
|
224
|
+
# Complexity analysis after refactoring
|
|
225
|
+
"complexipy": ["refurb"],
|
|
226
|
+
}
|
|
227
|
+
|
|
228
|
+
logger.debug(
|
|
229
|
+
"Built hook dependency graph",
|
|
230
|
+
extra={
|
|
231
|
+
"dependency_count": len(self._dependency_graph),
|
|
232
|
+
"dependent_hooks": list(self._dependency_graph.keys()),
|
|
233
|
+
},
|
|
234
|
+
)
|
|
235
|
+
|
|
236
|
+
    async def execute_strategy(
        self,
        strategy: HookStrategy,
        execution_mode: str | None = None,
        progress_callback: t.Callable[[int, int], None] | None = None,
        progress_start_callback: t.Callable[[int, int], None] | None = None,
        execution_context: ExecutionContext | None = None,
    ) -> list[HookResult]:
        """Execute hook strategy with specified mode.

        Publishes HOOK_STRATEGY_STARTED before dispatch, then either
        HOOK_STRATEGY_COMPLETED (with a results summary) on success or
        HOOK_STRATEGY_FAILED before re-raising on error.

        Args:
            strategy: Hook strategy (fast or comprehensive)
            execution_mode: "legacy" (pre-commit CLI) or "acb" (direct adapters)
                Defaults to settings.execution_mode if not specified
            progress_callback: Optional callback(completed, total) for progress updates
            progress_start_callback: Optional callback forwarded to the ACB
                execution path; not used in legacy mode
            execution_context: Accepted for interface compatibility.
                NOTE(review): currently unreferenced in this method body -
                confirm whether it should be forwarded.

        Returns:
            List of HookResult objects

        Raises:
            ValueError: If execution_mode is invalid
            RuntimeError: If orchestrator not initialized
        """
        if not self._initialized:
            raise RuntimeError("HookOrchestrator not initialized. Call init() first.")

        # Per-call mode overrides the configured default.
        mode = execution_mode or self.settings.execution_mode

        logger.info(
            "Executing hook strategy",
            extra={
                "strategy_name": strategy.name,
                "hook_count": len(strategy.hooks),
                "execution_mode": mode,
                "parallel": strategy.parallel,
                "max_workers": strategy.max_workers,
            },
        )

        await self._publish_event(
            WorkflowEvent.HOOK_STRATEGY_STARTED,
            {
                "strategy": strategy.name,
                "execution_mode": mode,
                "hook_count": len(strategy.hooks),
            },
        )

        try:
            if mode == "legacy":
                results = await self._execute_legacy_mode(strategy)
            elif mode == "acb":
                results = await self._execute_acb_mode(
                    strategy,
                    progress_callback,
                    progress_start_callback,
                )
            else:
                raise ValueError(
                    f"Invalid execution mode: {mode}. Must be 'legacy' or 'acb'"
                )
        except Exception as exc:
            # Surface the failure to event subscribers, then re-raise unchanged.
            await self._publish_event(
                WorkflowEvent.HOOK_STRATEGY_FAILED,
                {
                    "strategy": strategy.name,
                    "execution_mode": mode,
                    "error": str(exc),
                },
            )
            raise

        await self._publish_event(
            WorkflowEvent.HOOK_STRATEGY_COMPLETED,
            {
                "strategy": strategy.name,
                "execution_mode": mode,
                "summary": self._summarize_results(results),
            },
        )

        return results
|
|
318
|
+
|
|
319
|
+
async def _execute_legacy_mode(self, strategy: HookStrategy) -> list[HookResult]:
|
|
320
|
+
"""Execute hooks via pre-commit CLI (existing HookExecutor).
|
|
321
|
+
|
|
322
|
+
This is the bridge to the existing system during Phase 3-7.
|
|
323
|
+
Delegates to HookExecutor which calls pre-commit CLI via subprocess.
|
|
324
|
+
|
|
325
|
+
Args:
|
|
326
|
+
strategy: Hook strategy to execute
|
|
327
|
+
|
|
328
|
+
Returns:
|
|
329
|
+
List of HookResult objects from HookExecutor
|
|
330
|
+
|
|
331
|
+
Raises:
|
|
332
|
+
RuntimeError: If HookExecutor not provided during initialization
|
|
333
|
+
"""
|
|
334
|
+
logger.debug(
|
|
335
|
+
"Using legacy pre-commit execution mode",
|
|
336
|
+
extra={
|
|
337
|
+
"strategy_name": strategy.name,
|
|
338
|
+
"has_executor": self._hook_executor is not None,
|
|
339
|
+
},
|
|
340
|
+
)
|
|
341
|
+
|
|
342
|
+
if not self._hook_executor:
|
|
343
|
+
raise RuntimeError(
|
|
344
|
+
"Legacy mode requires HookExecutor. "
|
|
345
|
+
"Pass hook_executor during initialization or use execution_mode='acb'"
|
|
346
|
+
)
|
|
347
|
+
|
|
348
|
+
# Delegate to existing HookExecutor
|
|
349
|
+
# This maintains full backward compatibility with current system
|
|
350
|
+
execution_result = self._hook_executor.execute_strategy(strategy)
|
|
351
|
+
|
|
352
|
+
logger.info(
|
|
353
|
+
"Legacy mode execution complete",
|
|
354
|
+
extra={
|
|
355
|
+
"strategy_name": strategy.name,
|
|
356
|
+
"total_hooks": len(execution_result.results),
|
|
357
|
+
"passed": execution_result.passed_count,
|
|
358
|
+
"failed": execution_result.failed_count,
|
|
359
|
+
"duration": execution_result.total_duration,
|
|
360
|
+
},
|
|
361
|
+
)
|
|
362
|
+
|
|
363
|
+
return execution_result.results
|
|
364
|
+
|
|
365
|
+
    async def _execute_acb_mode(
        self,
        strategy: HookStrategy,
        progress_callback: t.Callable[[int, int], None] | None = None,
        progress_start_callback: t.Callable[[int, int], None] | None = None,
    ) -> list[HookResult]:
        """Execute hooks via direct adapter calls (ACB-powered).

        This is the target architecture for Phase 8+.
        Calls adapter.check() directly via depends.get() instead of subprocess.

        Dispatch order:
        1. settings.enable_adaptive_execution -> AdaptiveExecutionStrategy
           (dependency-aware batching, forwards both progress callbacks)
        2. strategy.parallel -> bounded parallel execution (no dependency order)
        3. otherwise -> sequential execution

        Args:
            strategy: Hook strategy to execute
            progress_callback: Optional callback(completed, total) for progress updates
            progress_start_callback: Optional callback forwarded to the adaptive
                strategy only (unused in the non-adaptive fallbacks)

        Returns:
            List of HookResult objects from direct adapter execution
        """
        logger.debug(
            "Using ACB direct adapter execution mode",
            extra={
                "strategy_name": strategy.name,
                "enable_adaptive_execution": self.settings.enable_adaptive_execution,
            },
        )

        # NEW Phase 5-7: Use adaptive strategy for dependency-aware parallel execution
        if self.settings.enable_adaptive_execution:
            from crackerjack.orchestration.strategies.adaptive_strategy import (
                AdaptiveExecutionStrategy,
            )

            logger.info(
                "Using adaptive execution strategy with dependency-aware batching",
                extra={
                    "strategy_name": strategy.name,
                    "max_parallel": strategy.max_workers
                    or self.settings.max_parallel_hooks,
                    "dependency_graph_size": len(self._dependency_graph),
                },
            )

            execution_strategy = AdaptiveExecutionStrategy(
                dependency_graph=self._dependency_graph,
                max_parallel=strategy.max_workers or self.settings.max_parallel_hooks,
                default_timeout=self.settings.default_timeout,
                stop_on_critical_failure=True,
            )

            results = await execution_strategy.execute(
                hooks=strategy.hooks,
                executor_callable=self._execute_single_hook,
                progress_callback=progress_callback,
                progress_start_callback=progress_start_callback,
            )
        elif strategy.parallel:
            # Fallback to simple parallel execution without dependency resolution
            results = await self._execute_parallel(strategy.hooks, strategy.max_workers)
        else:
            # Sequential execution
            results = await self._execute_sequential(strategy.hooks)

        logger.info(
            "ACB mode execution complete",
            extra={
                "strategy_name": strategy.name,
                "total_hooks": len(results),
                "passed": sum(1 for r in results if r.status == "passed"),
                "failed": sum(1 for r in results if r.status == "failed"),
                "errors": sum(1 for r in results if r.status in ("timeout", "error")),
            },
        )

        return results
|
|
439
|
+
|
|
440
|
+
    def _resolve_dependencies(
        self, hooks: list[HookDefinition]
    ) -> list[HookDefinition]:
        """Resolve hook dependencies and return execution order.

        Stable Kahn-style topological sort against ``self._dependency_graph``.
        Only dependencies actually present in *hooks* are counted, so hooks
        whose prerequisites are absent from the list run unblocked.

        Args:
            hooks: Unordered list of hooks

        Returns:
            Ordered list of hooks respecting dependencies

        Algorithm:
            1. Build in-degree map (count of dependencies per hook)
            2. Start with hooks having zero dependencies
            3. Process hooks in layers, removing satisfied dependencies
            4. Hooks without dependencies execute in original order

        NOTE(review): hooks trapped in a dependency cycle never reach
        in-degree zero and are silently dropped from the result (observable
        as output_count < input_count in the debug log) - confirm intended.
        """
        # Build hook name to hook object mapping with original indices
        hook_map = {hook.name: hook for hook in hooks}
        hook_indices = {hook.name: idx for idx, hook in enumerate(hooks)}

        # Build in-degree map (how many dependencies each hook has)
        # Only count dependencies that are actually present in the hooks list
        in_degree = {hook.name: 0 for hook in hooks}
        for hook_name in hook_map:
            if hook_name in self._dependency_graph:
                # Only count dependencies that are in the current hooks list
                deps_in_list = [
                    dep for dep in self._dependency_graph[hook_name] if dep in hook_map
                ]
                in_degree[hook_name] = len(deps_in_list)

        # Queue of hooks ready to execute (zero dependencies)
        # Maintain original order for hooks with same in-degree
        ready_queue = [hook for hook in hooks if in_degree[hook.name] == 0]
        ordered = []

        # Process hooks in dependency order
        while ready_queue:
            # Take next ready hook (first in original order)
            current_hook = ready_queue.pop(0)
            ordered.append(current_hook)

            # Update in-degrees for dependent hooks
            for hook_name, deps in self._dependency_graph.items():
                if current_hook.name in deps and hook_name in in_degree:
                    in_degree[hook_name] -= 1
                    if in_degree[hook_name] == 0 and hook_name in hook_map:
                        ready_queue.append(hook_map[hook_name])

            # Re-sort ready_queue by original index to maintain stable order
            ready_queue.sort(key=lambda h: hook_indices[h.name])

        logger.debug(
            "Resolved hook dependencies",
            extra={
                "input_count": len(hooks),
                "output_count": len(ordered),
                "reordered": len(hooks) != len(ordered) or hooks != ordered,
            },
        )

        return ordered
|
|
505
|
+
|
|
506
|
+
async def _execute_parallel(
|
|
507
|
+
self,
|
|
508
|
+
hooks: list[HookDefinition],
|
|
509
|
+
max_workers: int = 3,
|
|
510
|
+
) -> list[HookResult]:
|
|
511
|
+
"""Execute hooks in parallel with resource limits.
|
|
512
|
+
|
|
513
|
+
Args:
|
|
514
|
+
hooks: Hooks to execute
|
|
515
|
+
max_workers: Maximum concurrent executions
|
|
516
|
+
|
|
517
|
+
Returns:
|
|
518
|
+
List of HookResult objects
|
|
519
|
+
"""
|
|
520
|
+
max_parallel = min(max_workers, self.settings.max_parallel_hooks)
|
|
521
|
+
semaphore = asyncio.Semaphore(max_parallel)
|
|
522
|
+
|
|
523
|
+
logger.debug(
|
|
524
|
+
"Starting parallel execution",
|
|
525
|
+
extra={
|
|
526
|
+
"hook_count": len(hooks),
|
|
527
|
+
"max_parallel": max_parallel,
|
|
528
|
+
},
|
|
529
|
+
)
|
|
530
|
+
|
|
531
|
+
async def execute_with_limit(hook: HookDefinition) -> HookResult:
|
|
532
|
+
async with semaphore:
|
|
533
|
+
return await self._execute_single_hook(hook)
|
|
534
|
+
|
|
535
|
+
tasks = [execute_with_limit(hook) for hook in hooks]
|
|
536
|
+
results = await asyncio.gather(*tasks, return_exceptions=True)
|
|
537
|
+
|
|
538
|
+
# Convert exceptions to error HookResults
|
|
539
|
+
final_results = []
|
|
540
|
+
for hook, result in zip(hooks, results):
|
|
541
|
+
if isinstance(result, HookResult):
|
|
542
|
+
final_results.append(result)
|
|
543
|
+
else:
|
|
544
|
+
logger.error(
|
|
545
|
+
"Hook execution raised exception",
|
|
546
|
+
extra={
|
|
547
|
+
"hook": hook.name,
|
|
548
|
+
"exception": str(result),
|
|
549
|
+
"exception_type": type(result).__name__,
|
|
550
|
+
},
|
|
551
|
+
)
|
|
552
|
+
final_results.append(self._error_result(hook, result))
|
|
553
|
+
|
|
554
|
+
logger.debug(
|
|
555
|
+
"Parallel execution complete",
|
|
556
|
+
extra={
|
|
557
|
+
"total_hooks": len(final_results),
|
|
558
|
+
"successful": sum(
|
|
559
|
+
1 for r in final_results if isinstance(r, HookResult)
|
|
560
|
+
),
|
|
561
|
+
},
|
|
562
|
+
)
|
|
563
|
+
|
|
564
|
+
return final_results
|
|
565
|
+
|
|
566
|
+
async def _execute_sequential(
|
|
567
|
+
self, hooks: list[HookDefinition]
|
|
568
|
+
) -> list[HookResult]:
|
|
569
|
+
"""Execute hooks sequentially.
|
|
570
|
+
|
|
571
|
+
Args:
|
|
572
|
+
hooks: Hooks to execute
|
|
573
|
+
|
|
574
|
+
Returns:
|
|
575
|
+
List of HookResult objects
|
|
576
|
+
"""
|
|
577
|
+
logger.debug("Starting sequential execution", extra={"hook_count": len(hooks)})
|
|
578
|
+
|
|
579
|
+
results = []
|
|
580
|
+
for hook in hooks:
|
|
581
|
+
result = await self._execute_single_hook(hook)
|
|
582
|
+
results.append(result)
|
|
583
|
+
|
|
584
|
+
# Early exit on critical failures
|
|
585
|
+
if result.status == "failed" and hook.security_level.value == "critical":
|
|
586
|
+
logger.warning(
|
|
587
|
+
f"Critical hook {hook.name} failed, stopping execution",
|
|
588
|
+
extra={
|
|
589
|
+
"hook": hook.name,
|
|
590
|
+
"security_level": "critical",
|
|
591
|
+
"remaining_hooks": len(hooks) - len(results),
|
|
592
|
+
},
|
|
593
|
+
)
|
|
594
|
+
break
|
|
595
|
+
|
|
596
|
+
logger.debug(
|
|
597
|
+
"Sequential execution complete",
|
|
598
|
+
extra={
|
|
599
|
+
"total_hooks": len(results),
|
|
600
|
+
"executed": len(results),
|
|
601
|
+
"skipped": len(hooks) - len(results),
|
|
602
|
+
},
|
|
603
|
+
)
|
|
604
|
+
|
|
605
|
+
return results
|
|
606
|
+
|
|
607
|
+
    async def _execute_single_hook(self, hook: HookDefinition) -> HookResult:
        """Execute a single hook (adapter or subprocess) with caching and events.

        Flow: publish HOOK_EXECUTION_STARTED, try the cache fast-path, then
        run via a direct adapter when _build_adapter returns one (subprocess
        fallback otherwise). The result is offered to the cache via
        _maybe_cache and HOOK_EXECUTION_COMPLETED is published; on exception,
        HOOK_EXECUTION_FAILED is published and the exception re-raised.
        """
        logger.debug(
            f"Executing hook: {hook.name}",
            extra={
                "hook": hook.name,
                "timeout": hook.timeout,
                "stage": hook.stage.value,
            },
        )
        await self._publish_event(
            WorkflowEvent.HOOK_EXECUTION_STARTED,
            {
                "hook": hook.name,
                "stage": hook.stage.value,
                "security_level": hook.security_level.value,
            },
        )

        # Cache fast-path
        cached = await self._try_get_cached(hook)
        if cached is not None:
            # Cached results still publish a completion event (cached=True)
            # so subscribers see a consistent event stream.
            await self._publish_event(
                WorkflowEvent.HOOK_EXECUTION_COMPLETED,
                {
                    "hook": hook.name,
                    "stage": hook.stage.value,
                    "status": cached.status,
                    "duration": cached.duration,
                    "cached": True,
                },
            )
            return cached

        try:
            import time

            start_time = time.time()

            # Execute hooks via direct adapter calls or subprocess if no adapter exists
            adapter = self._build_adapter(hook)
            if adapter is not None:
                result = await self._run_adapter(adapter, hook, start_time)
            else:
                result = self._run_subprocess(hook, start_time)

            await self._maybe_cache(hook, result)
        except Exception as exc:
            await self._publish_event(
                WorkflowEvent.HOOK_EXECUTION_FAILED,
                {"hook": hook.name, "stage": hook.stage.value, "error": str(exc)},
            )
            raise

        await self._publish_event(
            WorkflowEvent.HOOK_EXECUTION_COMPLETED,
            {
                "hook": hook.name,
                "stage": hook.stage.value,
                "status": result.status,
                "duration": result.duration,
                "cached": False,
            },
        )
        return result
|
|
672
|
+
|
|
673
|
+
async def _try_get_cached(self, hook: HookDefinition) -> HookResult | None:
|
|
674
|
+
if not (self.settings.enable_caching and self._cache_adapter):
|
|
675
|
+
return None
|
|
676
|
+
cache_key = self._cache_adapter.compute_key(hook, files=[])
|
|
677
|
+
cached = await self._cache_adapter.get(cache_key)
|
|
678
|
+
if cached:
|
|
679
|
+
self._cache_hits += 1
|
|
680
|
+
logger.debug(
|
|
681
|
+
f"Cache hit for hook {hook.name}",
|
|
682
|
+
extra={
|
|
683
|
+
"hook": hook.name,
|
|
684
|
+
"cache_key": cache_key,
|
|
685
|
+
"cache_hits": self._cache_hits,
|
|
686
|
+
},
|
|
687
|
+
)
|
|
688
|
+
return cached
|
|
689
|
+
self._cache_misses += 1
|
|
690
|
+
logger.debug(
|
|
691
|
+
f"Cache miss for hook {hook.name}",
|
|
692
|
+
extra={
|
|
693
|
+
"hook": hook.name,
|
|
694
|
+
"cache_key": cache_key,
|
|
695
|
+
"cache_misses": self._cache_misses,
|
|
696
|
+
},
|
|
697
|
+
)
|
|
698
|
+
return None
|
|
699
|
+
|
|
700
|
+
@staticmethod
|
|
701
|
+
def _pass_result(hook: HookDefinition, duration: float) -> HookResult:
|
|
702
|
+
return HookResult(
|
|
703
|
+
id=hook.name,
|
|
704
|
+
name=hook.name,
|
|
705
|
+
status="passed",
|
|
706
|
+
duration=duration,
|
|
707
|
+
files_processed=0,
|
|
708
|
+
issues_found=[],
|
|
709
|
+
stage=hook.stage.value,
|
|
710
|
+
exit_code=None, # No error for passed hooks
|
|
711
|
+
error_message=None,
|
|
712
|
+
is_timeout=False,
|
|
713
|
+
)
|
|
714
|
+
|
|
715
|
+
def _build_adapter(self, hook: HookDefinition) -> t.Any | None:
|
|
716
|
+
"""Build adapter for hook, dispatching to specific adapter factories."""
|
|
717
|
+
try:
|
|
718
|
+
adapter_factory = self._get_adapter_factory(hook.name)
|
|
719
|
+
if adapter_factory:
|
|
720
|
+
return adapter_factory(hook)
|
|
721
|
+
except Exception:
|
|
722
|
+
return None
|
|
723
|
+
return None
|
|
724
|
+
|
|
725
|
+
    def _get_adapter_factory(
        self, hook_name: str
    ) -> t.Callable[[HookDefinition], t.Any] | None:
        """Get adapter factory function for hook name.

        Returns None for hooks without a direct adapter; those are executed
        via subprocess by the caller.
        """
        factories: dict[str, t.Callable[[HookDefinition], t.Any]] = {
            "ruff-check": self._build_ruff_adapter,
            "ruff-format": self._build_ruff_adapter,
            "bandit": self._build_bandit_adapter,
            "codespell": self._build_codespell_adapter,
            "gitleaks": self._build_gitleaks_adapter,
            "skylos": self._build_skylos_adapter,
            "zuban": self._build_zuban_adapter,
            "complexipy": self._build_complexipy_adapter,
            "creosote": self._build_creosote_adapter,
            "refurb": self._build_refurb_adapter,
            # NOTE(review): "pyrefly" maps to the refurb factory - confirm this
            # is intentional rather than a copy/paste slip.
            "pyrefly": self._build_refurb_adapter,
            "mdformat": self._build_mdformat_adapter,
        }
        return factories.get(hook_name)
|
|
744
|
+
|
|
745
|
+
@staticmethod
|
|
746
|
+
def _build_ruff_adapter(hook: HookDefinition) -> t.Any:
|
|
747
|
+
"""Build Ruff adapter for format or check mode."""
|
|
748
|
+
from crackerjack.adapters.format.ruff import RuffAdapter, RuffSettings
|
|
749
|
+
|
|
750
|
+
is_format_mode = "format" in hook.name
|
|
751
|
+
is_check_mode = "check" in hook.name
|
|
752
|
+
|
|
753
|
+
return RuffAdapter(
|
|
754
|
+
settings=RuffSettings(
|
|
755
|
+
mode="format" if is_format_mode else "check",
|
|
756
|
+
fix_enabled=True, # Enable fixing for both check and format modes
|
|
757
|
+
unsafe_fixes=is_check_mode, # Enable unsafe fixes for check mode only
|
|
758
|
+
)
|
|
759
|
+
)
|
|
760
|
+
|
|
761
|
+
@staticmethod
|
|
762
|
+
def _build_bandit_adapter(hook: HookDefinition) -> t.Any:
|
|
763
|
+
"""Build Bandit security adapter."""
|
|
764
|
+
from crackerjack.adapters.sast.bandit import BanditAdapter
|
|
765
|
+
|
|
766
|
+
return BanditAdapter()
|
|
767
|
+
|
|
768
|
+
@staticmethod
|
|
769
|
+
def _build_codespell_adapter(hook: HookDefinition) -> t.Any:
|
|
770
|
+
"""Build Codespell lint adapter."""
|
|
771
|
+
from crackerjack.adapters.lint.codespell import CodespellAdapter
|
|
772
|
+
|
|
773
|
+
return CodespellAdapter()
|
|
774
|
+
|
|
775
|
+
@staticmethod
|
|
776
|
+
def _build_gitleaks_adapter(hook: HookDefinition) -> t.Any:
|
|
777
|
+
"""Build Gitleaks security adapter."""
|
|
778
|
+
from crackerjack.adapters.security.gitleaks import GitleaksAdapter
|
|
779
|
+
|
|
780
|
+
return GitleaksAdapter()
|
|
781
|
+
|
|
782
|
+
def _build_skylos_adapter(self, hook: HookDefinition) -> t.Any:
|
|
783
|
+
"""Build Skylos LSP adapter."""
|
|
784
|
+
from crackerjack.adapters.lsp.skylos import SkylosAdapter
|
|
785
|
+
|
|
786
|
+
if self.execution_context is None:
|
|
787
|
+
msg = f"Execution context required for {hook.name} adapter"
|
|
788
|
+
raise ValueError(msg)
|
|
789
|
+
return SkylosAdapter(context=self.execution_context)
|
|
790
|
+
|
|
791
|
+
@staticmethod
|
|
792
|
+
def _build_zuban_adapter(hook: HookDefinition) -> t.Any:
|
|
793
|
+
"""Build Zuban type checking adapter."""
|
|
794
|
+
from crackerjack.adapters.type.zuban import ZubanAdapter, ZubanSettings
|
|
795
|
+
|
|
796
|
+
return ZubanAdapter(settings=ZubanSettings())
|
|
797
|
+
|
|
798
|
+
@staticmethod
|
|
799
|
+
def _build_complexipy_adapter(hook: HookDefinition) -> t.Any:
|
|
800
|
+
"""Build Complexipy complexity adapter."""
|
|
801
|
+
from crackerjack.adapters.complexity.complexipy import ComplexipyAdapter
|
|
802
|
+
|
|
803
|
+
return ComplexipyAdapter()
|
|
804
|
+
|
|
805
|
+
@staticmethod
|
|
806
|
+
def _build_creosote_adapter(hook: HookDefinition) -> t.Any:
|
|
807
|
+
"""Build Creosote refactor adapter."""
|
|
808
|
+
from crackerjack.adapters.refactor.creosote import CreosoteAdapter
|
|
809
|
+
|
|
810
|
+
return CreosoteAdapter()
|
|
811
|
+
|
|
812
|
+
@staticmethod
|
|
813
|
+
def _build_refurb_adapter(hook: HookDefinition) -> t.Any:
|
|
814
|
+
"""Build Refurb refactor adapter."""
|
|
815
|
+
from crackerjack.adapters.refactor.refurb import RefurbAdapter
|
|
816
|
+
|
|
817
|
+
return RefurbAdapter()
|
|
818
|
+
|
|
819
|
+
@staticmethod
|
|
820
|
+
def _build_mdformat_adapter(hook: HookDefinition) -> t.Any:
|
|
821
|
+
"""Build Mdformat markdown adapter."""
|
|
822
|
+
from crackerjack.adapters.format.mdformat import MdformatAdapter
|
|
823
|
+
|
|
824
|
+
return MdformatAdapter()
|
|
825
|
+
|
|
826
|
+
@staticmethod
|
|
827
|
+
def _get_reporting_tools() -> set[str]:
|
|
828
|
+
"""Get the set of tools that report issues."""
|
|
829
|
+
return {"complexipy", "refurb", "gitleaks", "creosote"}
|
|
830
|
+
|
|
831
|
+
@staticmethod
|
|
832
|
+
def _get_formatters() -> set[str]:
|
|
833
|
+
"""Get the set of formatting tools."""
|
|
834
|
+
return {"ruff-format"}
|
|
835
|
+
|
|
836
|
+
def _determine_status(self, hook: HookDefinition, qa_result: t.Any) -> str:
|
|
837
|
+
"""Determine the status based on hook name and QA result."""
|
|
838
|
+
reporting_tools = self._get_reporting_tools()
|
|
839
|
+
formatters = self._get_formatters()
|
|
840
|
+
|
|
841
|
+
# Override status for tools that found issues but returned SUCCESS/WARNING
|
|
842
|
+
if (
|
|
843
|
+
(hook.name in reporting_tools or hook.name in formatters)
|
|
844
|
+
and qa_result.issues_found > 0
|
|
845
|
+
and qa_result.status in (QAResultStatus.SUCCESS, QAResultStatus.WARNING)
|
|
846
|
+
):
|
|
847
|
+
return "failed" # Trigger auto-fix stage
|
|
848
|
+
return (
|
|
849
|
+
"passed"
|
|
850
|
+
if qa_result.status in (QAResultStatus.SUCCESS, QAResultStatus.WARNING)
|
|
851
|
+
else "failed"
|
|
852
|
+
)
|
|
853
|
+
|
|
854
|
+
@staticmethod
|
|
855
|
+
def _build_issues_list(qa_result: t.Any) -> list[str]:
|
|
856
|
+
"""Build the issues list from the QA result.
|
|
857
|
+
|
|
858
|
+
This method uses the adapter's pre-formatted details string directly
|
|
859
|
+
instead of re-parsing it, which preserves the original formatting and
|
|
860
|
+
prevents loss of detailed error information.
|
|
861
|
+
|
|
862
|
+
Args:
|
|
863
|
+
qa_result: QAResult from adapter execution
|
|
864
|
+
|
|
865
|
+
Returns:
|
|
866
|
+
List of issue strings for display. Returns empty list if no issues.
|
|
867
|
+
"""
|
|
868
|
+
if qa_result.issues_found == 0:
|
|
869
|
+
return []
|
|
870
|
+
|
|
871
|
+
# NEW: Use adapter's pre-formatted details directly
|
|
872
|
+
if qa_result.details:
|
|
873
|
+
# Parse detail lines from the adapter's formatted output
|
|
874
|
+
detail_lines = [
|
|
875
|
+
line.strip()
|
|
876
|
+
for line in qa_result.details.split("\n")
|
|
877
|
+
if line.strip() and not line.strip().startswith("...")
|
|
878
|
+
]
|
|
879
|
+
|
|
880
|
+
# If we successfully parsed details, use them
|
|
881
|
+
if detail_lines:
|
|
882
|
+
# Show first 20 issues, then add summary for remainder
|
|
883
|
+
max_displayed = 20
|
|
884
|
+
if len(detail_lines) > max_displayed:
|
|
885
|
+
issues = detail_lines[:max_displayed]
|
|
886
|
+
remaining = len(detail_lines) - max_displayed
|
|
887
|
+
issues.append(
|
|
888
|
+
f"... and {remaining} more issue{'s' if remaining != 1 else ''} "
|
|
889
|
+
f"(run with --ai-debug for full details)"
|
|
890
|
+
)
|
|
891
|
+
else:
|
|
892
|
+
issues = detail_lines
|
|
893
|
+
|
|
894
|
+
# If qa_result reports more issues than we have details for, note it
|
|
895
|
+
if qa_result.issues_found > len(detail_lines):
|
|
896
|
+
extra = qa_result.issues_found - len(detail_lines)
|
|
897
|
+
issues.append(
|
|
898
|
+
f"... and {extra} additional issue{'s' if extra != 1 else ''} without details"
|
|
899
|
+
)
|
|
900
|
+
|
|
901
|
+
return issues
|
|
902
|
+
|
|
903
|
+
# Fallback: No details available or details parsing failed
|
|
904
|
+
# This should only happen when the adapter doesn't provide detailed output
|
|
905
|
+
count = qa_result.issues_found
|
|
906
|
+
return [
|
|
907
|
+
f"{count} issue{'s' if count != 1 else ''} found (run with --ai-debug for full details)"
|
|
908
|
+
]
|
|
909
|
+
|
|
910
|
+
    @staticmethod
    def _extract_error_details(
        hook: HookDefinition, qa_result: t.Any, status: str, issues: list[str]
    ) -> tuple[int | None, str | None, list[str]]:
        """Extract error details for failed hooks from adapter results.

        Note: This method should only add the generic fallback if _build_issues_list
        hasn't already provided a fallback message. This prevents double-fallback.

        NOTE(review): ``exit_code`` is initialized to None and never reassigned,
        so the first tuple element is always None - confirm whether it should be
        populated from ``qa_result.exit_code`` when available.

        Args:
            hook: Hook definition
            qa_result: QAResult from adapter execution
            status: Hook status (passed/failed)
            issues: Issues list from _build_issues_list

        Returns:
            Tuple of (exit_code, error_message, updated_issues)
        """
        exit_code = None
        error_message = None

        if status == "failed":
            if hasattr(qa_result, "details") and qa_result.details:
                # For adapter-based hooks, use details as error message
                error_message = qa_result.details[:500]  # Truncate if very long

                # Only extract error from details if issues list is truly empty
                # (not just a fallback message from _build_issues_list)
                if not issues:
                    # Cap at the first 10 non-empty lines of the details blob.
                    error_lines = [
                        line.strip()
                        for line in qa_result.details.split("\n")
                        if line.strip()
                    ][:10]
                    issues = error_lines or ["Hook failed with no parseable output"]
            elif not issues:
                # Only add generic fallback if we have absolutely no information
                # This should be rare since _build_issues_list provides a fallback
                issues = [
                    f"Hook {hook.name} failed with no detailed output (exit code: "
                    f"{
                        qa_result.exit_code
                        if hasattr(qa_result, 'exit_code')
                        else 'unknown'
                    })"
                ]

        return exit_code, error_message, issues
|
|
958
|
+
|
|
959
|
+
@staticmethod
|
|
960
|
+
def _calculate_total_issues(
|
|
961
|
+
qa_result: t.Any, status: str, issues: list[str]
|
|
962
|
+
) -> int:
|
|
963
|
+
"""Calculate the total count of issues from qa_result.
|
|
964
|
+
|
|
965
|
+
This method distinguishes between:
|
|
966
|
+
1. Genuine code issues (show actual count)
|
|
967
|
+
2. Configuration/tool errors (show 0, not forced to 1)
|
|
968
|
+
3. Parsing failures (may show 1 if no issues parseable)
|
|
969
|
+
|
|
970
|
+
The key insight: QAResultStatus.ERROR indicates a config/tool error,
|
|
971
|
+
not a code quality issue. These should show 0 issues, not 1.
|
|
972
|
+
"""
|
|
973
|
+
# Get the actual total count of issues from qa_result
|
|
974
|
+
# This may be larger than len(issues) if issues were truncated for display
|
|
975
|
+
total_issues = (
|
|
976
|
+
qa_result.issues_found
|
|
977
|
+
if hasattr(qa_result, "issues_found")
|
|
978
|
+
else len(issues)
|
|
979
|
+
)
|
|
980
|
+
|
|
981
|
+
# Only force "1 issue" for genuine parsing failures, not config errors
|
|
982
|
+
if status == "failed" and total_issues == 0:
|
|
983
|
+
# Check if this is a config/tool error vs code quality failure
|
|
984
|
+
if (
|
|
985
|
+
hasattr(qa_result, "status")
|
|
986
|
+
and qa_result.status == QAResultStatus.ERROR
|
|
987
|
+
):
|
|
988
|
+
# Config/tool error - show actual count (0)
|
|
989
|
+
# This prevents misleading "1 issue" for things like:
|
|
990
|
+
# - Missing binary
|
|
991
|
+
# - Invalid configuration
|
|
992
|
+
# - Tool initialization failures
|
|
993
|
+
return 0
|
|
994
|
+
else:
|
|
995
|
+
# Parsing failure or unexpected error - show 1 to indicate problem
|
|
996
|
+
# This handles cases where the tool found issues but we couldn't parse them
|
|
997
|
+
return max(total_issues, 1)
|
|
998
|
+
|
|
999
|
+
return total_issues
|
|
1000
|
+
|
|
1001
|
+
def _create_success_result(
    self, hook: HookDefinition, qa_result: t.Any, start_time: float
) -> HookResult:
    """Translate an adapter QAResult into a HookResult for a completed run."""
    status = self._determine_status(hook, qa_result)
    display_issues = self._build_issues_list(qa_result)

    # Failed hooks get exit code / error message pulled from the adapter
    # result; the issues list may gain a fallback entry in the process.
    exit_code, error_message, display_issues = self._extract_error_details(
        hook, qa_result, status, display_issues
    )

    # The true total may exceed the (possibly truncated) display list.
    total_issues = self._calculate_total_issues(qa_result, status, display_issues)

    # QAResultStatus.ERROR on a failed hook marks a config/tool problem
    # rather than a code-quality finding.
    is_config_error = (
        status == "failed"
        and hasattr(qa_result, "status")
        and qa_result.status == QAResultStatus.ERROR
    )

    # Adapters expose the files they examined; default to 0 when absent.
    processed = (
        len(qa_result.files_checked) if hasattr(qa_result, "files_checked") else 0
    )

    return HookResult(
        id=hook.name,
        name=hook.name,
        status=status,
        duration=self._elapsed(start_time),
        files_processed=processed,
        issues_found=display_issues,
        issues_count=total_issues,  # actual total, not the truncated display count
        stage=hook.stage.value,
        exit_code=exit_code,  # may be None when the adapter reports none
        error_message=error_message,
        is_timeout=False,
        is_config_error=is_config_error,  # flag config/tool errors for callers
    )
|
|
1040
|
+
|
|
1041
|
+
def _create_timeout_result(
    self, hook: HookDefinition, start_time: float
) -> HookResult:
    """Build a HookResult describing a hook that exceeded its timeout."""
    return HookResult(
        id=hook.name,
        name=hook.name,
        status="timeout",
        duration=self._elapsed(start_time),
        files_processed=0,
        issues_found=[f"Hook timed out after {hook.timeout}s"],
        issues_count=1,  # the timeout itself is reported as a single issue
        stage=hook.stage.value,
        exit_code=124,  # conventional exit code for timed-out commands
        error_message=f"Execution exceeded timeout of {hook.timeout}s",
        is_timeout=True,
    )
|
|
1059
|
+
|
|
1060
|
+
def _create_error_result(
    self, hook: HookDefinition, start_time: float, error: Exception
) -> HookResult:
    """Build a HookResult describing an exception raised during execution."""
    return HookResult(
        id=hook.name,
        name=hook.name,
        status="error",
        duration=self._elapsed(start_time),
        files_processed=0,
        issues_found=[f"Adapter execution error: {error}"],
        issues_count=1,  # the raised exception counts as one issue
        stage=hook.stage.value,
        exit_code=1,
        error_message=str(error),
        is_timeout=False,
    )
|
|
1077
|
+
|
|
1078
|
+
async def _run_adapter(
    self, adapter: t.Any, hook: HookDefinition, start_time: float
) -> HookResult:
    """Run an adapter-backed hook and convert the outcome into a HookResult.

    Timeouts and unexpected exceptions are captured and turned into the
    corresponding result objects rather than propagated.
    """
    import asyncio

    try:
        await adapter.init()
        # files=None lets the adapter itself discover which files to inspect.
        qa_result = await asyncio.wait_for(
            adapter.check(files=None), timeout=hook.timeout
        )
        return self._create_success_result(hook, qa_result, start_time)
    except TimeoutError:
        return self._create_timeout_result(hook, start_time)
    except Exception as exc:
        return self._create_error_result(hook, start_time, exc)
|
|
1095
|
+
|
|
1096
|
+
def _run_subprocess(self, hook: HookDefinition, start_time: float) -> HookResult:
    """Execute a hook as a subprocess and convert its output to a HookResult."""
    import subprocess

    command = hook.get_command()
    completed = subprocess.run(
        command, capture_output=True, text=True, timeout=hook.timeout
    )
    combined = (completed.stdout or "") + (completed.stderr or "")

    processed = self._extract_file_count(combined)
    status = self._determine_hook_status(hook, completed, combined)
    issues = self._collect_issues(status, completed)

    # Semgrep emits structured JSON; prefer its errors array when it failed.
    if hook.name == "semgrep" and status == "failed":
        issues = self._parse_semgrep_json_errors(combined, issues)

    # Only failed hooks carry an exit code / error message in the result.
    exit_code = completed.returncode if status == "failed" else None
    error_message = None
    if status == "failed" and combined.strip():
        # Keep a truncated copy of the raw output for diagnostics.
        error_message = combined.strip()[:500]

    # A failed hook always reports at least one issue.
    issues_count = max(len(issues), 1 if status == "failed" else 0)

    return HookResult(
        id=hook.name,
        name=hook.name,
        status=status,
        duration=self._elapsed(start_time),
        files_processed=processed,
        issues_found=issues,
        issues_count=issues_count,
        stage=hook.stage.value,
        exit_code=exit_code,
        error_message=error_message,
        is_timeout=False,
    )
|
|
1136
|
+
|
|
1137
|
+
@staticmethod
|
|
1138
|
+
def _extract_file_count(output_text: str) -> int:
|
|
1139
|
+
"""Extract file count from subprocess output using regex patterns."""
|
|
1140
|
+
import re
|
|
1141
|
+
|
|
1142
|
+
file_count_patterns = [
|
|
1143
|
+
r"(\d+)\s+files?\s+would\s+be",
|
|
1144
|
+
r"(\d+)\s+files?\s+already\s+formatted",
|
|
1145
|
+
r"(\d+)\s+files?\s+processed",
|
|
1146
|
+
r"(\d+)\s+files?\s+checked",
|
|
1147
|
+
r"(\d+)\s+files?\s+analyzed",
|
|
1148
|
+
r"Checking\s+(\d+)\s+files?",
|
|
1149
|
+
r"Found\s+(\d+)\s+files?",
|
|
1150
|
+
r"(\d+)\s+files?",
|
|
1151
|
+
]
|
|
1152
|
+
|
|
1153
|
+
all_matches = []
|
|
1154
|
+
for pattern in file_count_patterns:
|
|
1155
|
+
matches = re.findall(pattern, output_text, re.IGNORECASE)
|
|
1156
|
+
if matches:
|
|
1157
|
+
all_matches.extend([int(m) for m in matches if m.isdigit()])
|
|
1158
|
+
|
|
1159
|
+
return max(all_matches) if all_matches else 0
|
|
1160
|
+
|
|
1161
|
+
def _determine_hook_status(
    self, hook: HookDefinition, proc_result: t.Any, output_text: str
) -> str:
    """Map a subprocess result to "passed" or "failed".

    Return code 0 always passes. Several tools exit with 1 even though
    execution itself succeeded (formatters that modified files, analysis
    tools that merely reported findings, bandit with findings); those
    special cases are treated as passed as well.
    """
    if proc_result.returncode == 0:
        return "passed"

    # Formatters signal "files changed" with exit code 1.
    if self._is_formatting_success(hook, proc_result, output_text):
        return "passed"

    # Analysis tools (ruff, refurb, ...) use exit code 1 for "findings",
    # which is successful execution from the orchestrator's viewpoint.
    if self._is_analysis_tool_success(hook, proc_result):
        return "passed"

    # Bandit uses exit code 1 when it found potential issues.
    if self._is_bandit_success(hook, proc_result, output_text):
        return "passed"

    return "failed"
|
|
1183
|
+
|
|
1184
|
+
@staticmethod
|
|
1185
|
+
def _is_formatting_success(
|
|
1186
|
+
hook: HookDefinition, proc_result: t.Any, output_text: str
|
|
1187
|
+
) -> bool:
|
|
1188
|
+
"""Check if formatting tool return code 1 indicates successful modification."""
|
|
1189
|
+
if not hook.is_formatting or proc_result.returncode != 1:
|
|
1190
|
+
return False
|
|
1191
|
+
return "files were modified by this hook" in output_text.lower()
|
|
1192
|
+
|
|
1193
|
+
@staticmethod
|
|
1194
|
+
def _is_analysis_tool_success(hook: HookDefinition, proc_result: t.Any) -> bool:
|
|
1195
|
+
"""Check if analysis tool return code 1 indicates findings (not failure)."""
|
|
1196
|
+
if proc_result.returncode != 1:
|
|
1197
|
+
return False
|
|
1198
|
+
return hook.name in {
|
|
1199
|
+
"creosote",
|
|
1200
|
+
"complexipy",
|
|
1201
|
+
"refurb",
|
|
1202
|
+
"ruff-check",
|
|
1203
|
+
"ruff-format",
|
|
1204
|
+
}
|
|
1205
|
+
|
|
1206
|
+
@staticmethod
|
|
1207
|
+
def _is_bandit_success(
|
|
1208
|
+
hook: HookDefinition, proc_result: t.Any, output_text: str
|
|
1209
|
+
) -> bool:
|
|
1210
|
+
"""Check if bandit return code 1 indicates findings (not failure)."""
|
|
1211
|
+
if hook.name != "bandit" or proc_result.returncode != 1:
|
|
1212
|
+
return False
|
|
1213
|
+
output_text_lower = output_text.lower()
|
|
1214
|
+
return (
|
|
1215
|
+
"potential issues" in output_text_lower
|
|
1216
|
+
or "no issues identified" not in output_text_lower
|
|
1217
|
+
)
|
|
1218
|
+
|
|
1219
|
+
@staticmethod
|
|
1220
|
+
def _collect_issues(status: str, proc_result: t.Any) -> list[str]:
|
|
1221
|
+
"""Collect issues from subprocess output if hook failed.
|
|
1222
|
+
|
|
1223
|
+
For subprocess hooks (non-adapter), extracts error information from output.
|
|
1224
|
+
Returns list of strings since the display layer handles both string and object types.
|
|
1225
|
+
"""
|
|
1226
|
+
if status == "passed":
|
|
1227
|
+
return []
|
|
1228
|
+
|
|
1229
|
+
# Get combined output
|
|
1230
|
+
output_text = (proc_result.stdout or "") + (proc_result.stderr or "")
|
|
1231
|
+
if not output_text.strip():
|
|
1232
|
+
return [
|
|
1233
|
+
f"Hook failed with exit code {
|
|
1234
|
+
getattr(proc_result, 'returncode', 'unknown')
|
|
1235
|
+
} and no output"
|
|
1236
|
+
]
|
|
1237
|
+
|
|
1238
|
+
# Try to extract meaningful error lines (first 10 non-empty lines)
|
|
1239
|
+
error_lines = [
|
|
1240
|
+
line.strip() for line in output_text.split("\n") if line.strip()
|
|
1241
|
+
][:10]
|
|
1242
|
+
|
|
1243
|
+
return error_lines or ["Hook failed with non-zero exit code"]
|
|
1244
|
+
|
|
1245
|
+
@staticmethod
|
|
1246
|
+
def _parse_semgrep_json_errors(
|
|
1247
|
+
output_text: str, fallback_issues: list[str]
|
|
1248
|
+
) -> list[str]:
|
|
1249
|
+
"""Parse semgrep JSON output to extract errors from errors array.
|
|
1250
|
+
|
|
1251
|
+
Semgrep returns JSON with:
|
|
1252
|
+
- "results": Security/code quality findings (usually empty when download fails)
|
|
1253
|
+
- "errors": Configuration errors, download failures, etc.
|
|
1254
|
+
|
|
1255
|
+
Args:
|
|
1256
|
+
output_text: Combined stdout + stderr from semgrep
|
|
1257
|
+
fallback_issues: Issues collected from raw output (used if JSON parsing fails)
|
|
1258
|
+
|
|
1259
|
+
Returns:
|
|
1260
|
+
List of formatted error strings
|
|
1261
|
+
"""
|
|
1262
|
+
import json
|
|
1263
|
+
|
|
1264
|
+
try:
|
|
1265
|
+
json_data = json.loads(output_text.strip())
|
|
1266
|
+
|
|
1267
|
+
issues = []
|
|
1268
|
+
|
|
1269
|
+
# Extract security findings from results array
|
|
1270
|
+
if "results" in json_data:
|
|
1271
|
+
for result in json_data.get("results", []):
|
|
1272
|
+
path = result.get("path", "unknown")
|
|
1273
|
+
line_num = result.get("start", {}).get("line", "?")
|
|
1274
|
+
rule_id = result.get("check_id", "unknown-rule")
|
|
1275
|
+
message = result.get("extra", {}).get(
|
|
1276
|
+
"message", "Security issue detected"
|
|
1277
|
+
)
|
|
1278
|
+
issues.append(f"{path}:{line_num} - {rule_id}: {message}")
|
|
1279
|
+
|
|
1280
|
+
# Extract errors (download failures, config errors, etc.)
|
|
1281
|
+
if "errors" in json_data:
|
|
1282
|
+
for error in json_data.get("errors", []):
|
|
1283
|
+
error_type = error.get("type", "SemgrepError")
|
|
1284
|
+
error_msg = error.get("message", str(error))
|
|
1285
|
+
issues.append(f"{error_type}: {error_msg}")
|
|
1286
|
+
|
|
1287
|
+
return issues or fallback_issues
|
|
1288
|
+
|
|
1289
|
+
except json.JSONDecodeError:
|
|
1290
|
+
# JSON parsing failed, use fallback
|
|
1291
|
+
return fallback_issues
|
|
1292
|
+
|
|
1293
|
+
async def _maybe_cache(self, hook: HookDefinition, result: HookResult) -> None:
|
|
1294
|
+
if not (self.settings.enable_caching and self._cache_adapter):
|
|
1295
|
+
return
|
|
1296
|
+
cache_key = self._cache_adapter.compute_key(hook, files=[])
|
|
1297
|
+
await self._cache_adapter.set(cache_key, result)
|
|
1298
|
+
logger.debug(
|
|
1299
|
+
f"Cached result for hook {hook.name}",
|
|
1300
|
+
extra={
|
|
1301
|
+
"hook": hook.name,
|
|
1302
|
+
"cache_key": cache_key,
|
|
1303
|
+
"status": result.status,
|
|
1304
|
+
"files_processed": result.files_processed,
|
|
1305
|
+
},
|
|
1306
|
+
)
|
|
1307
|
+
|
|
1308
|
+
@staticmethod
|
|
1309
|
+
def _elapsed(start_time: float) -> float:
|
|
1310
|
+
import time
|
|
1311
|
+
|
|
1312
|
+
return time.time() - start_time
|
|
1313
|
+
|
|
1314
|
+
@staticmethod
def _error_result(hook: HookDefinition, error: BaseException) -> HookResult:
    """Build an error-status HookResult from a raised exception.

    Args:
        hook: Hook that raised exception
        error: Exception that was raised

    Returns:
        HookResult with error status
    """
    message = str(error)
    return HookResult(
        id=hook.name,
        name=hook.name,
        status="error",
        duration=0.0,
        files_processed=0,
        issues_found=[message],
        issues_count=1,  # the exception itself counts as one issue
        stage=hook.stage.value,
        exit_code=1,
        error_message=message,
        is_timeout=False,
    )
|
|
1338
|
+
|
|
1339
|
+
async def get_cache_stats(self) -> dict[str, t.Any]:
    """Return cache statistics, including hit/miss counts and hit ratio.

    Returns:
        Dictionary with cache statistics; includes adapter-specific stats
        under "adapter_stats" when a cache adapter is configured.
    """
    hits = self._cache_hits
    misses = self._cache_misses
    total = hits + misses

    stats: dict[str, t.Any] = {
        "caching_enabled": self.settings.enable_caching,
        "cache_backend": (
            self.settings.cache_backend
            if self.settings.enable_caching
            else "disabled"
        ),
        "cache_hits": hits,
        "cache_misses": misses,
        "total_requests": total,
        # Guard against division by zero when no requests were made yet.
        "hit_ratio": hits / total if total > 0 else 0.0,
    }

    # Merge in backend-specific statistics when an adapter is configured.
    if self._cache_adapter:
        stats["adapter_stats"] = await self._cache_adapter.get_stats()

    logger.debug("Cache statistics", extra=stats)

    return stats
|
|
1368
|
+
|
|
1369
|
+
async def _publish_event(
|
|
1370
|
+
self,
|
|
1371
|
+
event: WorkflowEvent,
|
|
1372
|
+
payload: dict[str, t.Any],
|
|
1373
|
+
) -> None:
|
|
1374
|
+
"""Publish an event to the workflow bus if available."""
|
|
1375
|
+
if not self._event_bus:
|
|
1376
|
+
return
|
|
1377
|
+
|
|
1378
|
+
try:
|
|
1379
|
+
await self._event_bus.publish(event, payload)
|
|
1380
|
+
except Exception as exc:
|
|
1381
|
+
logger.debug(
|
|
1382
|
+
"Failed to publish orchestrator event",
|
|
1383
|
+
extra={"event": event.value, "error": str(exc)},
|
|
1384
|
+
)
|
|
1385
|
+
|
|
1386
|
+
@staticmethod
|
|
1387
|
+
def _summarize_results(results: list[HookResult]) -> dict[str, t.Any]:
|
|
1388
|
+
"""Summarize hook results for telemetry payloads."""
|
|
1389
|
+
counts = Counter(result.status for result in results)
|
|
1390
|
+
return {
|
|
1391
|
+
"counts": dict(counts),
|
|
1392
|
+
"total": len(results),
|
|
1393
|
+
}
|
|
1394
|
+
|
|
1395
|
+
|
|
1396
|
+
# ACB Registration (REQUIRED at module level)
# Registers this adapter with the ACB dependency container at import time.
# Failures are deliberately swallowed so importing the module never crashes
# when the container is unavailable (e.g. in isolated test environments).
with suppress(Exception):
    depends.set(HookOrchestratorAdapter)
|