crackerjack 0.18.2__py3-none-any.whl → 0.45.2__py3-none-any.whl
This diff shows the changes between publicly available package versions released to one of the supported registries, as they appear in their respective public registries. It is provided for informational purposes only.
- crackerjack/README.md +19 -0
- crackerjack/__init__.py +96 -2
- crackerjack/__main__.py +637 -138
- crackerjack/adapters/README.md +18 -0
- crackerjack/adapters/__init__.py +39 -0
- crackerjack/adapters/_output_paths.py +167 -0
- crackerjack/adapters/_qa_adapter_base.py +309 -0
- crackerjack/adapters/_tool_adapter_base.py +706 -0
- crackerjack/adapters/ai/README.md +65 -0
- crackerjack/adapters/ai/__init__.py +5 -0
- crackerjack/adapters/ai/claude.py +853 -0
- crackerjack/adapters/complexity/README.md +53 -0
- crackerjack/adapters/complexity/__init__.py +10 -0
- crackerjack/adapters/complexity/complexipy.py +641 -0
- crackerjack/adapters/dependency/__init__.py +22 -0
- crackerjack/adapters/dependency/pip_audit.py +418 -0
- crackerjack/adapters/format/README.md +72 -0
- crackerjack/adapters/format/__init__.py +11 -0
- crackerjack/adapters/format/mdformat.py +313 -0
- crackerjack/adapters/format/ruff.py +516 -0
- crackerjack/adapters/lint/README.md +47 -0
- crackerjack/adapters/lint/__init__.py +11 -0
- crackerjack/adapters/lint/codespell.py +273 -0
- crackerjack/adapters/lsp/README.md +49 -0
- crackerjack/adapters/lsp/__init__.py +27 -0
- crackerjack/adapters/lsp/_base.py +194 -0
- crackerjack/adapters/lsp/_client.py +358 -0
- crackerjack/adapters/lsp/_manager.py +193 -0
- crackerjack/adapters/lsp/skylos.py +283 -0
- crackerjack/adapters/lsp/zuban.py +557 -0
- crackerjack/adapters/refactor/README.md +59 -0
- crackerjack/adapters/refactor/__init__.py +12 -0
- crackerjack/adapters/refactor/creosote.py +318 -0
- crackerjack/adapters/refactor/refurb.py +406 -0
- crackerjack/adapters/refactor/skylos.py +494 -0
- crackerjack/adapters/sast/README.md +132 -0
- crackerjack/adapters/sast/__init__.py +32 -0
- crackerjack/adapters/sast/_base.py +201 -0
- crackerjack/adapters/sast/bandit.py +423 -0
- crackerjack/adapters/sast/pyscn.py +405 -0
- crackerjack/adapters/sast/semgrep.py +241 -0
- crackerjack/adapters/security/README.md +111 -0
- crackerjack/adapters/security/__init__.py +17 -0
- crackerjack/adapters/security/gitleaks.py +339 -0
- crackerjack/adapters/type/README.md +52 -0
- crackerjack/adapters/type/__init__.py +12 -0
- crackerjack/adapters/type/pyrefly.py +402 -0
- crackerjack/adapters/type/ty.py +402 -0
- crackerjack/adapters/type/zuban.py +522 -0
- crackerjack/adapters/utility/README.md +51 -0
- crackerjack/adapters/utility/__init__.py +10 -0
- crackerjack/adapters/utility/checks.py +884 -0
- crackerjack/agents/README.md +264 -0
- crackerjack/agents/__init__.py +66 -0
- crackerjack/agents/architect_agent.py +238 -0
- crackerjack/agents/base.py +167 -0
- crackerjack/agents/claude_code_bridge.py +641 -0
- crackerjack/agents/coordinator.py +600 -0
- crackerjack/agents/documentation_agent.py +520 -0
- crackerjack/agents/dry_agent.py +585 -0
- crackerjack/agents/enhanced_coordinator.py +279 -0
- crackerjack/agents/enhanced_proactive_agent.py +185 -0
- crackerjack/agents/error_middleware.py +53 -0
- crackerjack/agents/formatting_agent.py +230 -0
- crackerjack/agents/helpers/__init__.py +9 -0
- crackerjack/agents/helpers/performance/__init__.py +22 -0
- crackerjack/agents/helpers/performance/performance_ast_analyzer.py +357 -0
- crackerjack/agents/helpers/performance/performance_pattern_detector.py +909 -0
- crackerjack/agents/helpers/performance/performance_recommender.py +572 -0
- crackerjack/agents/helpers/refactoring/__init__.py +22 -0
- crackerjack/agents/helpers/refactoring/code_transformer.py +536 -0
- crackerjack/agents/helpers/refactoring/complexity_analyzer.py +344 -0
- crackerjack/agents/helpers/refactoring/dead_code_detector.py +437 -0
- crackerjack/agents/helpers/test_creation/__init__.py +19 -0
- crackerjack/agents/helpers/test_creation/test_ast_analyzer.py +216 -0
- crackerjack/agents/helpers/test_creation/test_coverage_analyzer.py +643 -0
- crackerjack/agents/helpers/test_creation/test_template_generator.py +1031 -0
- crackerjack/agents/import_optimization_agent.py +1181 -0
- crackerjack/agents/performance_agent.py +325 -0
- crackerjack/agents/performance_helpers.py +205 -0
- crackerjack/agents/proactive_agent.py +55 -0
- crackerjack/agents/refactoring_agent.py +511 -0
- crackerjack/agents/refactoring_helpers.py +247 -0
- crackerjack/agents/security_agent.py +793 -0
- crackerjack/agents/semantic_agent.py +479 -0
- crackerjack/agents/semantic_helpers.py +356 -0
- crackerjack/agents/test_creation_agent.py +570 -0
- crackerjack/agents/test_specialist_agent.py +526 -0
- crackerjack/agents/tracker.py +110 -0
- crackerjack/api.py +647 -0
- crackerjack/cli/README.md +394 -0
- crackerjack/cli/__init__.py +24 -0
- crackerjack/cli/cache_handlers.py +209 -0
- crackerjack/cli/cache_handlers_enhanced.py +680 -0
- crackerjack/cli/facade.py +162 -0
- crackerjack/cli/formatting.py +13 -0
- crackerjack/cli/handlers/__init__.py +85 -0
- crackerjack/cli/handlers/advanced.py +103 -0
- crackerjack/cli/handlers/ai_features.py +62 -0
- crackerjack/cli/handlers/analytics.py +479 -0
- crackerjack/cli/handlers/changelog.py +271 -0
- crackerjack/cli/handlers/config_handlers.py +16 -0
- crackerjack/cli/handlers/coverage.py +84 -0
- crackerjack/cli/handlers/documentation.py +280 -0
- crackerjack/cli/handlers/main_handlers.py +497 -0
- crackerjack/cli/handlers/monitoring.py +371 -0
- crackerjack/cli/handlers.py +700 -0
- crackerjack/cli/interactive.py +488 -0
- crackerjack/cli/options.py +1216 -0
- crackerjack/cli/semantic_handlers.py +292 -0
- crackerjack/cli/utils.py +19 -0
- crackerjack/cli/version.py +19 -0
- crackerjack/code_cleaner.py +1307 -0
- crackerjack/config/README.md +472 -0
- crackerjack/config/__init__.py +275 -0
- crackerjack/config/global_lock_config.py +207 -0
- crackerjack/config/hooks.py +390 -0
- crackerjack/config/loader.py +239 -0
- crackerjack/config/settings.py +141 -0
- crackerjack/config/tool_commands.py +331 -0
- crackerjack/core/README.md +393 -0
- crackerjack/core/__init__.py +0 -0
- crackerjack/core/async_workflow_orchestrator.py +738 -0
- crackerjack/core/autofix_coordinator.py +282 -0
- crackerjack/core/container.py +105 -0
- crackerjack/core/enhanced_container.py +583 -0
- crackerjack/core/file_lifecycle.py +472 -0
- crackerjack/core/performance.py +244 -0
- crackerjack/core/performance_monitor.py +357 -0
- crackerjack/core/phase_coordinator.py +1227 -0
- crackerjack/core/proactive_workflow.py +267 -0
- crackerjack/core/resource_manager.py +425 -0
- crackerjack/core/retry.py +275 -0
- crackerjack/core/service_watchdog.py +601 -0
- crackerjack/core/session_coordinator.py +239 -0
- crackerjack/core/timeout_manager.py +563 -0
- crackerjack/core/websocket_lifecycle.py +410 -0
- crackerjack/core/workflow/__init__.py +21 -0
- crackerjack/core/workflow/workflow_ai_coordinator.py +863 -0
- crackerjack/core/workflow/workflow_event_orchestrator.py +1107 -0
- crackerjack/core/workflow/workflow_issue_parser.py +714 -0
- crackerjack/core/workflow/workflow_phase_executor.py +1158 -0
- crackerjack/core/workflow/workflow_security_gates.py +400 -0
- crackerjack/core/workflow_orchestrator.py +2243 -0
- crackerjack/data/README.md +11 -0
- crackerjack/data/__init__.py +8 -0
- crackerjack/data/models.py +79 -0
- crackerjack/data/repository.py +210 -0
- crackerjack/decorators/README.md +180 -0
- crackerjack/decorators/__init__.py +35 -0
- crackerjack/decorators/error_handling.py +649 -0
- crackerjack/decorators/error_handling_decorators.py +334 -0
- crackerjack/decorators/helpers.py +58 -0
- crackerjack/decorators/patterns.py +281 -0
- crackerjack/decorators/utils.py +58 -0
- crackerjack/docs/INDEX.md +11 -0
- crackerjack/docs/README.md +11 -0
- crackerjack/docs/generated/api/API_REFERENCE.md +10895 -0
- crackerjack/docs/generated/api/CLI_REFERENCE.md +109 -0
- crackerjack/docs/generated/api/CROSS_REFERENCES.md +1755 -0
- crackerjack/docs/generated/api/PROTOCOLS.md +3 -0
- crackerjack/docs/generated/api/SERVICES.md +1252 -0
- crackerjack/documentation/README.md +11 -0
- crackerjack/documentation/__init__.py +31 -0
- crackerjack/documentation/ai_templates.py +756 -0
- crackerjack/documentation/dual_output_generator.py +767 -0
- crackerjack/documentation/mkdocs_integration.py +518 -0
- crackerjack/documentation/reference_generator.py +1065 -0
- crackerjack/dynamic_config.py +678 -0
- crackerjack/errors.py +378 -0
- crackerjack/events/README.md +11 -0
- crackerjack/events/__init__.py +16 -0
- crackerjack/events/telemetry.py +175 -0
- crackerjack/events/workflow_bus.py +346 -0
- crackerjack/exceptions/README.md +301 -0
- crackerjack/exceptions/__init__.py +5 -0
- crackerjack/exceptions/config.py +4 -0
- crackerjack/exceptions/tool_execution_error.py +245 -0
- crackerjack/executors/README.md +591 -0
- crackerjack/executors/__init__.py +13 -0
- crackerjack/executors/async_hook_executor.py +938 -0
- crackerjack/executors/cached_hook_executor.py +316 -0
- crackerjack/executors/hook_executor.py +1295 -0
- crackerjack/executors/hook_lock_manager.py +708 -0
- crackerjack/executors/individual_hook_executor.py +739 -0
- crackerjack/executors/lsp_aware_hook_executor.py +349 -0
- crackerjack/executors/progress_hook_executor.py +282 -0
- crackerjack/executors/tool_proxy.py +433 -0
- crackerjack/hooks/README.md +485 -0
- crackerjack/hooks/lsp_hook.py +93 -0
- crackerjack/intelligence/README.md +557 -0
- crackerjack/intelligence/__init__.py +37 -0
- crackerjack/intelligence/adaptive_learning.py +693 -0
- crackerjack/intelligence/agent_orchestrator.py +485 -0
- crackerjack/intelligence/agent_registry.py +377 -0
- crackerjack/intelligence/agent_selector.py +439 -0
- crackerjack/intelligence/integration.py +250 -0
- crackerjack/interactive.py +719 -0
- crackerjack/managers/README.md +369 -0
- crackerjack/managers/__init__.py +11 -0
- crackerjack/managers/async_hook_manager.py +135 -0
- crackerjack/managers/hook_manager.py +585 -0
- crackerjack/managers/publish_manager.py +631 -0
- crackerjack/managers/test_command_builder.py +391 -0
- crackerjack/managers/test_executor.py +474 -0
- crackerjack/managers/test_manager.py +1357 -0
- crackerjack/managers/test_progress.py +187 -0
- crackerjack/mcp/README.md +374 -0
- crackerjack/mcp/__init__.py +0 -0
- crackerjack/mcp/cache.py +352 -0
- crackerjack/mcp/client_runner.py +121 -0
- crackerjack/mcp/context.py +802 -0
- crackerjack/mcp/dashboard.py +657 -0
- crackerjack/mcp/enhanced_progress_monitor.py +493 -0
- crackerjack/mcp/file_monitor.py +394 -0
- crackerjack/mcp/progress_components.py +607 -0
- crackerjack/mcp/progress_monitor.py +1016 -0
- crackerjack/mcp/rate_limiter.py +336 -0
- crackerjack/mcp/server.py +24 -0
- crackerjack/mcp/server_core.py +526 -0
- crackerjack/mcp/service_watchdog.py +505 -0
- crackerjack/mcp/state.py +407 -0
- crackerjack/mcp/task_manager.py +259 -0
- crackerjack/mcp/tools/README.md +27 -0
- crackerjack/mcp/tools/__init__.py +19 -0
- crackerjack/mcp/tools/core_tools.py +469 -0
- crackerjack/mcp/tools/error_analyzer.py +283 -0
- crackerjack/mcp/tools/execution_tools.py +384 -0
- crackerjack/mcp/tools/intelligence_tool_registry.py +46 -0
- crackerjack/mcp/tools/intelligence_tools.py +264 -0
- crackerjack/mcp/tools/monitoring_tools.py +628 -0
- crackerjack/mcp/tools/proactive_tools.py +367 -0
- crackerjack/mcp/tools/progress_tools.py +222 -0
- crackerjack/mcp/tools/semantic_tools.py +584 -0
- crackerjack/mcp/tools/utility_tools.py +358 -0
- crackerjack/mcp/tools/workflow_executor.py +699 -0
- crackerjack/mcp/websocket/README.md +31 -0
- crackerjack/mcp/websocket/__init__.py +14 -0
- crackerjack/mcp/websocket/app.py +54 -0
- crackerjack/mcp/websocket/endpoints.py +492 -0
- crackerjack/mcp/websocket/event_bridge.py +188 -0
- crackerjack/mcp/websocket/jobs.py +406 -0
- crackerjack/mcp/websocket/monitoring/__init__.py +25 -0
- crackerjack/mcp/websocket/monitoring/api/__init__.py +19 -0
- crackerjack/mcp/websocket/monitoring/api/dependencies.py +141 -0
- crackerjack/mcp/websocket/monitoring/api/heatmap.py +154 -0
- crackerjack/mcp/websocket/monitoring/api/intelligence.py +199 -0
- crackerjack/mcp/websocket/monitoring/api/metrics.py +203 -0
- crackerjack/mcp/websocket/monitoring/api/telemetry.py +101 -0
- crackerjack/mcp/websocket/monitoring/dashboard.py +18 -0
- crackerjack/mcp/websocket/monitoring/factory.py +109 -0
- crackerjack/mcp/websocket/monitoring/filters.py +10 -0
- crackerjack/mcp/websocket/monitoring/metrics.py +64 -0
- crackerjack/mcp/websocket/monitoring/models.py +90 -0
- crackerjack/mcp/websocket/monitoring/utils.py +171 -0
- crackerjack/mcp/websocket/monitoring/websocket_manager.py +78 -0
- crackerjack/mcp/websocket/monitoring/websockets/__init__.py +17 -0
- crackerjack/mcp/websocket/monitoring/websockets/dependencies.py +126 -0
- crackerjack/mcp/websocket/monitoring/websockets/heatmap.py +176 -0
- crackerjack/mcp/websocket/monitoring/websockets/intelligence.py +291 -0
- crackerjack/mcp/websocket/monitoring/websockets/metrics.py +291 -0
- crackerjack/mcp/websocket/monitoring_endpoints.py +21 -0
- crackerjack/mcp/websocket/server.py +174 -0
- crackerjack/mcp/websocket/websocket_handler.py +276 -0
- crackerjack/mcp/websocket_server.py +10 -0
- crackerjack/models/README.md +308 -0
- crackerjack/models/__init__.py +40 -0
- crackerjack/models/config.py +730 -0
- crackerjack/models/config_adapter.py +265 -0
- crackerjack/models/protocols.py +1535 -0
- crackerjack/models/pydantic_models.py +320 -0
- crackerjack/models/qa_config.py +145 -0
- crackerjack/models/qa_results.py +134 -0
- crackerjack/models/resource_protocols.py +299 -0
- crackerjack/models/results.py +35 -0
- crackerjack/models/semantic_models.py +258 -0
- crackerjack/models/task.py +173 -0
- crackerjack/models/test_models.py +60 -0
- crackerjack/monitoring/README.md +11 -0
- crackerjack/monitoring/__init__.py +0 -0
- crackerjack/monitoring/ai_agent_watchdog.py +405 -0
- crackerjack/monitoring/metrics_collector.py +427 -0
- crackerjack/monitoring/regression_prevention.py +580 -0
- crackerjack/monitoring/websocket_server.py +406 -0
- crackerjack/orchestration/README.md +340 -0
- crackerjack/orchestration/__init__.py +43 -0
- crackerjack/orchestration/advanced_orchestrator.py +894 -0
- crackerjack/orchestration/cache/README.md +312 -0
- crackerjack/orchestration/cache/__init__.py +37 -0
- crackerjack/orchestration/cache/memory_cache.py +338 -0
- crackerjack/orchestration/cache/tool_proxy_cache.py +340 -0
- crackerjack/orchestration/config.py +297 -0
- crackerjack/orchestration/coverage_improvement.py +180 -0
- crackerjack/orchestration/execution_strategies.py +361 -0
- crackerjack/orchestration/hook_orchestrator.py +1398 -0
- crackerjack/orchestration/strategies/README.md +401 -0
- crackerjack/orchestration/strategies/__init__.py +39 -0
- crackerjack/orchestration/strategies/adaptive_strategy.py +630 -0
- crackerjack/orchestration/strategies/parallel_strategy.py +237 -0
- crackerjack/orchestration/strategies/sequential_strategy.py +299 -0
- crackerjack/orchestration/test_progress_streamer.py +647 -0
- crackerjack/plugins/README.md +11 -0
- crackerjack/plugins/__init__.py +15 -0
- crackerjack/plugins/base.py +200 -0
- crackerjack/plugins/hooks.py +254 -0
- crackerjack/plugins/loader.py +335 -0
- crackerjack/plugins/managers.py +264 -0
- crackerjack/py313.py +191 -0
- crackerjack/security/README.md +11 -0
- crackerjack/security/__init__.py +0 -0
- crackerjack/security/audit.py +197 -0
- crackerjack/services/README.md +374 -0
- crackerjack/services/__init__.py +9 -0
- crackerjack/services/ai/README.md +295 -0
- crackerjack/services/ai/__init__.py +7 -0
- crackerjack/services/ai/advanced_optimizer.py +878 -0
- crackerjack/services/ai/contextual_ai_assistant.py +542 -0
- crackerjack/services/ai/embeddings.py +444 -0
- crackerjack/services/ai/intelligent_commit.py +328 -0
- crackerjack/services/ai/predictive_analytics.py +510 -0
- crackerjack/services/anomaly_detector.py +392 -0
- crackerjack/services/api_extractor.py +617 -0
- crackerjack/services/backup_service.py +467 -0
- crackerjack/services/bounded_status_operations.py +530 -0
- crackerjack/services/cache.py +369 -0
- crackerjack/services/changelog_automation.py +399 -0
- crackerjack/services/command_execution_service.py +305 -0
- crackerjack/services/config_integrity.py +132 -0
- crackerjack/services/config_merge.py +546 -0
- crackerjack/services/config_service.py +198 -0
- crackerjack/services/config_template.py +493 -0
- crackerjack/services/coverage_badge_service.py +173 -0
- crackerjack/services/coverage_ratchet.py +381 -0
- crackerjack/services/debug.py +733 -0
- crackerjack/services/dependency_analyzer.py +460 -0
- crackerjack/services/dependency_monitor.py +622 -0
- crackerjack/services/documentation_generator.py +493 -0
- crackerjack/services/documentation_service.py +704 -0
- crackerjack/services/enhanced_filesystem.py +497 -0
- crackerjack/services/enterprise_optimizer.py +865 -0
- crackerjack/services/error_pattern_analyzer.py +676 -0
- crackerjack/services/file_filter.py +221 -0
- crackerjack/services/file_hasher.py +149 -0
- crackerjack/services/file_io_service.py +361 -0
- crackerjack/services/file_modifier.py +615 -0
- crackerjack/services/filesystem.py +381 -0
- crackerjack/services/git.py +422 -0
- crackerjack/services/health_metrics.py +615 -0
- crackerjack/services/heatmap_generator.py +744 -0
- crackerjack/services/incremental_executor.py +380 -0
- crackerjack/services/initialization.py +823 -0
- crackerjack/services/input_validator.py +668 -0
- crackerjack/services/intelligent_commit.py +327 -0
- crackerjack/services/log_manager.py +289 -0
- crackerjack/services/logging.py +228 -0
- crackerjack/services/lsp_client.py +628 -0
- crackerjack/services/memory_optimizer.py +414 -0
- crackerjack/services/metrics.py +587 -0
- crackerjack/services/monitoring/README.md +30 -0
- crackerjack/services/monitoring/__init__.py +9 -0
- crackerjack/services/monitoring/dependency_monitor.py +678 -0
- crackerjack/services/monitoring/error_pattern_analyzer.py +676 -0
- crackerjack/services/monitoring/health_metrics.py +716 -0
- crackerjack/services/monitoring/metrics.py +587 -0
- crackerjack/services/monitoring/performance_benchmarks.py +410 -0
- crackerjack/services/monitoring/performance_cache.py +388 -0
- crackerjack/services/monitoring/performance_monitor.py +569 -0
- crackerjack/services/parallel_executor.py +527 -0
- crackerjack/services/pattern_cache.py +333 -0
- crackerjack/services/pattern_detector.py +478 -0
- crackerjack/services/patterns/__init__.py +142 -0
- crackerjack/services/patterns/agents.py +107 -0
- crackerjack/services/patterns/code/__init__.py +15 -0
- crackerjack/services/patterns/code/detection.py +118 -0
- crackerjack/services/patterns/code/imports.py +107 -0
- crackerjack/services/patterns/code/paths.py +159 -0
- crackerjack/services/patterns/code/performance.py +119 -0
- crackerjack/services/patterns/code/replacement.py +36 -0
- crackerjack/services/patterns/core.py +212 -0
- crackerjack/services/patterns/documentation/__init__.py +14 -0
- crackerjack/services/patterns/documentation/badges_markdown.py +96 -0
- crackerjack/services/patterns/documentation/comments_blocks.py +83 -0
- crackerjack/services/patterns/documentation/docstrings.py +89 -0
- crackerjack/services/patterns/formatting.py +226 -0
- crackerjack/services/patterns/operations.py +339 -0
- crackerjack/services/patterns/security/__init__.py +23 -0
- crackerjack/services/patterns/security/code_injection.py +122 -0
- crackerjack/services/patterns/security/credentials.py +190 -0
- crackerjack/services/patterns/security/path_traversal.py +221 -0
- crackerjack/services/patterns/security/unsafe_operations.py +216 -0
- crackerjack/services/patterns/templates.py +62 -0
- crackerjack/services/patterns/testing/__init__.py +18 -0
- crackerjack/services/patterns/testing/error_patterns.py +107 -0
- crackerjack/services/patterns/testing/pytest_output.py +126 -0
- crackerjack/services/patterns/tool_output/__init__.py +16 -0
- crackerjack/services/patterns/tool_output/bandit.py +72 -0
- crackerjack/services/patterns/tool_output/other.py +97 -0
- crackerjack/services/patterns/tool_output/pyright.py +67 -0
- crackerjack/services/patterns/tool_output/ruff.py +44 -0
- crackerjack/services/patterns/url_sanitization.py +114 -0
- crackerjack/services/patterns/utilities.py +42 -0
- crackerjack/services/patterns/utils.py +339 -0
- crackerjack/services/patterns/validation.py +46 -0
- crackerjack/services/patterns/versioning.py +62 -0
- crackerjack/services/predictive_analytics.py +523 -0
- crackerjack/services/profiler.py +280 -0
- crackerjack/services/quality/README.md +415 -0
- crackerjack/services/quality/__init__.py +11 -0
- crackerjack/services/quality/anomaly_detector.py +392 -0
- crackerjack/services/quality/pattern_cache.py +333 -0
- crackerjack/services/quality/pattern_detector.py +479 -0
- crackerjack/services/quality/qa_orchestrator.py +491 -0
- crackerjack/services/quality/quality_baseline.py +395 -0
- crackerjack/services/quality/quality_baseline_enhanced.py +649 -0
- crackerjack/services/quality/quality_intelligence.py +949 -0
- crackerjack/services/regex_patterns.py +58 -0
- crackerjack/services/regex_utils.py +483 -0
- crackerjack/services/secure_path_utils.py +524 -0
- crackerjack/services/secure_status_formatter.py +450 -0
- crackerjack/services/secure_subprocess.py +635 -0
- crackerjack/services/security.py +239 -0
- crackerjack/services/security_logger.py +495 -0
- crackerjack/services/server_manager.py +411 -0
- crackerjack/services/smart_scheduling.py +167 -0
- crackerjack/services/status_authentication.py +460 -0
- crackerjack/services/status_security_manager.py +315 -0
- crackerjack/services/terminal_utils.py +0 -0
- crackerjack/services/thread_safe_status_collector.py +441 -0
- crackerjack/services/tool_filter.py +368 -0
- crackerjack/services/tool_version_service.py +43 -0
- crackerjack/services/unified_config.py +115 -0
- crackerjack/services/validation_rate_limiter.py +220 -0
- crackerjack/services/vector_store.py +689 -0
- crackerjack/services/version_analyzer.py +461 -0
- crackerjack/services/version_checker.py +223 -0
- crackerjack/services/websocket_resource_limiter.py +438 -0
- crackerjack/services/zuban_lsp_service.py +391 -0
- crackerjack/slash_commands/README.md +11 -0
- crackerjack/slash_commands/__init__.py +59 -0
- crackerjack/slash_commands/init.md +112 -0
- crackerjack/slash_commands/run.md +197 -0
- crackerjack/slash_commands/status.md +127 -0
- crackerjack/tools/README.md +11 -0
- crackerjack/tools/__init__.py +30 -0
- crackerjack/tools/_git_utils.py +105 -0
- crackerjack/tools/check_added_large_files.py +139 -0
- crackerjack/tools/check_ast.py +105 -0
- crackerjack/tools/check_json.py +103 -0
- crackerjack/tools/check_jsonschema.py +297 -0
- crackerjack/tools/check_toml.py +103 -0
- crackerjack/tools/check_yaml.py +110 -0
- crackerjack/tools/codespell_wrapper.py +72 -0
- crackerjack/tools/end_of_file_fixer.py +202 -0
- crackerjack/tools/format_json.py +128 -0
- crackerjack/tools/mdformat_wrapper.py +114 -0
- crackerjack/tools/trailing_whitespace.py +198 -0
- crackerjack/tools/validate_input_validator_patterns.py +236 -0
- crackerjack/tools/validate_regex_patterns.py +188 -0
- crackerjack/ui/README.md +11 -0
- crackerjack/ui/__init__.py +1 -0
- crackerjack/ui/dashboard_renderer.py +28 -0
- crackerjack/ui/templates/README.md +11 -0
- crackerjack/utils/console_utils.py +13 -0
- crackerjack/utils/dependency_guard.py +230 -0
- crackerjack/utils/retry_utils.py +275 -0
- crackerjack/workflows/README.md +590 -0
- crackerjack/workflows/__init__.py +46 -0
- crackerjack/workflows/actions.py +811 -0
- crackerjack/workflows/auto_fix.py +444 -0
- crackerjack/workflows/container_builder.py +499 -0
- crackerjack/workflows/definitions.py +443 -0
- crackerjack/workflows/engine.py +177 -0
- crackerjack/workflows/event_bridge.py +242 -0
- crackerjack-0.45.2.dist-info/METADATA +1678 -0
- crackerjack-0.45.2.dist-info/RECORD +478 -0
- {crackerjack-0.18.2.dist-info → crackerjack-0.45.2.dist-info}/WHEEL +1 -1
- crackerjack-0.45.2.dist-info/entry_points.txt +2 -0
- crackerjack/.gitignore +0 -14
- crackerjack/.libcst.codemod.yaml +0 -18
- crackerjack/.pdm.toml +0 -1
- crackerjack/.pre-commit-config.yaml +0 -91
- crackerjack/.pytest_cache/.gitignore +0 -2
- crackerjack/.pytest_cache/CACHEDIR.TAG +0 -4
- crackerjack/.pytest_cache/README.md +0 -8
- crackerjack/.pytest_cache/v/cache/nodeids +0 -1
- crackerjack/.pytest_cache/v/cache/stepwise +0 -1
- crackerjack/.ruff_cache/.gitignore +0 -1
- crackerjack/.ruff_cache/0.1.11/3256171999636029978 +0 -0
- crackerjack/.ruff_cache/0.1.14/602324811142551221 +0 -0
- crackerjack/.ruff_cache/0.1.4/10355199064880463147 +0 -0
- crackerjack/.ruff_cache/0.1.6/15140459877605758699 +0 -0
- crackerjack/.ruff_cache/0.1.7/1790508110482614856 +0 -0
- crackerjack/.ruff_cache/0.1.9/17041001205004563469 +0 -0
- crackerjack/.ruff_cache/0.11.2/4070660268492669020 +0 -0
- crackerjack/.ruff_cache/0.11.3/9818742842212983150 +0 -0
- crackerjack/.ruff_cache/0.11.4/9818742842212983150 +0 -0
- crackerjack/.ruff_cache/0.11.6/3557596832929915217 +0 -0
- crackerjack/.ruff_cache/0.11.7/10386934055395314831 +0 -0
- crackerjack/.ruff_cache/0.11.7/3557596832929915217 +0 -0
- crackerjack/.ruff_cache/0.11.8/530407680854991027 +0 -0
- crackerjack/.ruff_cache/0.2.0/10047773857155985907 +0 -0
- crackerjack/.ruff_cache/0.2.1/8522267973936635051 +0 -0
- crackerjack/.ruff_cache/0.2.2/18053836298936336950 +0 -0
- crackerjack/.ruff_cache/0.3.0/12548816621480535786 +0 -0
- crackerjack/.ruff_cache/0.3.3/11081883392474770722 +0 -0
- crackerjack/.ruff_cache/0.3.4/676973378459347183 +0 -0
- crackerjack/.ruff_cache/0.3.5/16311176246009842383 +0 -0
- crackerjack/.ruff_cache/0.5.7/1493622539551733492 +0 -0
- crackerjack/.ruff_cache/0.5.7/6231957614044513175 +0 -0
- crackerjack/.ruff_cache/0.5.7/9932762556785938009 +0 -0
- crackerjack/.ruff_cache/0.6.0/11982804814124138945 +0 -0
- crackerjack/.ruff_cache/0.6.0/12055761203849489982 +0 -0
- crackerjack/.ruff_cache/0.6.2/1206147804896221174 +0 -0
- crackerjack/.ruff_cache/0.6.4/1206147804896221174 +0 -0
- crackerjack/.ruff_cache/0.6.5/1206147804896221174 +0 -0
- crackerjack/.ruff_cache/0.6.7/3657366982708166874 +0 -0
- crackerjack/.ruff_cache/0.6.9/285614542852677309 +0 -0
- crackerjack/.ruff_cache/0.7.1/1024065805990144819 +0 -0
- crackerjack/.ruff_cache/0.7.1/285614542852677309 +0 -0
- crackerjack/.ruff_cache/0.7.3/16061516852537040135 +0 -0
- crackerjack/.ruff_cache/0.8.4/16354268377385700367 +0 -0
- crackerjack/.ruff_cache/0.9.10/12813592349865671909 +0 -0
- crackerjack/.ruff_cache/0.9.10/923908772239632759 +0 -0
- crackerjack/.ruff_cache/0.9.3/13948373885254993391 +0 -0
- crackerjack/.ruff_cache/0.9.9/12813592349865671909 +0 -0
- crackerjack/.ruff_cache/0.9.9/8843823720003377982 +0 -0
- crackerjack/.ruff_cache/CACHEDIR.TAG +0 -1
- crackerjack/crackerjack.py +0 -855
- crackerjack/pyproject.toml +0 -214
- crackerjack-0.18.2.dist-info/METADATA +0 -420
- crackerjack-0.18.2.dist-info/RECORD +0 -59
- crackerjack-0.18.2.dist-info/entry_points.txt +0 -4
- {crackerjack-0.18.2.dist-info → crackerjack-0.45.2.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,2243 @@
|
|
|
1
|
+
"""Workflow Orchestrator for ACB integration.
|
|
2
|
+
|
|
3
|
+
ACB-powered orchestration layer managing workflow lifecycle, dependency resolution,
|
|
4
|
+
and execution strategies. Supports dual execution modes for gradual migration.
|
|
5
|
+
|
|
6
|
+
ACB Patterns:
|
|
7
|
+
- MODULE_ID and MODULE_STATUS at module level
|
|
8
|
+
- depends.set() registration after class definition
|
|
9
|
+
- Structured logging with context fields
|
|
10
|
+
- Protocol-based interfaces
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
from __future__ import annotations
|
|
14
|
+
|
|
15
|
+
import asyncio
|
|
16
|
+
import time
|
|
17
|
+
import typing as t
|
|
18
|
+
from contextlib import suppress
|
|
19
|
+
from importlib.metadata import version
|
|
20
|
+
from pathlib import Path
|
|
21
|
+
|
|
22
|
+
from acb.config import Config
|
|
23
|
+
from acb.console import Console
|
|
24
|
+
from acb.depends import Inject, depends
|
|
25
|
+
from acb.events import Event, EventHandlerResult
|
|
26
|
+
|
|
27
|
+
from crackerjack.agents.base import AgentContext, Issue, IssueType, Priority
|
|
28
|
+
from crackerjack.agents.enhanced_coordinator import EnhancedAgentCoordinator
|
|
29
|
+
from crackerjack.events import WorkflowEvent, WorkflowEventBus
|
|
30
|
+
from crackerjack.models.protocols import (
|
|
31
|
+
DebugServiceProtocol,
|
|
32
|
+
LoggerProtocol,
|
|
33
|
+
MemoryOptimizerProtocol,
|
|
34
|
+
OptionsProtocol,
|
|
35
|
+
PerformanceBenchmarkProtocol,
|
|
36
|
+
PerformanceCacheProtocol,
|
|
37
|
+
PerformanceMonitorProtocol,
|
|
38
|
+
QualityIntelligenceProtocol,
|
|
39
|
+
)
|
|
40
|
+
from crackerjack.services.logging import LoggingContext
|
|
41
|
+
from crackerjack.services.memory_optimizer import memory_optimized
|
|
42
|
+
|
|
43
|
+
from .phase_coordinator import PhaseCoordinator
|
|
44
|
+
from .session_coordinator import SessionController, SessionCoordinator
|
|
45
|
+
from .workflow import WorkflowPhaseExecutor
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
class WorkflowPipeline:
|
|
49
|
+
@depends.inject
|
|
50
|
+
def __init__(
|
|
51
|
+
self,
|
|
52
|
+
console: Inject[Console],
|
|
53
|
+
config: Inject[Config],
|
|
54
|
+
performance_monitor: Inject[PerformanceMonitorProtocol],
|
|
55
|
+
memory_optimizer: Inject[MemoryOptimizerProtocol],
|
|
56
|
+
performance_cache: Inject[PerformanceCacheProtocol],
|
|
57
|
+
debugger: Inject[DebugServiceProtocol],
|
|
58
|
+
logger: Inject[LoggerProtocol],
|
|
59
|
+
session: Inject[SessionCoordinator],
|
|
60
|
+
phases: Inject[PhaseCoordinator],
|
|
61
|
+
phase_executor: Inject[WorkflowPhaseExecutor],
|
|
62
|
+
quality_intelligence: Inject[QualityIntelligenceProtocol] | None = None,
|
|
63
|
+
performance_benchmarks: Inject[PerformanceBenchmarkProtocol] | None = None,
|
|
64
|
+
) -> None:
|
|
65
|
+
self.console = console
|
|
66
|
+
self.config = config
|
|
67
|
+
self.pkg_path = config.root_path
|
|
68
|
+
self.session = session
|
|
69
|
+
self.phases = phases
|
|
70
|
+
self._mcp_state_manager: t.Any = None
|
|
71
|
+
self._last_security_audit: t.Any = None
|
|
72
|
+
|
|
73
|
+
# Services injected via ACB DI
|
|
74
|
+
self._debugger = debugger
|
|
75
|
+
self._performance_monitor = performance_monitor
|
|
76
|
+
self._memory_optimizer = memory_optimizer
|
|
77
|
+
self._cache = performance_cache
|
|
78
|
+
self._quality_intelligence = quality_intelligence
|
|
79
|
+
self._performance_benchmarks = performance_benchmarks
|
|
80
|
+
self.logger = logger
|
|
81
|
+
|
|
82
|
+
# Event bus with graceful fallback
|
|
83
|
+
try:
|
|
84
|
+
self._event_bus: WorkflowEventBus | None = depends.get_sync(
|
|
85
|
+
WorkflowEventBus
|
|
86
|
+
)
|
|
87
|
+
except Exception as e:
|
|
88
|
+
print(f"WARNING: WorkflowEventBus not available: {type(e).__name__}: {e}")
|
|
89
|
+
self._event_bus = None
|
|
90
|
+
|
|
91
|
+
# Phase executor for workflow execution
|
|
92
|
+
self._phase_executor = phase_executor
|
|
93
|
+
self._phase_executor.configure(session, phases, self._event_bus)
|
|
94
|
+
self._phase_executor._mcp_state_manager = self._mcp_state_manager
|
|
95
|
+
|
|
96
|
+
self._session_controller = SessionController(self)
|
|
97
|
+
|
|
98
|
+
@property
|
|
99
|
+
def debugger(self) -> DebugServiceProtocol:
|
|
100
|
+
"""Get debug service (already injected via DI)."""
|
|
101
|
+
return self._debugger
|
|
102
|
+
|
|
103
|
+
def _should_debug(self) -> bool:
|
|
104
|
+
import os
|
|
105
|
+
|
|
106
|
+
return os.environ.get("AI_AGENT_DEBUG", "0") == "1"
|
|
107
|
+
|
|
108
|
+
@memory_optimized
|
|
109
|
+
async def run_complete_workflow(self, options: OptionsProtocol) -> bool:
|
|
110
|
+
workflow_id = f"workflow_{int(time.time())}"
|
|
111
|
+
event_context = self._workflow_context(workflow_id, options)
|
|
112
|
+
start_time = time.time()
|
|
113
|
+
|
|
114
|
+
self._performance_monitor.start_workflow(workflow_id)
|
|
115
|
+
await self._cache.start()
|
|
116
|
+
await self._publish_event(WorkflowEvent.WORKFLOW_STARTED, event_context)
|
|
117
|
+
|
|
118
|
+
success = False
|
|
119
|
+
try:
|
|
120
|
+
with LoggingContext(
|
|
121
|
+
"workflow_execution",
|
|
122
|
+
testing=getattr(options, "test", False),
|
|
123
|
+
skip_hooks=getattr(options, "skip_hooks", False),
|
|
124
|
+
):
|
|
125
|
+
success = await self._execute_workflow(
|
|
126
|
+
options, workflow_id, event_context, start_time
|
|
127
|
+
)
|
|
128
|
+
return success
|
|
129
|
+
except KeyboardInterrupt:
|
|
130
|
+
return await self._handle_keyboard_interrupt(workflow_id, event_context)
|
|
131
|
+
except Exception as e:
|
|
132
|
+
return await self._handle_general_exception(e, workflow_id, event_context)
|
|
133
|
+
finally:
|
|
134
|
+
await self._cleanup_workflow_resources()
|
|
135
|
+
|
|
136
|
+
async def _execute_workflow(
|
|
137
|
+
self,
|
|
138
|
+
options: OptionsProtocol,
|
|
139
|
+
workflow_id: str,
|
|
140
|
+
event_context: dict[str, t.Any],
|
|
141
|
+
start_time: float,
|
|
142
|
+
) -> bool:
|
|
143
|
+
"""Execute the workflow either event-driven or sequentially."""
|
|
144
|
+
if self._event_bus:
|
|
145
|
+
return await self._run_event_driven_workflow(
|
|
146
|
+
options, workflow_id, event_context, start_time
|
|
147
|
+
)
|
|
148
|
+
return await self._run_sequential_workflow(
|
|
149
|
+
options, workflow_id, event_context, start_time
|
|
150
|
+
)
|
|
151
|
+
|
|
152
|
+
async def _run_sequential_workflow(
|
|
153
|
+
self,
|
|
154
|
+
options: OptionsProtocol,
|
|
155
|
+
workflow_id: str,
|
|
156
|
+
event_context: dict[str, t.Any],
|
|
157
|
+
start_time: float,
|
|
158
|
+
) -> bool:
|
|
159
|
+
"""Execute the workflow sequentially."""
|
|
160
|
+
await self._publish_event(
|
|
161
|
+
WorkflowEvent.WORKFLOW_SESSION_INITIALIZING,
|
|
162
|
+
event_context,
|
|
163
|
+
)
|
|
164
|
+
self._session_controller.initialize(options)
|
|
165
|
+
await self._publish_event(
|
|
166
|
+
WorkflowEvent.WORKFLOW_SESSION_READY,
|
|
167
|
+
event_context,
|
|
168
|
+
)
|
|
169
|
+
success = await self._execute_workflow_with_timing(
|
|
170
|
+
options, start_time, workflow_id
|
|
171
|
+
)
|
|
172
|
+
final_event = (
|
|
173
|
+
WorkflowEvent.WORKFLOW_COMPLETED
|
|
174
|
+
if success
|
|
175
|
+
else WorkflowEvent.WORKFLOW_FAILED
|
|
176
|
+
)
|
|
177
|
+
await self._publish_event(
|
|
178
|
+
final_event,
|
|
179
|
+
event_context | {"success": success},
|
|
180
|
+
)
|
|
181
|
+
self._performance_monitor.end_workflow(workflow_id, success)
|
|
182
|
+
return success
|
|
183
|
+
|
|
184
|
+
async def _handle_keyboard_interrupt(
|
|
185
|
+
self, workflow_id: str, event_context: dict[str, t.Any]
|
|
186
|
+
) -> bool:
|
|
187
|
+
"""Handle keyboard interrupt during workflow execution."""
|
|
188
|
+
self._performance_monitor.end_workflow(workflow_id, False)
|
|
189
|
+
await self._publish_event(
|
|
190
|
+
WorkflowEvent.WORKFLOW_INTERRUPTED,
|
|
191
|
+
event_context,
|
|
192
|
+
)
|
|
193
|
+
return self._handle_user_interruption()
|
|
194
|
+
|
|
195
|
+
async def _handle_general_exception(
|
|
196
|
+
self, e: Exception, workflow_id: str, event_context: dict[str, t.Any]
|
|
197
|
+
) -> bool:
|
|
198
|
+
"""Handle general exceptions during workflow execution."""
|
|
199
|
+
self._performance_monitor.end_workflow(workflow_id, False)
|
|
200
|
+
await self._publish_event(
|
|
201
|
+
WorkflowEvent.WORKFLOW_FAILED,
|
|
202
|
+
event_context
|
|
203
|
+
| {
|
|
204
|
+
"error": str(e),
|
|
205
|
+
"error_type": type(e).__name__,
|
|
206
|
+
},
|
|
207
|
+
)
|
|
208
|
+
return self._handle_workflow_exception(e)
|
|
209
|
+
|
|
210
|
+
async def _cleanup_workflow_resources(self) -> None:
|
|
211
|
+
"""Clean up workflow resources in the finally block."""
|
|
212
|
+
self.session.cleanup_resources()
|
|
213
|
+
self._memory_optimizer.optimize_memory()
|
|
214
|
+
await self._cache.stop()
|
|
215
|
+
|
|
216
|
+
def _unsubscribe_all_subscriptions(self, subscriptions: list[str]) -> None:
|
|
217
|
+
"""Unsubscribe from all event subscriptions."""
|
|
218
|
+
for subscription_id in subscriptions.copy():
|
|
219
|
+
if self._event_bus:
|
|
220
|
+
self._event_bus.unsubscribe(subscription_id)
|
|
221
|
+
subscriptions.remove(subscription_id)
|
|
222
|
+
|
|
223
|
+
async def _finalize_workflow(
|
|
224
|
+
self,
|
|
225
|
+
start_time: float,
|
|
226
|
+
workflow_id: str,
|
|
227
|
+
success: bool,
|
|
228
|
+
completion_future: asyncio.Future[bool],
|
|
229
|
+
subscriptions: list[str],
|
|
230
|
+
payload: dict[str, t.Any] | None = None,
|
|
231
|
+
) -> EventHandlerResult:
|
|
232
|
+
"""Finalize the workflow execution."""
|
|
233
|
+
if completion_future.done():
|
|
234
|
+
return EventHandlerResult(success=success)
|
|
235
|
+
|
|
236
|
+
self.session.finalize_session(start_time, success)
|
|
237
|
+
duration = time.time() - start_time
|
|
238
|
+
self._log_workflow_completion(success, duration)
|
|
239
|
+
self._log_workflow_completion_debug(success, duration)
|
|
240
|
+
|
|
241
|
+
workflow_perf = self._performance_monitor.end_workflow(workflow_id, success)
|
|
242
|
+
self.logger.info(
|
|
243
|
+
f"Workflow performance: {workflow_perf.performance_score: .1f} score, "
|
|
244
|
+
f"{workflow_perf.total_duration_seconds: .2f}s duration"
|
|
245
|
+
)
|
|
246
|
+
|
|
247
|
+
await self._generate_performance_benchmark_report(
|
|
248
|
+
workflow_id, duration, success
|
|
249
|
+
)
|
|
250
|
+
|
|
251
|
+
self._unsubscribe_all_subscriptions(subscriptions)
|
|
252
|
+
completion_future.set_result(success)
|
|
253
|
+
|
|
254
|
+
return EventHandlerResult(success=success)
|
|
255
|
+
|
|
256
|
+
async def _publish_workflow_failure(
|
|
257
|
+
self,
|
|
258
|
+
event_context: dict[str, t.Any],
|
|
259
|
+
stage: str,
|
|
260
|
+
error: Exception | None = None,
|
|
261
|
+
) -> None:
|
|
262
|
+
"""Publish workflow failure event."""
|
|
263
|
+
payload: dict[str, t.Any] = event_context | {"stage": stage}
|
|
264
|
+
if error is not None:
|
|
265
|
+
payload["error"] = str(error)
|
|
266
|
+
payload["error_type"] = type(error).__name__
|
|
267
|
+
|
|
268
|
+
await self._publish_event(WorkflowEvent.WORKFLOW_FAILED, payload)
|
|
269
|
+
|
|
270
|
+
async def _handle_session_ready(
|
|
271
|
+
self,
|
|
272
|
+
event: Event,
|
|
273
|
+
state_flags: dict[str, bool],
|
|
274
|
+
workflow_id: str,
|
|
275
|
+
options: OptionsProtocol,
|
|
276
|
+
) -> EventHandlerResult:
|
|
277
|
+
"""Handle session ready event."""
|
|
278
|
+
if state_flags["configuration"]:
|
|
279
|
+
return EventHandlerResult(success=True)
|
|
280
|
+
state_flags["configuration"] = True
|
|
281
|
+
|
|
282
|
+
try:
|
|
283
|
+
await self._publish_event(
|
|
284
|
+
WorkflowEvent.CONFIG_PHASE_STARTED,
|
|
285
|
+
{"workflow_id": workflow_id},
|
|
286
|
+
)
|
|
287
|
+
config_success = await asyncio.to_thread(
|
|
288
|
+
self.phases.run_configuration_phase,
|
|
289
|
+
options,
|
|
290
|
+
)
|
|
291
|
+
await self._publish_event(
|
|
292
|
+
WorkflowEvent.CONFIG_PHASE_COMPLETED,
|
|
293
|
+
{
|
|
294
|
+
"workflow_id": workflow_id,
|
|
295
|
+
"success": config_success,
|
|
296
|
+
},
|
|
297
|
+
)
|
|
298
|
+
if not config_success:
|
|
299
|
+
await self._publish_workflow_failure(
|
|
300
|
+
{"workflow_id": workflow_id}, "configuration"
|
|
301
|
+
)
|
|
302
|
+
return EventHandlerResult(success=config_success)
|
|
303
|
+
except Exception as exc: # pragma: no cover - defensive
|
|
304
|
+
await self._publish_workflow_failure(
|
|
305
|
+
{"workflow_id": workflow_id}, "configuration", exc
|
|
306
|
+
)
|
|
307
|
+
return EventHandlerResult(success=False, error_message=str(exc))
|
|
308
|
+
|
|
309
|
+
async def _handle_config_completed(
|
|
310
|
+
self,
|
|
311
|
+
event: Event,
|
|
312
|
+
state_flags: dict[str, bool],
|
|
313
|
+
workflow_id: str,
|
|
314
|
+
options: OptionsProtocol,
|
|
315
|
+
) -> EventHandlerResult:
|
|
316
|
+
"""Handle configuration completed event."""
|
|
317
|
+
if not event.payload.get("success", False):
|
|
318
|
+
return EventHandlerResult(success=False)
|
|
319
|
+
if state_flags["quality"]:
|
|
320
|
+
return EventHandlerResult(success=True)
|
|
321
|
+
state_flags["quality"] = True
|
|
322
|
+
|
|
323
|
+
try:
|
|
324
|
+
await self._publish_event(
|
|
325
|
+
WorkflowEvent.QUALITY_PHASE_STARTED,
|
|
326
|
+
{"workflow_id": workflow_id},
|
|
327
|
+
)
|
|
328
|
+
quality_success = await self._execute_quality_phase(options, workflow_id)
|
|
329
|
+
await self._publish_event(
|
|
330
|
+
WorkflowEvent.QUALITY_PHASE_COMPLETED,
|
|
331
|
+
{
|
|
332
|
+
"workflow_id": workflow_id,
|
|
333
|
+
"success": quality_success,
|
|
334
|
+
},
|
|
335
|
+
)
|
|
336
|
+
if not quality_success:
|
|
337
|
+
await self._publish_workflow_failure(
|
|
338
|
+
{"workflow_id": workflow_id}, "quality"
|
|
339
|
+
)
|
|
340
|
+
return EventHandlerResult(success=quality_success)
|
|
341
|
+
except Exception as exc: # pragma: no cover - defensive
|
|
342
|
+
await self._publish_workflow_failure(
|
|
343
|
+
{"workflow_id": workflow_id}, "quality", exc
|
|
344
|
+
)
|
|
345
|
+
return EventHandlerResult(success=False, error_message=str(exc))
|
|
346
|
+
|
|
347
|
+
async def _handle_quality_completed(
|
|
348
|
+
self,
|
|
349
|
+
event: Event,
|
|
350
|
+
state_flags: dict[str, bool],
|
|
351
|
+
workflow_id: str,
|
|
352
|
+
options: OptionsProtocol,
|
|
353
|
+
publish_requested: bool,
|
|
354
|
+
) -> EventHandlerResult:
|
|
355
|
+
"""Handle quality phase completed event."""
|
|
356
|
+
if not event.payload.get("success", False):
|
|
357
|
+
return EventHandlerResult(success=False)
|
|
358
|
+
if state_flags["publishing"]:
|
|
359
|
+
return EventHandlerResult(success=True)
|
|
360
|
+
state_flags["publishing"] = True
|
|
361
|
+
|
|
362
|
+
try:
|
|
363
|
+
if publish_requested:
|
|
364
|
+
await self._publish_event(
|
|
365
|
+
WorkflowEvent.PUBLISH_PHASE_STARTED,
|
|
366
|
+
{"workflow_id": workflow_id},
|
|
367
|
+
)
|
|
368
|
+
publishing_success = await self._execute_publishing_workflow(
|
|
369
|
+
options, workflow_id
|
|
370
|
+
)
|
|
371
|
+
await self._publish_event(
|
|
372
|
+
WorkflowEvent.PUBLISH_PHASE_COMPLETED,
|
|
373
|
+
{
|
|
374
|
+
"workflow_id": workflow_id,
|
|
375
|
+
"success": publishing_success,
|
|
376
|
+
},
|
|
377
|
+
)
|
|
378
|
+
if not publishing_success:
|
|
379
|
+
await self._publish_workflow_failure(
|
|
380
|
+
{"workflow_id": workflow_id}, "publishing"
|
|
381
|
+
)
|
|
382
|
+
return EventHandlerResult(success=False)
|
|
383
|
+
else:
|
|
384
|
+
await self._publish_event(
|
|
385
|
+
WorkflowEvent.PUBLISH_PHASE_COMPLETED,
|
|
386
|
+
{
|
|
387
|
+
"workflow_id": workflow_id,
|
|
388
|
+
"success": True,
|
|
389
|
+
"skipped": True,
|
|
390
|
+
},
|
|
391
|
+
)
|
|
392
|
+
return EventHandlerResult(success=True)
|
|
393
|
+
except Exception as exc: # pragma: no cover - defensive
|
|
394
|
+
await self._publish_workflow_failure(
|
|
395
|
+
{"workflow_id": workflow_id}, "publishing", exc
|
|
396
|
+
)
|
|
397
|
+
return EventHandlerResult(success=False, error_message=str(exc))
|
|
398
|
+
|
|
399
|
+
async def _handle_publish_completed(
|
|
400
|
+
self,
|
|
401
|
+
event: Event,
|
|
402
|
+
state_flags: dict[str, bool],
|
|
403
|
+
workflow_id: str,
|
|
404
|
+
options: OptionsProtocol,
|
|
405
|
+
commit_requested: bool,
|
|
406
|
+
publish_requested: bool,
|
|
407
|
+
event_context: dict[str, t.Any],
|
|
408
|
+
) -> EventHandlerResult:
|
|
409
|
+
"""Handle publishing completed event."""
|
|
410
|
+
if publish_requested and not event.payload.get("success", False):
|
|
411
|
+
return EventHandlerResult(success=False)
|
|
412
|
+
if state_flags["commit"]:
|
|
413
|
+
return EventHandlerResult(success=True)
|
|
414
|
+
state_flags["commit"] = True
|
|
415
|
+
|
|
416
|
+
try:
|
|
417
|
+
if commit_requested:
|
|
418
|
+
await self._publish_event(
|
|
419
|
+
WorkflowEvent.COMMIT_PHASE_STARTED,
|
|
420
|
+
{"workflow_id": workflow_id},
|
|
421
|
+
)
|
|
422
|
+
commit_success = await self._execute_commit_workflow(
|
|
423
|
+
options, workflow_id
|
|
424
|
+
)
|
|
425
|
+
await self._publish_event(
|
|
426
|
+
WorkflowEvent.COMMIT_PHASE_COMPLETED,
|
|
427
|
+
{
|
|
428
|
+
"workflow_id": workflow_id,
|
|
429
|
+
"success": commit_success,
|
|
430
|
+
},
|
|
431
|
+
)
|
|
432
|
+
if not commit_success:
|
|
433
|
+
await self._publish_workflow_failure(
|
|
434
|
+
{"workflow_id": workflow_id}, "commit"
|
|
435
|
+
)
|
|
436
|
+
return EventHandlerResult(success=False)
|
|
437
|
+
else:
|
|
438
|
+
await self._publish_event(
|
|
439
|
+
WorkflowEvent.COMMIT_PHASE_COMPLETED,
|
|
440
|
+
{
|
|
441
|
+
"workflow_id": workflow_id,
|
|
442
|
+
"success": True,
|
|
443
|
+
"skipped": True,
|
|
444
|
+
},
|
|
445
|
+
)
|
|
446
|
+
|
|
447
|
+
await self._publish_event(
|
|
448
|
+
WorkflowEvent.WORKFLOW_COMPLETED,
|
|
449
|
+
event_context | {"success": True},
|
|
450
|
+
)
|
|
451
|
+
return EventHandlerResult(success=True)
|
|
452
|
+
except Exception as exc: # pragma: no cover - defensive
|
|
453
|
+
await self._publish_workflow_failure(
|
|
454
|
+
{"workflow_id": workflow_id}, "commit", exc
|
|
455
|
+
)
|
|
456
|
+
return EventHandlerResult(success=False, error_message=str(exc))
|
|
457
|
+
|
|
458
|
+
async def _handle_workflow_completed(
|
|
459
|
+
self,
|
|
460
|
+
event: Event,
|
|
461
|
+
start_time: float,
|
|
462
|
+
workflow_id: str,
|
|
463
|
+
completion_future: asyncio.Future[bool],
|
|
464
|
+
subscriptions: list[str],
|
|
465
|
+
) -> EventHandlerResult:
|
|
466
|
+
"""Handle workflow completed event."""
|
|
467
|
+
return await self._finalize_workflow(
|
|
468
|
+
start_time,
|
|
469
|
+
workflow_id,
|
|
470
|
+
True,
|
|
471
|
+
completion_future,
|
|
472
|
+
subscriptions,
|
|
473
|
+
event.payload,
|
|
474
|
+
)
|
|
475
|
+
|
|
476
|
+
async def _handle_workflow_failed(
|
|
477
|
+
self,
|
|
478
|
+
event: Event,
|
|
479
|
+
start_time: float,
|
|
480
|
+
workflow_id: str,
|
|
481
|
+
completion_future: asyncio.Future[bool],
|
|
482
|
+
subscriptions: list[str],
|
|
483
|
+
) -> EventHandlerResult:
|
|
484
|
+
"""Handle workflow failed event."""
|
|
485
|
+
return await self._finalize_workflow(
|
|
486
|
+
start_time,
|
|
487
|
+
workflow_id,
|
|
488
|
+
False,
|
|
489
|
+
completion_future,
|
|
490
|
+
subscriptions,
|
|
491
|
+
event.payload,
|
|
492
|
+
)
|
|
493
|
+
|
|
494
|
+
async def _run_event_driven_workflow(
|
|
495
|
+
self,
|
|
496
|
+
options: OptionsProtocol,
|
|
497
|
+
workflow_id: str,
|
|
498
|
+
event_context: dict[str, t.Any],
|
|
499
|
+
start_time: float,
|
|
500
|
+
) -> bool:
|
|
501
|
+
if not self._event_bus:
|
|
502
|
+
raise RuntimeError("Workflow event bus is not configured.")
|
|
503
|
+
|
|
504
|
+
loop = asyncio.get_running_loop()
|
|
505
|
+
completion_future: asyncio.Future[bool] = loop.create_future()
|
|
506
|
+
subscriptions: list[str] = []
|
|
507
|
+
|
|
508
|
+
publish_requested = bool(
|
|
509
|
+
getattr(options, "publish", False) or getattr(options, "all", False)
|
|
510
|
+
)
|
|
511
|
+
commit_requested = bool(getattr(options, "commit", False))
|
|
512
|
+
|
|
513
|
+
state_flags = {
|
|
514
|
+
"configuration": False,
|
|
515
|
+
"quality": False,
|
|
516
|
+
"publishing": False,
|
|
517
|
+
"commit": False,
|
|
518
|
+
}
|
|
519
|
+
|
|
520
|
+
# Subscribe to events
|
|
521
|
+
async def on_session_ready(event: Event) -> EventHandlerResult:
|
|
522
|
+
return await self._handle_session_ready(
|
|
523
|
+
event, state_flags, workflow_id, options
|
|
524
|
+
)
|
|
525
|
+
|
|
526
|
+
async def on_config_completed(event: Event) -> EventHandlerResult:
|
|
527
|
+
return await self._handle_config_completed(
|
|
528
|
+
event, state_flags, workflow_id, options
|
|
529
|
+
)
|
|
530
|
+
|
|
531
|
+
async def on_quality_completed(event: Event) -> EventHandlerResult:
|
|
532
|
+
return await self._handle_quality_completed(
|
|
533
|
+
event, state_flags, workflow_id, options, publish_requested
|
|
534
|
+
)
|
|
535
|
+
|
|
536
|
+
async def on_publish_completed(event: Event) -> EventHandlerResult:
|
|
537
|
+
return await self._handle_publish_completed(
|
|
538
|
+
event,
|
|
539
|
+
state_flags,
|
|
540
|
+
workflow_id,
|
|
541
|
+
options,
|
|
542
|
+
commit_requested,
|
|
543
|
+
publish_requested,
|
|
544
|
+
event_context,
|
|
545
|
+
)
|
|
546
|
+
|
|
547
|
+
async def on_workflow_completed(event: Event) -> EventHandlerResult:
|
|
548
|
+
return await self._handle_workflow_completed(
|
|
549
|
+
event, start_time, workflow_id, completion_future, subscriptions
|
|
550
|
+
)
|
|
551
|
+
|
|
552
|
+
async def on_workflow_failed(event: Event) -> EventHandlerResult:
|
|
553
|
+
return await self._handle_workflow_failed(
|
|
554
|
+
event, start_time, workflow_id, completion_future, subscriptions
|
|
555
|
+
)
|
|
556
|
+
|
|
557
|
+
subscriptions.extend(
|
|
558
|
+
(
|
|
559
|
+
self._event_bus.subscribe(
|
|
560
|
+
WorkflowEvent.WORKFLOW_SESSION_READY,
|
|
561
|
+
on_session_ready,
|
|
562
|
+
),
|
|
563
|
+
self._event_bus.subscribe(
|
|
564
|
+
WorkflowEvent.CONFIG_PHASE_COMPLETED,
|
|
565
|
+
on_config_completed,
|
|
566
|
+
),
|
|
567
|
+
self._event_bus.subscribe(
|
|
568
|
+
WorkflowEvent.QUALITY_PHASE_COMPLETED,
|
|
569
|
+
on_quality_completed,
|
|
570
|
+
),
|
|
571
|
+
self._event_bus.subscribe(
|
|
572
|
+
WorkflowEvent.PUBLISH_PHASE_COMPLETED,
|
|
573
|
+
on_publish_completed,
|
|
574
|
+
),
|
|
575
|
+
self._event_bus.subscribe(
|
|
576
|
+
WorkflowEvent.WORKFLOW_COMPLETED,
|
|
577
|
+
on_workflow_completed,
|
|
578
|
+
),
|
|
579
|
+
self._event_bus.subscribe(
|
|
580
|
+
WorkflowEvent.WORKFLOW_FAILED,
|
|
581
|
+
on_workflow_failed,
|
|
582
|
+
),
|
|
583
|
+
)
|
|
584
|
+
)
|
|
585
|
+
|
|
586
|
+
try:
|
|
587
|
+
await self._publish_event(
|
|
588
|
+
WorkflowEvent.WORKFLOW_SESSION_INITIALIZING,
|
|
589
|
+
event_context,
|
|
590
|
+
)
|
|
591
|
+
self._session_controller.initialize(options)
|
|
592
|
+
await self._publish_event(
|
|
593
|
+
WorkflowEvent.WORKFLOW_SESSION_READY,
|
|
594
|
+
event_context,
|
|
595
|
+
)
|
|
596
|
+
except Exception as exc: # pragma: no cover - defensive
|
|
597
|
+
await self._publish_workflow_failure(
|
|
598
|
+
event_context, "session_initialization", exc
|
|
599
|
+
)
|
|
600
|
+
await self._finalize_workflow(
|
|
601
|
+
start_time, workflow_id, False, completion_future, subscriptions
|
|
602
|
+
)
|
|
603
|
+
return False
|
|
604
|
+
|
|
605
|
+
return await completion_future
|
|
606
|
+
|
|
607
|
+
def _log_workflow_startup_debug(self, options: OptionsProtocol) -> None:
|
|
608
|
+
if not self._should_debug():
|
|
609
|
+
return
|
|
610
|
+
|
|
611
|
+
self.debugger.log_workflow_phase(
|
|
612
|
+
"workflow_execution",
|
|
613
|
+
"started",
|
|
614
|
+
details={
|
|
615
|
+
"testing": getattr(options, "test", False),
|
|
616
|
+
"skip_hooks": getattr(options, "skip_hooks", False),
|
|
617
|
+
"ai_agent": getattr(options, "ai_agent", False),
|
|
618
|
+
},
|
|
619
|
+
)
|
|
620
|
+
|
|
621
|
+
def _log_zuban_lsp_status(self) -> None:
|
|
622
|
+
"""Display current Zuban LSP server status during workflow startup."""
|
|
623
|
+
from crackerjack.services.server_manager import find_zuban_lsp_processes
|
|
624
|
+
|
|
625
|
+
try:
|
|
626
|
+
lsp_processes = find_zuban_lsp_processes()
|
|
627
|
+
|
|
628
|
+
if lsp_processes:
|
|
629
|
+
proc = lsp_processes[0] # Show first running process
|
|
630
|
+
self.logger.info(
|
|
631
|
+
f"🔍 Zuban LSP server running (PID: {proc['pid']}, "
|
|
632
|
+
f"CPU: {proc['cpu']}%, Memory: {proc['mem']}%)"
|
|
633
|
+
)
|
|
634
|
+
else:
|
|
635
|
+
self.logger.info("🔍 Zuban LSP server not running")
|
|
636
|
+
|
|
637
|
+
except Exception as e:
|
|
638
|
+
self.logger.debug(f"Failed to check Zuban LSP status: {e}")
|
|
639
|
+
|
|
640
|
+
def _log_workflow_startup_info(self, options: OptionsProtocol) -> None:
|
|
641
|
+
self.logger.info(
|
|
642
|
+
"Starting complete workflow execution",
|
|
643
|
+
testing=getattr(options, "test", False),
|
|
644
|
+
skip_hooks=getattr(options, "skip_hooks", False),
|
|
645
|
+
package_path=str(self.pkg_path),
|
|
646
|
+
)
|
|
647
|
+
|
|
648
|
+
# Display Zuban LSP server status
|
|
649
|
+
self._log_zuban_lsp_status()
|
|
650
|
+
|
|
651
|
+
async def _execute_workflow_with_timing(
|
|
652
|
+
self, options: OptionsProtocol, start_time: float, workflow_id: str
|
|
653
|
+
) -> bool:
|
|
654
|
+
success = await self._phase_executor._execute_workflow_phases(
|
|
655
|
+
options, workflow_id
|
|
656
|
+
)
|
|
657
|
+
self.session.finalize_session(start_time, success)
|
|
658
|
+
|
|
659
|
+
duration = time.time() - start_time
|
|
660
|
+
self._log_workflow_completion(success, duration)
|
|
661
|
+
self._log_workflow_completion_debug(success, duration)
|
|
662
|
+
await self._generate_performance_benchmark_report(
|
|
663
|
+
workflow_id, duration, success
|
|
664
|
+
)
|
|
665
|
+
|
|
666
|
+
return success
|
|
667
|
+
|
|
668
|
+
def _log_workflow_completion(self, success: bool, duration: float) -> None:
|
|
669
|
+
self.logger.info(
|
|
670
|
+
"Workflow execution completed",
|
|
671
|
+
success=success,
|
|
672
|
+
duration_seconds=round(duration, 2),
|
|
673
|
+
)
|
|
674
|
+
|
|
675
|
+
def _log_workflow_completion_debug(self, success: bool, duration: float) -> None:
|
|
676
|
+
if not self._should_debug():
|
|
677
|
+
return
|
|
678
|
+
|
|
679
|
+
self.debugger.set_workflow_success(success)
|
|
680
|
+
self.debugger.log_workflow_phase(
|
|
681
|
+
"workflow_execution",
|
|
682
|
+
"completed" if success else "failed",
|
|
683
|
+
duration=duration,
|
|
684
|
+
)
|
|
685
|
+
|
|
686
|
+
async def _generate_performance_benchmark_report(
|
|
687
|
+
self, workflow_id: str, duration: float, success: bool
|
|
688
|
+
) -> None:
|
|
689
|
+
"""Generate and display performance benchmark report for workflow execution."""
|
|
690
|
+
if not self._performance_benchmarks:
|
|
691
|
+
return
|
|
692
|
+
|
|
693
|
+
try:
|
|
694
|
+
self._gather_performance_metrics(workflow_id, duration, success)
|
|
695
|
+
benchmark_results = await self._performance_benchmarks.run_benchmark_suite()
|
|
696
|
+
self._display_benchmark_results(benchmark_results, duration)
|
|
697
|
+
|
|
698
|
+
except Exception as e:
|
|
699
|
+
self.console.print(
|
|
700
|
+
f"[dim]⚠️ Performance benchmark failed: {str(e)[:50]}...[/dim]"
|
|
701
|
+
)
|
|
702
|
+
|
|
703
|
+
if self.debugger.enabled:
|
|
704
|
+
self.debugger.print_debug_summary()
|
|
705
|
+
|
|
706
|
+
def _gather_performance_metrics(
|
|
707
|
+
self, workflow_id: str, duration: float, success: bool
|
|
708
|
+
) -> dict[str, t.Any]:
|
|
709
|
+
"""Gather performance metrics from workflow execution."""
|
|
710
|
+
return {
|
|
711
|
+
"workflow_id": workflow_id,
|
|
712
|
+
"total_duration": duration,
|
|
713
|
+
"success": success,
|
|
714
|
+
"cache_metrics": self._cache.get_stats() if self._cache else {},
|
|
715
|
+
"memory_metrics": self._memory_optimizer.get_stats()
|
|
716
|
+
if hasattr(self._memory_optimizer, "get_stats")
|
|
717
|
+
else {},
|
|
718
|
+
}
|
|
719
|
+
|
|
+    def _display_benchmark_results(
+        self, benchmark_results: t.Any, duration: float
+    ) -> None:
+        """Display compact performance summary."""
+        if not benchmark_results:
+            return
+
+        self.console.print("\n[cyan]📊 Performance Benchmark Summary[/cyan]")
+        self.console.print(f"Workflow Duration: [bold]{duration:.2f}s[/bold]")
+
+        self._show_performance_improvements(benchmark_results)
+
+    def _show_performance_improvements(self, benchmark_results: t.Any) -> None:
+        """Show key performance improvements from benchmark results."""
+        for result in benchmark_results.results[:3]:  # Top 3 results
+            self._display_time_improvement(result)
+            self._display_cache_efficiency(result)
+
+    def _display_time_improvement(self, result: t.Any) -> None:
+        """Display time improvement percentage if available."""
+        if result.time_improvement_percentage > 0:
+            self.console.print(
+                f"[green]⚡[/green] {result.test_name}:"
+                f" {result.time_improvement_percentage:.1f}% faster"
+            )
+
+    def _display_cache_efficiency(self, result: t.Any) -> None:
+        """Display cache hit ratio if available."""
+        if result.cache_hit_ratio > 0:
+            self.console.print(
+                f"[blue]🎯[/blue] Cache efficiency: {result.cache_hit_ratio:.0%}"
+            )
+
+    def _handle_user_interruption(self) -> bool:
+        self.console.print("Interrupted by user")
+        self.session.fail_task("workflow", "Interrupted by user")
+        self.logger.warning("Workflow interrupted by user")
+        return False
+
+    def _handle_workflow_exception(self, error: Exception) -> bool:
+        self.console.print(f"Error: {error}")
+        self.session.fail_task("workflow", f"Unexpected error: {error}")
+        self.logger.exception(
+            "Workflow execution failed",
+            error=str(error),
+            error_type=type(error).__name__,
+        )
+        return False
+
+    def _handle_quality_phase_result(
+        self, success: bool, quality_success: bool, options: OptionsProtocol
+    ) -> bool:
+        """Handle the result of the quality phase execution."""
+        if not quality_success:
+            if self._is_publishing_workflow(options):
+                # For publishing workflows, quality failures should stop execution
+                return False
+            # For non-publishing workflows, we continue but mark as failed
+            return False
+        return success
+
+    def _handle_workflow_completion(
+        self, success: bool, publishing_success: bool, options: OptionsProtocol
+    ) -> bool:
+        """Handle workflow completion and determine final success status."""
+        # Only fail the overall workflow if publishing was explicitly requested and failed
+        if not publishing_success and (options.publish or options.all):
+            self.console.print(
+                "[red]❌ Publishing failed - overall workflow marked as failed[/red]"
+            )
+            return False
+        return success
+
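The completion rule reduces to a small pure function: a publishing failure sinks the workflow only when publishing was actually requested. A standalone sketch of the decision, assuming plain booleans in place of the OptionsProtocol object:

    def final_success(
        success: bool, publishing_success: bool, publish: bool, all_flag: bool
    ) -> bool:
        # Publishing failures are fatal only on an explicit publish request.
        if not publishing_success and (publish or all_flag):
            return False
        return success

    assert final_success(True, False, publish=False, all_flag=False) is True
    assert final_success(True, False, publish=True, all_flag=False) is False
    assert final_success(False, True, publish=True, all_flag=False) is False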
+    def _run_initial_fast_hooks(self, options: OptionsProtocol, iteration: int) -> bool:
+        fast_hooks_passed = self._phase_executor._run_fast_hooks_phase(options)
+        if not fast_hooks_passed:
+            if options.ai_agent and self._should_debug():
+                self.debugger.log_iteration_end(iteration, False)
+            return False
+        return True
+
+    async def _process_security_gates(self, options: OptionsProtocol) -> bool:
+        publishing_requested, security_blocks = (
+            self._check_security_gates_for_publishing(options)
+        )
+
+        if not (publishing_requested and security_blocks):
+            return True
+
+        security_fix_result = await self._handle_security_gate_failure(
+            options, allow_ai_fixing=True
+        )
+        return security_fix_result
+
+    def _execute_standard_hooks_workflow(self, options: OptionsProtocol) -> bool:
+        self._phase_executor._update_hooks_status_running()
+
+        if not self._execute_fast_hooks_workflow(options):
+            self._phase_executor._handle_hooks_completion(False)
+            return False
+
+        if not self._execute_cleaning_workflow_if_needed(options):
+            self._phase_executor._handle_hooks_completion(False)
+            return False
+
+        comprehensive_success = self._phase_executor._run_comprehensive_hooks_phase(
+            options
+        )
+        self._phase_executor._handle_hooks_completion(comprehensive_success)
+
+        return comprehensive_success
+
+    def _execute_fast_hooks_workflow(self, options: OptionsProtocol) -> bool:
+        """Execute fast hooks phase."""
+        return self._phase_executor._run_fast_hooks_phase(options)
+
+    def _execute_cleaning_workflow_if_needed(self, options: OptionsProtocol) -> bool:
+        """Execute cleaning workflow if requested."""
+        if not getattr(options, "clean", False):
+            return True
+
+        if not self._phase_executor._run_code_cleaning_phase(options):
+            return False
+
+        if not self._phase_executor._run_post_cleaning_fast_hooks(options):
+            return False
+
+        self._phase_executor._mark_code_cleaning_complete()
+        return True
+
+    def _is_publishing_workflow(self, options: OptionsProtocol) -> bool:
+        """Check if this is a publishing workflow."""
+        return bool(
+            getattr(options, "publish", False) or getattr(options, "all", False)
+        )
+
+    def _update_mcp_status(self, phase: str, status: str) -> None:
+        """Update MCP (Model Context Protocol) status."""
+        # Check if _mcp_state_manager exists and is not None
+        mcp_state_manager = getattr(self, "_mcp_state_manager", None)
+        if mcp_state_manager:
+            try:
+                mcp_state_manager.update_status(phase, status)
+            except (AttributeError, TypeError, RuntimeError) as e:
+                # If MCP is not available or fails, continue without error
+                self.logger.debug(f"MCP status update failed: {e}")
+
+    async def _execute_quality_phase(
+        self, options: OptionsProtocol, workflow_id: str
+    ) -> bool:
+        """Execute the quality phase of the workflow."""
+        try:
+            # Check if this is a publishing workflow
+            is_publishing = self._is_publishing_workflow(options)
+
+            # Run fast hooks phase first
+            fast_success = self.phases.run_fast_hooks_only(options)
+            if not fast_success and is_publishing:
+                return False  # For publishing workflows, fast hook failures should stop execution
+
+            # Run comprehensive hooks phase
+            comprehensive_success = self.phases.run_comprehensive_hooks_only(options)
+            if not comprehensive_success and is_publishing:
+                return False  # For publishing workflows, comprehensive hook failures should stop execution
+
+            # Both fast and comprehensive hooks must pass for success
+            quality_success = fast_success and comprehensive_success
+
+            # Run testing phase if requested
+            if getattr(options, "test", False):
+                testing_success = self.phases.run_testing_phase(options)
+                if not testing_success and is_publishing:
+                    return False  # For publishing workflows, test failures should stop execution
+                # For non-publishing workflows, testing failures should factor into overall success too
+                quality_success = quality_success and testing_success
+
+            return quality_success
+        except Exception as e:
+            self.logger.error(f"Quality phase execution failed: {e}")
+            return False
+
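The quality phase runs fast hooks, then comprehensive hooks, then optionally tests, and exits early on failure only along the publishing path; non-publishing runs execute everything and report the combined result. A condensed sketch of that control flow, assuming three boolean-returning callables:

    import typing as t

    def quality_gate(
        fast: t.Callable[[], bool],
        comprehensive: t.Callable[[], bool],
        tests: t.Callable[[], bool] | None,
        is_publishing: bool,
    ) -> bool:
        fast_ok = fast()
        if not fast_ok and is_publishing:
            return False  # publishing stops at the first failed gate
        comp_ok = comprehensive()
        if not comp_ok and is_publishing:
            return False
        ok = fast_ok and comp_ok
        if tests is not None:
            test_ok = tests()
            if not test_ok and is_publishing:
                return False
            ok = ok and test_ok
        return ok

    # A non-publishing run still reports failure, but all phases execute:
    assert quality_gate(lambda: False, lambda: True, None, is_publishing=False) is False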
+    async def _execute_publishing_workflow(
+        self, options: OptionsProtocol, workflow_id: str
+    ) -> bool:
+        """Execute the publishing workflow phase."""
+        try:
+            # Run publishing phase
+            publishing_success = self.phases.run_publishing_phase(options)
+            return publishing_success
+        except Exception as e:
+            self.logger.error(f"Publishing workflow execution failed: {e}")
+            return False
+
+    async def _execute_commit_workflow(
+        self, options: OptionsProtocol, workflow_id: str
+    ) -> bool:
+        """Execute the commit workflow phase."""
+        try:
+            # Run commit phase
+            commit_success = self.phases.run_commit_phase(options)
+            return commit_success
+        except Exception as e:
+            self.logger.error(f"Commit workflow execution failed: {e}")
+            return False
+
+    def _has_code_cleaning_run(self) -> bool:
+        """Check if code cleaning has already run in this session."""
+        # Check session metadata or a dedicated flag
+        if (
+            self.session.session_tracker
+            and "code_cleaning_completed" in self.session.session_tracker.metadata
+        ):
+            return bool(
+                self.session.session_tracker.metadata["code_cleaning_completed"]
+            )
+        return False
+
+    def _mark_code_cleaning_complete(self) -> None:
+        """Mark that code cleaning has been completed."""
+        if self.session.session_tracker:
+            self.session.session_tracker.metadata["code_cleaning_completed"] = True
+
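These two helpers implement a simple idempotency flag stored in session metadata, so cleaning runs at most once per session. The contract they rely on, sketched with a minimal tracker (SessionTracker here is illustrative; only its metadata dict matters):

    class SessionTracker:
        """Minimal stand-in: only the metadata mapping is relevant."""

        def __init__(self) -> None:
            self.metadata: dict[str, object] = {}

    def has_run(tracker: SessionTracker) -> bool:
        # dict.get with a default collapses the membership-then-read two-step.
        return bool(tracker.metadata.get("code_cleaning_completed", False))

    def mark_complete(tracker: SessionTracker) -> None:
        tracker.metadata["code_cleaning_completed"] = True

    tracker = SessionTracker()
    assert has_run(tracker) is False
    mark_complete(tracker)
    assert has_run(tracker) is True  # a second cleaning pass is now skipped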
+    def _run_code_cleaning_phase(self, options: OptionsProtocol) -> bool:
+        """Execute code cleaning phase - wrapper for ACB workflow compatibility."""
+        result: bool = self.phases.run_cleaning_phase(options)  # type: ignore[arg-type,assignment]
+        return result
+
+    def _run_post_cleaning_fast_hooks(self, options: OptionsProtocol) -> bool:
+        """Run fast hooks after code cleaning phase."""
+        result: bool = self.phases.run_fast_hooks_only(options)  # type: ignore[arg-type,assignment]
+        return result
+
+    def _run_fast_hooks_phase(self, options: OptionsProtocol) -> bool:
+        """Execute fast hooks phase - wrapper for ACB workflow compatibility."""
+        result: bool = self.phases.run_fast_hooks_only(options)  # type: ignore[arg-type,assignment]
+        return result
+
+    def _run_comprehensive_hooks_phase(self, options: OptionsProtocol) -> bool:
+        """Execute comprehensive hooks phase - wrapper for ACB workflow compatibility."""
+        result: bool = self.phases.run_comprehensive_hooks_only(options)  # type: ignore[arg-type,assignment]
+        return result
+
+    def _run_testing_phase(self, options: OptionsProtocol) -> bool:
+        """Execute testing phase - wrapper for ACB workflow compatibility."""
+        result: bool = self.phases.run_testing_phase(options)  # type: ignore[arg-type,assignment]
+        return result
+
+    def _configure_session_cleanup(self, options: OptionsProtocol) -> None:
+        """Configure session cleanup handlers."""
+        # Add any necessary session cleanup configuration here
+        self.session.register_cleanup(self._cleanup_workflow_resources)
+        if hasattr(self, "_mcp_state_manager") and self._mcp_state_manager:
+            self.session.register_cleanup(self._mcp_state_manager.cleanup)
+
+    def _initialize_zuban_lsp(self, options: OptionsProtocol) -> None:
+        """Initialize Zuban LSP server if needed."""
+        # Placeholder implementation - actual LSP initialization would go here
+        pass
+
+    def _configure_hook_manager_lsp(self, options: OptionsProtocol) -> None:
+        """Configure hook manager LSP settings."""
+        # Placeholder implementation - actual hook manager LSP configuration would go here
+        pass
+
+    def _register_lsp_cleanup_handler(self, options: OptionsProtocol) -> None:
+        """Register LSP cleanup handler."""
+        # Placeholder implementation - actual LSP cleanup handler would go here
+        pass
+
+    async def _run_ai_agent_fixing_phase(self, options: OptionsProtocol) -> bool:
+        self._initialize_ai_fixing_phase(options)
+
+        try:
+            self._prepare_ai_fixing_environment(options)
+
+            agent_coordinator, issues = await self._setup_ai_fixing_workflow()
+
+            if not issues:
+                return self._handle_no_issues_found()
+
+            return await self._execute_ai_fixes(options, agent_coordinator, issues)
+
+        except Exception as e:
+            return self._handle_fixing_phase_error(e)
+
+    def _initialize_ai_fixing_phase(self, options: OptionsProtocol) -> None:
+        self._update_mcp_status("ai_fixing", "running")
+        self.logger.info("Starting AI agent fixing phase")
+        # Always log this important phase start for AI consumption
+        self.logger.info(
+            "AI agent fixing phase started",
+            ai_agent_fixing=True,
+            event_type="ai_fix_init",
+        )
+        self._log_debug_phase_start()
+
+    def _prepare_ai_fixing_environment(self, options: OptionsProtocol) -> None:
+        should_run_cleaning = (
+            getattr(options, "clean", False) and not self._has_code_cleaning_run()
+        )
+
+        if not should_run_cleaning:
+            return
+
+        self.console.print(
+            "\n[bold yellow]🤖 AI agents recommend running code cleaning first for better results...[/bold yellow]"
+        )
+
+        if self._run_code_cleaning_phase(options):
+            self._run_post_cleaning_fast_hooks(options)
+            self._mark_code_cleaning_complete()
+
+    async def _setup_ai_fixing_workflow(
+        self,
+    ) -> tuple[EnhancedAgentCoordinator, list[t.Any]]:
+        agent_coordinator = self._setup_agent_coordinator()
+        issues = await self._collect_issues_from_failures()
+        return agent_coordinator, issues
+
+    async def _execute_ai_fixes(
+        self,
+        options: OptionsProtocol,
+        agent_coordinator: EnhancedAgentCoordinator,
+        issues: list[t.Any],
+    ) -> bool:
+        self.logger.info(f"AI agents will attempt to fix {len(issues)} issues")
+        fix_result = await agent_coordinator.handle_issues(issues)
+        return await self._process_fix_results(options, fix_result)
+
+    def _log_debug_phase_start(self) -> None:
+        if self._should_debug():
+            self.debugger.log_workflow_phase(
+                "ai_agent_fixing",
+                "started",
+                details={"ai_agent": True},
+            )
+            # Log structured data to stderr for AI consumption
+            self.logger.info(
+                "AI agent fixing phase started",
+                ai_agent_fixing=True,
+                event_type="ai_fix_start",
+            )
+
+    def _setup_agent_coordinator(self) -> EnhancedAgentCoordinator:
+        from crackerjack.agents.enhanced_coordinator import create_enhanced_coordinator
+
+        agent_context = AgentContext(
+            project_path=self.pkg_path,
+            session_id=getattr(self.session, "session_id", None),
+        )
+
+        # Use enhanced coordinator with Claude Code agent integration
+        agent_coordinator = create_enhanced_coordinator(
+            context=agent_context, enable_external_agents=True
+        )
+        agent_coordinator.initialize_agents()
+        return agent_coordinator
+
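Taken together with _execute_ai_fixes, the coordinator setup amounts to the usage below. This is a sketch assembled from the calls visible in this diff (create_enhanced_coordinator, initialize_agents, an awaited handle_issues returning an object with a success flag); the AgentContext import path is an assumption based on the agents/base.py module in this release:

    from pathlib import Path

    from crackerjack.agents.base import AgentContext  # import path assumed
    from crackerjack.agents.enhanced_coordinator import create_enhanced_coordinator

    async def fix_issues(issues: list) -> bool:
        context = AgentContext(project_path=Path.cwd(), session_id=None)
        coordinator = create_enhanced_coordinator(
            context=context, enable_external_agents=True
        )
        coordinator.initialize_agents()
        result = await coordinator.handle_issues(issues)  # returns a fix-result object
        return result.success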
+    def _handle_no_issues_found(self) -> bool:
+        self.logger.info("No issues collected for AI agent fixing")
+        self._update_mcp_status("ai_fixing", "completed")
+        return True
+
+    async def _process_fix_results(
+        self, options: OptionsProtocol, fix_result: t.Any
+    ) -> bool:
+        verification_success = await self._verify_fixes_applied(options, fix_result)
+        success = fix_result.success and verification_success
+
+        if success:
+            self._handle_successful_fixes(fix_result)
+        else:
+            self._handle_failed_fixes(fix_result, verification_success)
+
+        self._log_debug_phase_completion(success, fix_result)
+        return success
+
+    def _handle_successful_fixes(self, fix_result: t.Any) -> None:
+        self.logger.info(
+            "AI agents successfully fixed all issues and verification passed"
+        )
+        self._update_mcp_status("ai_fixing", "completed")
+        self._log_fix_counts_if_debugging(fix_result)
+
+    def _handle_failed_fixes(
+        self, fix_result: t.Any, verification_success: bool
+    ) -> None:
+        if not verification_success:
+            self.logger.warning(
+                "AI agent fixes did not pass verification - issues still exist"
+            )
+        else:
+            self.logger.warning(
+                f"AI agents could not fix all issues: {fix_result.remaining_issues}",
+            )
+        self._update_mcp_status("ai_fixing", "failed")
+
+    def _log_fix_counts_if_debugging(self, fix_result: t.Any) -> None:
+        if not self._should_debug():
+            return
+
+        total_fixes = len(fix_result.fixes_applied)
+        test_fixes = len(
+            [f for f in fix_result.fixes_applied if "test" in f.lower()],
+        )
+        hook_fixes = total_fixes - test_fixes
+        self.debugger.log_test_fixes(test_fixes)
+        self.debugger.log_hook_fixes(hook_fixes)
+
+        # Log structured data to stderr for AI consumption
+        self.logger.info(
+            "AI fixes applied",
+            ai_agent_fixing=True,
+            event_type="ai_fix_counts",
+            total_fixes=total_fixes,
+            test_fixes=test_fixes,
+            hook_fixes=hook_fixes,
+        )
+
+    def _log_debug_phase_completion(self, success: bool, fix_result: t.Any) -> None:
+        if self._should_debug():
+            self.debugger.log_workflow_phase(
+                "ai_agent_fixing",
+                "completed" if success else "failed",
+                details={
+                    "confidence": fix_result.confidence,
+                    "fixes_applied": len(fix_result.fixes_applied),
+                    "remaining_issues": len(fix_result.remaining_issues),
+                },
+            )
+            # Log structured data to stderr for AI consumption
+            self.logger.info(
+                f"AI agent fixing phase {'completed' if success else 'failed'}",
+                ai_agent_fixing=True,
+                event_type="ai_fix_completion",
+                success=success,
+                confidence=fix_result.confidence,
+                fixes_applied=len(fix_result.fixes_applied),
+                remaining_issues=len(fix_result.remaining_issues),
+            )
+
+    def _handle_fixing_phase_error(self, error: Exception) -> bool:
+        self.logger.exception(f"AI agent fixing phase failed: {error}")
+        self.session.fail_task("ai_fixing", f"AI agent fixing failed: {error}")
+        self._update_mcp_status("ai_fixing", "failed")
+
+        if self._should_debug():
+            self.debugger.log_workflow_phase(
+                "ai_agent_fixing",
+                "failed",
+                details={"error": str(error)},
+            )
+            # Log structured data to stderr for AI consumption
+            self.logger.error(
+                "AI agent fixing phase failed",
+                ai_agent_fixing=True,
+                event_type="ai_fix_error",
+                error=str(error),
+                error_type=type(error).__name__,
+            )
+
+        return False
+
+    async def _verify_fixes_applied(
+        self, options: OptionsProtocol, fix_result: t.Any
+    ) -> bool:
+        if not fix_result.fixes_applied:
+            return True
+
+        self.logger.info("Verifying AI agent fixes by re-running quality checks")
+
+        verification_success = True
+
+        if self._should_verify_test_fixes(fix_result.fixes_applied):
+            if not await self._verify_test_fixes(options):
+                verification_success = False
+
+        if self._should_verify_hook_fixes(fix_result.fixes_applied):
+            if not await self._verify_hook_fixes(options):
+                verification_success = False
+
+        self._log_verification_result(verification_success)
+        return verification_success
+
|
+
def _should_verify_test_fixes(self, fixes_applied: list[str]) -> bool:
|
|
1205
|
+
return any("test" in fix.lower() for fix in fixes_applied)
|
|
1206
|
+
|
|
1207
|
+
async def _verify_test_fixes(self, options: OptionsProtocol) -> bool:
|
|
1208
|
+
self.logger.info("Re-running tests to verify test fixes")
|
|
1209
|
+
test_success = self.phases.run_testing_phase(options)
|
|
1210
|
+
if not test_success:
|
|
1211
|
+
self.logger.warning("Test verification failed-test fixes did not work")
|
|
1212
|
+
return test_success
|
|
1213
|
+
|
|
1214
|
+
def _should_verify_hook_fixes(self, fixes_applied: list[str]) -> bool:
|
|
1215
|
+
hook_fixes = [fix for fix in fixes_applied if self._is_hook_related_fix(fix)]
|
|
1216
|
+
return bool(hook_fixes)
|
|
1217
|
+
|
|
1218
|
+
def _is_hook_related_fix(self, fix: str) -> bool:
|
|
1219
|
+
"""Check if a fix is related to hooks and should trigger hook verification."""
|
|
1220
|
+
fix_lower = fix.lower()
|
|
1221
|
+
return (
|
|
1222
|
+
"hook" not in fix_lower or "complexity" in fix_lower or "type" in fix_lower
|
|
1223
|
+
)
|
|
1224
|
+
|
|
1225
|
+
async def _verify_hook_fixes(self, options: OptionsProtocol) -> bool:
|
|
1226
|
+
self.logger.info("Re-running comprehensive hooks to verify hook fixes")
|
|
1227
|
+
hook_success = self.phases.run_comprehensive_hooks_only(options)
|
|
1228
|
+
if not hook_success:
|
|
1229
|
+
self.logger.warning("Hook verification failed-hook fixes did not work")
|
|
1230
|
+
return hook_success
|
|
1231
|
+
|
|
1232
|
+
def _log_verification_result(self, verification_success: bool) -> None:
|
|
1233
|
+
if verification_success:
|
|
1234
|
+
self.logger.info("All AI agent fixes verified successfully")
|
|
1235
|
+
else:
|
|
1236
|
+
self.logger.error(
|
|
1237
|
+
"Verification failed-some fixes did not resolve the issues"
|
|
1238
|
+
)
|
|
1239
|
+
|
|
1240
|
+
async def _collect_issues_from_failures(self) -> list[Issue]:
|
|
1241
|
+
issues: list[Issue] = []
|
|
1242
|
+
|
|
1243
|
+
test_issues, test_count = self._collect_test_failure_issues()
|
|
1244
|
+
hook_issues, hook_count = self._collect_hook_failure_issues()
|
|
1245
|
+
|
|
1246
|
+
issues.extend(test_issues)
|
|
1247
|
+
issues.extend(hook_issues)
|
|
1248
|
+
|
|
1249
|
+
self._log_failure_counts_if_debugging(test_count, hook_count)
|
|
1250
|
+
|
|
1251
|
+
return issues
|
|
1252
|
+
|
|
1253
|
+
def _collect_test_failure_issues(self) -> tuple[list[Issue], int]:
|
|
1254
|
+
issues: list[Issue] = []
|
|
1255
|
+
test_count = 0
|
|
1256
|
+
|
|
1257
|
+
if hasattr(self.phases, "test_manager") and hasattr(
|
|
1258
|
+
self.phases.test_manager,
|
|
1259
|
+
"get_test_failures",
|
|
1260
|
+
):
|
|
1261
|
+
test_failures = self.phases.test_manager.get_test_failures()
|
|
1262
|
+
test_count = len(test_failures)
|
|
1263
|
+
for i, failure in enumerate(
|
|
1264
|
+
test_failures[:20],
|
|
1265
|
+
):
|
|
1266
|
+
issue = Issue(
|
|
1267
|
+
id=f"test_failure_{i}",
|
|
1268
|
+
type=IssueType.TEST_FAILURE,
|
|
1269
|
+
severity=Priority.HIGH,
|
|
1270
|
+
message=failure.strip(),
|
|
1271
|
+
stage="tests",
|
|
1272
|
+
)
|
|
1273
|
+
issues.append(issue)
|
|
1274
|
+
|
|
1275
|
+
return issues, test_count
|
|
1276
|
+
|
|
1277
|
+
def _collect_hook_failure_issues(self) -> tuple[list[Issue], int]:
|
|
1278
|
+
issues: list[Issue] = []
|
|
1279
|
+
hook_count = 0
|
|
1280
|
+
|
|
1281
|
+
try:
|
|
1282
|
+
hook_results = self.phases.hook_manager.run_comprehensive_hooks()
|
|
1283
|
+
issues, hook_count = self._process_hook_results(hook_results)
|
|
1284
|
+
except Exception:
|
|
1285
|
+
issues, hook_count = self._fallback_to_session_tracker()
|
|
1286
|
+
|
|
1287
|
+
return issues, hook_count
|
|
1288
|
+
|
|
1289
|
+
def _process_hook_results(self, hook_results: t.Any) -> tuple[list[Issue], int]:
|
|
1290
|
+
issues: list[Issue] = []
|
|
1291
|
+
hook_count = 0
|
|
1292
|
+
|
|
1293
|
+
for result in hook_results:
|
|
1294
|
+
if not self._is_hook_result_failed(result):
|
|
1295
|
+
continue
|
|
1296
|
+
|
|
1297
|
+
hook_count += 1
|
|
1298
|
+
result_issues = self._extract_issues_from_hook_result(result)
|
|
1299
|
+
issues.extend(result_issues)
|
|
1300
|
+
|
|
1301
|
+
return issues, hook_count
|
|
1302
|
+
|
|
1303
|
+
def _is_hook_result_failed(self, result: t.Any) -> bool:
|
|
1304
|
+
return result.status in ("failed", "error", "timeout")
|
|
1305
|
+
|
|
1306
|
+
def _extract_issues_from_hook_result(self, result: t.Any) -> list[Issue]:
|
|
1307
|
+
if result.issues_found:
|
|
1308
|
+
return self._create_specific_issues_from_hook_result(result)
|
|
1309
|
+
return [self._create_generic_issue_from_hook_result(result)]
|
|
1310
|
+
|
|
1311
|
+
def _create_specific_issues_from_hook_result(self, result: t.Any) -> list[Issue]:
|
|
1312
|
+
issues: list[Issue] = []
|
|
1313
|
+
hook_context = f"{result.name}: "
|
|
1314
|
+
|
|
1315
|
+
for issue_text in result.issues_found:
|
|
1316
|
+
parsed_issues = self._parse_issues_for_agents([hook_context + issue_text])
|
|
1317
|
+
issues.extend(parsed_issues)
|
|
1318
|
+
|
|
1319
|
+
return issues
|
|
1320
|
+
|
|
1321
|
+
def _create_generic_issue_from_hook_result(self, result: t.Any) -> Issue:
|
|
1322
|
+
issue_type = self._determine_hook_issue_type(result.name)
|
|
1323
|
+
return Issue(
|
|
1324
|
+
id=f"hook_failure_{result.name}",
|
|
1325
|
+
type=issue_type,
|
|
1326
|
+
severity=Priority.MEDIUM,
|
|
1327
|
+
message=f"Hook {result.name} failed with no specific details",
|
|
1328
|
+
stage="comprehensive",
|
|
1329
|
+
)
|
|
1330
|
+
|
|
1331
|
+
def _determine_hook_issue_type(self, hook_name: str) -> IssueType:
|
|
1332
|
+
formatting_hooks = {
|
|
1333
|
+
"trailing-whitespace",
|
|
1334
|
+
"end-of-file-fixer",
|
|
1335
|
+
"ruff-format",
|
|
1336
|
+
"ruff-check",
|
|
1337
|
+
}
|
|
1338
|
+
|
|
1339
|
+
if hook_name == "validate-regex-patterns":
|
|
1340
|
+
return IssueType.REGEX_VALIDATION
|
|
1341
|
+
|
|
1342
|
+
return (
|
|
1343
|
+
IssueType.FORMATTING
|
|
1344
|
+
if hook_name in formatting_hooks
|
|
1345
|
+
else IssueType.TYPE_ERROR
|
|
1346
|
+
)
|
|
1347
|
+
|
|
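The mapping above is a three-way split: one special-cased hook, a fixed set of formatting hooks, and TYPE_ERROR as the catch-all. A self-contained sketch (IssueKind stands in for crackerjack's IssueType enum, which is defined elsewhere in the package):

    from enum import Enum, auto

    class IssueKind(Enum):
        FORMATTING = auto()
        REGEX_VALIDATION = auto()
        TYPE_ERROR = auto()

    FORMATTING_HOOKS = {
        "trailing-whitespace", "end-of-file-fixer", "ruff-format", "ruff-check",
    }

    def classify_hook(hook_name: str) -> IssueKind:
        if hook_name == "validate-regex-patterns":
            return IssueKind.REGEX_VALIDATION
        if hook_name in FORMATTING_HOOKS:
            return IssueKind.FORMATTING
        return IssueKind.TYPE_ERROR  # default for every unrecognized hook

    assert classify_hook("ruff-format") is IssueKind.FORMATTING
    assert classify_hook("bandit") is IssueKind.TYPE_ERROR

Note the default: any unrecognized hook is reported as a type error, which keeps the classifier total at the cost of some mislabeling.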
+    def _fallback_to_session_tracker(self) -> tuple[list[Issue], int]:
+        issues: list[Issue] = []
+        hook_count = 0
+
+        if not self.session.session_tracker:
+            return issues, hook_count
+
+        for task_id, task_data in self.session.session_tracker.tasks.items():
+            if self._is_failed_hook_task(task_data, task_id):
+                hook_count += 1
+                hook_issues = self._process_hook_failure(task_id, task_data)
+                issues.extend(hook_issues)
+
+        return issues, hook_count
+
+    def _is_failed_hook_task(self, task_data: t.Any, task_id: str) -> bool:
+        return task_data.status == "failed" and task_id in (
+            "fast_hooks",
+            "comprehensive_hooks",
+        )
+
+    def _process_hook_failure(self, task_id: str, task_data: t.Any) -> list[Issue]:
+        error_msg = getattr(task_data, "error_message", "Unknown error")
+        specific_issues = self._parse_hook_error_details(task_id, error_msg)
+
+        if specific_issues:
+            return specific_issues
+
+        return [self._create_generic_hook_issue(task_id, error_msg)]
+
+    def _create_generic_hook_issue(self, task_id: str, error_msg: str) -> Issue:
+        issue_type = IssueType.FORMATTING if "fast" in task_id else IssueType.TYPE_ERROR
+        return Issue(
+            id=f"hook_failure_{task_id}",
+            type=issue_type,
+            severity=Priority.MEDIUM,
+            message=error_msg,
+            stage=task_id.replace("_hooks", ""),
+        )
+
+    def _parse_hook_error_details(self, task_id: str, error_msg: str) -> list[Issue]:
+        issues: list[Issue] = []
+
+        if task_id == "comprehensive_hooks":
+            issues.extend(self._parse_comprehensive_hook_errors(error_msg))
+        elif task_id == "fast_hooks":
+            issues.append(self._create_fast_hook_issue())
+
+        return issues
+
+    def _parse_comprehensive_hook_errors(self, error_msg: str) -> list[Issue]:
+        error_lower = error_msg.lower()
+        error_checkers = self._get_comprehensive_error_checkers()
+
+        issues = []
+        for check_func in error_checkers:
+            issue = check_func(error_lower)
+            if issue:
+                issues.append(issue)
+
+        return issues
+
+    def _get_comprehensive_error_checkers(
+        self,
+    ) -> list[t.Callable[[str], Issue | None]]:
+        """Get list of error checking functions for comprehensive hooks."""
+        return [
+            self._check_complexity_error,
+            self._check_type_error,
+            self._check_security_error,
+            self._check_performance_error,
+            self._check_dead_code_error,
+            self._check_regex_validation_error,
+        ]
+
+    def _check_complexity_error(self, error_lower: str) -> Issue | None:
+        if "complexipy" in error_lower or "c901" in error_lower:
+            return Issue(
+                id="complexity_violation",
+                type=IssueType.COMPLEXITY,
+                severity=Priority.HIGH,
+                message="Code complexity violation detected",
+                stage="comprehensive",
+            )
+        return None
+
+    def _check_type_error(self, error_lower: str) -> Issue | None:
+        if "pyright" in error_lower:
+            return Issue(
+                id="pyright_type_error",
+                type=IssueType.TYPE_ERROR,
+                severity=Priority.HIGH,
+                message="Type checking errors detected by pyright",
+                stage="comprehensive",
+            )
+        return None
+
+    def _check_security_error(self, error_lower: str) -> Issue | None:
+        if "bandit" in error_lower:
+            return Issue(
+                id="bandit_security_issue",
+                type=IssueType.SECURITY,
+                severity=Priority.HIGH,
+                message="Security vulnerabilities detected by bandit",
+                stage="comprehensive",
+            )
+        return None
+
+    def _check_performance_error(self, error_lower: str) -> Issue | None:
+        if "refurb" in error_lower:
+            return Issue(
+                id="refurb_quality_issue",
+                type=IssueType.PERFORMANCE,
+                severity=Priority.MEDIUM,
+                message="Code quality issues detected by refurb",
+                stage="comprehensive",
+            )
+        return None
+
+    def _check_dead_code_error(self, error_lower: str) -> Issue | None:
+        if "vulture" in error_lower:
+            return Issue(
+                id="vulture_dead_code",
+                type=IssueType.DEAD_CODE,
+                severity=Priority.MEDIUM,
+                message="Dead code detected by vulture",
+                stage="comprehensive",
+            )
+        return None
+
+    def _check_regex_validation_error(self, error_lower: str) -> Issue | None:
+        regex_keywords = ("raw regex", "regex pattern", r"\g<", "replacement")
+        if "validate-regex-patterns" in error_lower or any(
+            keyword in error_lower for keyword in regex_keywords
+        ):
+            return Issue(
+                id="regex_validation_failure",
+                type=IssueType.REGEX_VALIDATION,
+                severity=Priority.HIGH,
+                message="Unsafe regex patterns detected by validate-regex-patterns",
+                stage="fast",
+            )
+        return None
+
+    def _create_fast_hook_issue(self) -> Issue:
+        return Issue(
+            id="fast_hooks_formatting",
+            type=IssueType.FORMATTING,
+            severity=Priority.LOW,
+            message="Code formatting issues detected",
+            stage="fast",
+        )
+
+    def _parse_issues_for_agents(self, issue_strings: list[str]) -> list[Issue]:
+        issues: list[Issue] = []
+
+        for i, issue_str in enumerate(issue_strings):
+            issue_type, priority = self._classify_issue(issue_str)
+
+            issue = Issue(
+                id=f"parsed_issue_{i}",
+                type=issue_type,
+                severity=priority,
+                message=issue_str.strip(),
+                stage="comprehensive",
+            )
+            issues.append(issue)
+
+        return issues
+
+    def _classify_issue(self, issue_str: str) -> tuple[IssueType, Priority]:
+        issue_lower = issue_str.lower()
+
+        # Check high priority issues first
+        high_priority_result = self._check_high_priority_issues(issue_lower)
+        if high_priority_result:
+            return high_priority_result
+
+        # Check medium priority issues
+        medium_priority_result = self._check_medium_priority_issues(issue_lower)
+        if medium_priority_result:
+            return medium_priority_result
+
+        # Default to formatting issue
+        return IssueType.FORMATTING, Priority.MEDIUM
+
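Classification is a first-match keyword scan: high-priority categories are tried before medium ones, and anything unmatched falls back to a medium-priority formatting issue. A compact standalone sketch of the same scan (plain strings stand in for the IssueType and Priority enums):

    def classify(text: str) -> tuple[str, str]:
        s = text.lower()
        tiers = [
            ("high", [
                ("type_error", ("type", "annotation", "pyright")),
                ("security", ("security", "bandit", "hardcoded")),
                ("complexity", ("complexity", "complexipy", "c901", "too complex")),
            ]),
            ("medium", [
                ("dead_code", ("unused", "dead", "vulture")),
                ("performance", ("performance", "refurb", "furb")),
                ("import_error", ("import", "creosote")),
            ]),
        ]
        for priority, checks in tiers:
            for kind, keywords in checks:
                if any(k in s for k in keywords):
                    return kind, priority
        return "formatting", "medium"  # fallback, as in _classify_issue

    assert classify("bandit: hardcoded password") == ("security", "high")
    assert classify("line too long") == ("formatting", "medium")

Because the first match wins, a message mentioning both "type" and "unused" is classified as a type error; the check ordering encodes the priority.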
+    def _check_high_priority_issues(
+        self, issue_lower: str
+    ) -> tuple[IssueType, Priority] | None:
+        """Check for high priority issue types.
+
+        Args:
+            issue_lower: Lowercase issue string
+
+        Returns:
+            Tuple of issue type and priority if found, None otherwise
+        """
+        high_priority_checks = [
+            (self._is_type_error, IssueType.TYPE_ERROR),
+            (self._is_security_issue, IssueType.SECURITY),
+            (self._is_complexity_issue, IssueType.COMPLEXITY),
+            (self._is_regex_validation_issue, IssueType.REGEX_VALIDATION),
+        ]
+
+        for check_func, issue_type in high_priority_checks:
+            if check_func(issue_lower):
+                return issue_type, Priority.HIGH
+
+        return None
+
+    def _check_medium_priority_issues(
+        self, issue_lower: str
+    ) -> tuple[IssueType, Priority] | None:
+        """Check for medium priority issue types.
+
+        Args:
+            issue_lower: Lowercase issue string
+
+        Returns:
+            Tuple of issue type and priority if found, None otherwise
+        """
+        medium_priority_checks = [
+            (self._is_dead_code_issue, IssueType.DEAD_CODE),
+            (self._is_performance_issue, IssueType.PERFORMANCE),
+            (self._is_import_error, IssueType.IMPORT_ERROR),
+        ]
+
+        for check_func, issue_type in medium_priority_checks:
+            if check_func(issue_lower):
+                return issue_type, Priority.MEDIUM
+
+        return None
+
+    def _is_type_error(self, issue_lower: str) -> bool:
+        return any(
+            keyword in issue_lower for keyword in ("type", "annotation", "pyright")
+        )
+
+    def _is_security_issue(self, issue_lower: str) -> bool:
+        return any(
+            keyword in issue_lower for keyword in ("security", "bandit", "hardcoded")
+        )
+
+    def _is_complexity_issue(self, issue_lower: str) -> bool:
+        return any(
+            keyword in issue_lower
+            for keyword in ("complexity", "complexipy", "c901", "too complex")
+        )
+
+    def _is_regex_validation_issue(self, issue_lower: str) -> bool:
+        return any(
+            keyword in issue_lower
+            for keyword in (
+                "regex",
+                "pattern",
+                "validate-regex-patterns",
+                r"\g<",
+                "replacement",
+            )
+        )
+
+    def _is_dead_code_issue(self, issue_lower: str) -> bool:
+        return any(keyword in issue_lower for keyword in ("unused", "dead", "vulture"))
+
+    def _is_performance_issue(self, issue_lower: str) -> bool:
+        return any(
+            keyword in issue_lower for keyword in ("performance", "refurb", "furb")
+        )
+
+    def _is_import_error(self, issue_lower: str) -> bool:
+        return any(keyword in issue_lower for keyword in ("import", "creosote"))
+
+    def _log_failure_counts_if_debugging(
+        self, test_count: int, hook_count: int
+    ) -> None:
+        if self._should_debug():
+            self.debugger.log_test_failures(test_count)
+            self.debugger.log_hook_failures(hook_count)
+
+    def _check_security_gates_for_publishing(
+        self, options: OptionsProtocol
+    ) -> tuple[bool, bool]:
+        publishing_requested = bool(options.publish or options.all)
+
+        if not publishing_requested:
+            return False, False
+
+        try:
+            security_blocks_publishing = self._check_security_critical_failures()
+            return publishing_requested, security_blocks_publishing
+        except Exception as e:
+            self.logger.warning(f"Security check failed: {e} - blocking publishing")
+            self.console.print(
+                "[red]🔒 SECURITY CHECK FAILED: Unable to verify security status - publishing BLOCKED[/red]"
+            )
+
+            return publishing_requested, True
+
+    async def _handle_security_gate_failure(
+        self, options: OptionsProtocol, allow_ai_fixing: bool = False
+    ) -> bool:
+        self._display_security_gate_failure_message()
+
+        if allow_ai_fixing:
+            return await self._attempt_ai_assisted_security_fix(options)
+        return self._handle_manual_security_fix()
+
+    def _display_security_gate_failure_message(self) -> None:
+        """Display initial security gate failure message."""
+        self.console.print(
+            "[red]🔒 SECURITY GATE: Critical security checks failed[/red]"
+        )
+
+    async def _attempt_ai_assisted_security_fix(self, options: OptionsProtocol) -> bool:
+        """Attempt to fix security issues using AI assistance.
+
+        Args:
+            options: Configuration options
+
+        Returns:
+            True if security issues were resolved, False otherwise
+        """
+        self._display_ai_fixing_messages()
+
+        ai_fix_success = await self._run_ai_agent_fixing_phase(options)
+        if ai_fix_success:
+            return self._verify_security_fix_success()
+
+        return False
+
+    def _display_ai_fixing_messages(self) -> None:
+        """Display messages about AI-assisted security fixing."""
+        self.console.print(
+            "[red]Security-critical hooks (bandit, pyright, gitleaks) must pass before publishing[/red]"
+        )
+        self.console.print(
+            "[yellow]🤖 Attempting AI-assisted security issue resolution...[/yellow]"
+        )
+
+    def _verify_security_fix_success(self) -> bool:
+        """Verify that AI fixes resolved the security issues.
+
+        Returns:
+            True if security issues were resolved, False otherwise
+        """
+        try:
+            security_still_blocks = self._check_security_critical_failures()
+            if not security_still_blocks:
+                self.console.print(
+                    "[green]✅ AI agents resolved security issues - publishing allowed[/green]"
+                )
+                return True
+            else:
+                self.console.print(
+                    "[red]🔒 Security issues persist after AI fixing - publishing still BLOCKED[/red]"
+                )
+                return False
+        except Exception as e:
+            self.logger.warning(f"Security re-check failed: {e} - blocking publishing")
+            return False
+
+    def _handle_manual_security_fix(self) -> bool:
+        """Handle security fix when AI assistance is not allowed.
+
+        Returns:
+            Always False since manual intervention is required
+        """
+        self.console.print(
+            "[red]Security-critical hooks (bandit, pyright, gitleaks) must pass before publishing[/red]"
+        )
+        return False
+
+    def _check_security_critical_failures(self) -> bool:
+        try:
+            from crackerjack.security.audit import SecurityAuditor
+
+            auditor = SecurityAuditor()
+
+            fast_results = self._get_recent_fast_hook_results()
+            comprehensive_results = self._get_recent_comprehensive_hook_results()
+
+            audit_report = auditor.audit_hook_results(
+                fast_results, comprehensive_results
+            )
+
+            self._last_security_audit = audit_report
+
+            return audit_report.has_critical_failures
+
+        except Exception as e:
+            self.logger.warning(f"Security audit failed: {e} - failing securely")
+
+            raise
+
+    def _get_recent_fast_hook_results(self) -> list[t.Any]:
+        results = self._extract_hook_results_from_session("fast_hooks")
+
+        if not results:
+            results = self._create_mock_hook_results(["gitleaks"])
+
+        return results
+
+    def _extract_hook_results_from_session(self, hook_type: str) -> list[t.Any]:
+        results: list[t.Any] = []
+
+        session_tracker = self._get_session_tracker()
+        if not session_tracker:
+            return results
+
+        for task_id, task_data in session_tracker.tasks.items():
+            if task_id == hook_type and hasattr(task_data, "hook_results"):
+                if task_data.hook_results:
+                    results.extend(task_data.hook_results)
+
+        return results
+
+    def _get_session_tracker(self) -> t.Any | None:
+        return (
+            getattr(self.session, "session_tracker", None)
+            if hasattr(self.session, "session_tracker")
+            else None
+        )
+
+    def _create_mock_hook_results(self, critical_hooks: list[str]) -> list[t.Any]:
+        results: list[t.Any] = []
+
+        for hook_name in critical_hooks:
+            mock_result = self._create_mock_hook_result(hook_name)
+            results.append(mock_result)
+
+        return results
+
+    def _create_mock_hook_result(self, hook_name: str) -> t.Any:
+        return type(
+            "MockResult",
+            (),
+            {
+                "name": hook_name,
+                "status": "unknown",
+                "output": "Unable to determine hook status",
+            },
+        )()
+
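_create_mock_hook_result builds a throwaway object by calling the three-argument type() and instantiating the resulting class. types.SimpleNamespace is the more common idiom for the same effect; both are shown below for comparison:

    from types import SimpleNamespace

    # type(name, bases, namespace) creates a class; the trailing () instantiates it.
    mock = type("MockResult", (), {"name": "bandit", "status": "unknown"})()
    assert (mock.name, mock.status) == ("bandit", "unknown")

    # SimpleNamespace gives per-instance attributes with less machinery.
    ns = SimpleNamespace(name="bandit", status="unknown")
    assert ns.status == "unknown"

With type(), the dict entries become class attributes shared by all instances, which is harmless for read-only mocks like this one.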
+    def _get_recent_comprehensive_hook_results(self) -> list[t.Any]:
+        results = self._extract_hook_results_from_session("comprehensive_hooks")
+
+        if not results:
+            results = self._create_mock_hook_results(["bandit", "pyright"])
+
+        return results
+
+    def _is_security_critical_failure(self, result: t.Any) -> bool:
+        security_critical_hooks = {
+            "bandit",
+            "pyright",
+            "gitleaks",
+        }
+
+        hook_name = getattr(result, "name", "").lower()
+        is_failed = getattr(result, "status", "unknown") in (
+            "failed",
+            "error",
+            "timeout",
+        )
+
+        return hook_name in security_critical_hooks and is_failed
+
+    def _workflow_context(
+        self,
+        workflow_id: str,
+        options: OptionsProtocol,
+    ) -> dict[str, t.Any]:
+        """Build a consistent payload for workflow-level events."""
+        return {
+            "workflow_id": workflow_id,
+            "test_mode": getattr(options, "test", False),
+            "skip_hooks": getattr(options, "skip_hooks", False),
+            "publish": getattr(options, "publish", False),
+            "all": getattr(options, "all", False),
+            "commit": getattr(options, "commit", False),
+            "ai_agent": getattr(options, "ai_agent", False),
+        }
+
+    async def _publish_event(
+        self, event: WorkflowEvent, payload: dict[str, t.Any]
+    ) -> None:
+        """Publish workflow events when the bus is available."""
+        if not getattr(self, "_event_bus", None):
+            return
+
+        try:
+            await self._event_bus.publish(event, payload)  # type: ignore[union-attr]
+        except Exception as exc:  # pragma: no cover - logging only
+            self.logger.debug(
+                "Failed to publish workflow event",
+                extra={"event": event.value, "error": str(exc)},
+            )
+
+
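_publish_event is deliberately fire-and-forget: a missing bus is silently skipped and a failing publish is downgraded to a debug log, so telemetry can never break the workflow. The same defensive shape in isolation (NullBus is illustrative; the real bus is WorkflowEventBus):

    import asyncio
    import typing as t

    class NullBus:
        """Illustrative bus with the publish() surface used above."""

        async def publish(self, event: str, payload: dict[str, t.Any]) -> None:
            print(f"published {event}: {payload}")

    async def publish_safely(
        bus: NullBus | None, event: str, payload: dict[str, t.Any]
    ) -> None:
        if bus is None:
            return  # no bus configured: skip rather than fail
        try:
            await bus.publish(event, payload)
        except Exception:
            pass  # real code logs the failure at debug level instead

    asyncio.run(publish_safely(NullBus(), "workflow.started", {"workflow_id": "abc"}))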
+class WorkflowOrchestrator:
+    def __init__(
+        self,
+        pkg_path: Path | None = None,
+        dry_run: bool = False,
+        web_job_id: str | None = None,
+        verbose: bool = False,
+        debug: bool = False,
+        changed_only: bool = False,
+    ) -> None:
+        # Initialize console and pkg_path first
+        from acb.console import Console
+
+        self.console = depends.get_sync(Console)
+        self.pkg_path = pkg_path or Path.cwd()
+        self.dry_run = dry_run
+        self.web_job_id = web_job_id
+        self.verbose = verbose
+        self.debug = debug
+        self.changed_only = changed_only
+
+        # Import protocols for retrieving dependencies via ACB
+        from crackerjack.models.protocols import (
+            ConfigMergeServiceProtocol,
+            FileSystemInterface,
+            GitInterface,
+            HookManager,
+            PublishManager,
+            TestManagerProtocol,
+        )
+
+        # Setup services with ACB DI
+        self._setup_acb_services()
+
+        self._initialize_logging()
+
+        self.logger = depends.get_sync(LoggerProtocol)
+
+        # Create coordinators - dependencies retrieved via ACB's depends.get_sync()
+        self.session = SessionCoordinator(self.console, self.pkg_path, self.web_job_id)
+
+        # Register SessionCoordinator in DI for WorkflowPipeline injection
+        depends.set(SessionCoordinator, self.session)
+
+        self.phases = PhaseCoordinator(
+            console=self.console,
+            pkg_path=self.pkg_path,
+            session=self.session,
+            filesystem=depends.get_sync(FileSystemInterface),
+            git_service=depends.get_sync(GitInterface),
+            hook_manager=depends.get_sync(HookManager),
+            test_manager=depends.get_sync(TestManagerProtocol),
+            publish_manager=depends.get_sync(PublishManager),
+            config_merge_service=depends.get_sync(ConfigMergeServiceProtocol),
+        )
+
+        # Register PhaseCoordinator in DI for WorkflowPipeline injection
+        depends.set(PhaseCoordinator, self.phases)
+
+        # WorkflowPipeline uses @depends.inject, so all parameters are auto-injected
+        self.pipeline = WorkflowPipeline()
+
+    def _setup_acb_services(self) -> None:
+        """Setup all services using ACB dependency injection."""
+        self._register_filesystem_and_git_services()
+        self._register_manager_services()
+        self._register_core_services()
+        self._register_quality_services()
+        self._register_monitoring_services()
+        self._setup_event_system()
+
+    def _register_filesystem_and_git_services(self) -> None:
+        """Register filesystem and git services."""
+        from acb.depends import depends
+
+        from crackerjack.models.protocols import (
+            FileSystemInterface,
+            GitInterface,
+            GitServiceProtocol,
+        )
+        from crackerjack.services.enhanced_filesystem import EnhancedFileSystemService
+        from crackerjack.services.git import GitService
+
+        filesystem = EnhancedFileSystemService()
+        depends.set(FileSystemInterface, filesystem)
+
+        git_service = GitService(self.pkg_path)
+        depends.set(GitInterface, git_service)
+        depends.set(GitServiceProtocol, git_service)
+
+    def _register_manager_services(self) -> None:
+        """Register hook, test, and publish managers."""
+        from acb.depends import depends
+
+        from crackerjack.managers.hook_manager import HookManagerImpl
+        from crackerjack.managers.publish_manager import PublishManagerImpl
+        from crackerjack.managers.test_manager import TestManager
+        from crackerjack.models.protocols import (
+            HookManager,
+            PublishManager,
+            TestManagerProtocol,
+        )
+
+        hook_manager = HookManagerImpl(
+            self.pkg_path,
+            verbose=self.verbose,
+            debug=self.debug,
+            use_incremental=self.changed_only,
+        )
+        depends.set(HookManager, hook_manager)
+
+        test_manager = TestManager()
+        depends.set(TestManagerProtocol, test_manager)
+
+        publish_manager = PublishManagerImpl()
+        depends.set(PublishManager, publish_manager)
+
+    def _register_core_services(self) -> None:
+        """Register core configuration and security services."""
+        from acb.depends import depends
+
+        from crackerjack.executors.hook_lock_manager import HookLockManager
+        from crackerjack.models.protocols import (
+            ConfigIntegrityServiceProtocol,
+            ConfigMergeServiceProtocol,
+            EnhancedFileSystemServiceProtocol,
+            HookLockManagerProtocol,
+            SecurityServiceProtocol,
+            SmartSchedulingServiceProtocol,
+            UnifiedConfigurationServiceProtocol,
+        )
+        from crackerjack.services.cache import CrackerjackCache
+        from crackerjack.services.config_integrity import ConfigIntegrityService
+        from crackerjack.services.config_merge import ConfigMergeService
+        from crackerjack.services.enhanced_filesystem import EnhancedFileSystemService
+        from crackerjack.services.security import SecurityService
+        from crackerjack.services.smart_scheduling import SmartSchedulingService
+        from crackerjack.services.unified_config import UnifiedConfigurationService
+
+        depends.set(
+            UnifiedConfigurationServiceProtocol,
+            UnifiedConfigurationService(pkg_path=self.pkg_path),
+        )
+        depends.set(
+            ConfigIntegrityServiceProtocol,
+            ConfigIntegrityService(project_path=self.pkg_path),
+        )
+        depends.set(ConfigMergeServiceProtocol, ConfigMergeService())
+        depends.set(
+            SmartSchedulingServiceProtocol,
+            SmartSchedulingService(project_path=self.pkg_path),
+        )
+        depends.set(EnhancedFileSystemServiceProtocol, EnhancedFileSystemService())
+        depends.set(SecurityServiceProtocol, SecurityService())
+        depends.set(HookLockManagerProtocol, HookLockManager())
+        depends.set(CrackerjackCache, CrackerjackCache())
+
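Every registration here follows the same two-step ACB pattern: bind a concrete instance to a protocol key with depends.set, then resolve it anywhere with depends.get_sync. A reduced sketch with a stand-in registry (only the set/get_sync surface actually exercised in this diff is modeled):

    import typing as t

    class Registry:
        """Stand-in for acb.depends: protocol keys map to singleton instances."""

        def __init__(self) -> None:
            self._instances: dict[type, t.Any] = {}

        def set(self, key: type, instance: t.Any) -> None:
            self._instances[key] = instance

        def get_sync(self, key: type) -> t.Any:
            return self._instances[key]

    class GitInterface: ...  # protocol key

    class GitService(GitInterface):  # concrete implementation
        pass

    depends = Registry()
    depends.set(GitInterface, GitService())
    assert isinstance(depends.get_sync(GitInterface), GitService)

Keying on the protocol rather than the implementation is what lets PhaseCoordinator and WorkflowPipeline stay decoupled from the concrete service classes.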
+    def _register_quality_services(self) -> None:
+        """Register coverage, version analysis, and code quality services."""
+        from acb.depends import depends
+
+        from crackerjack.models.protocols import (
+            ChangelogGeneratorProtocol,
+            CoverageBadgeServiceProtocol,
+            CoverageRatchetProtocol,
+            GitInterface,
+            RegexPatternsProtocol,
+            VersionAnalyzerProtocol,
+        )
+        from crackerjack.services.changelog_automation import ChangelogGenerator
+        from crackerjack.services.coverage_badge_service import CoverageBadgeService
+        from crackerjack.services.coverage_ratchet import CoverageRatchetService
+        from crackerjack.services.regex_patterns import RegexPatternsService
+        from crackerjack.services.version_analyzer import VersionAnalyzer
+
+        coverage_ratchet = CoverageRatchetService(self.pkg_path)
+        depends.set(CoverageRatchetProtocol, coverage_ratchet)
+
+        coverage_badge = CoverageBadgeService(project_root=self.pkg_path)
+        depends.set(CoverageBadgeServiceProtocol, coverage_badge)
+
+        git_service = depends.get_sync(GitInterface)
+        version_analyzer = VersionAnalyzer(git_service=git_service)
+        depends.set(VersionAnalyzerProtocol, version_analyzer)
+
+        changelog_generator = ChangelogGenerator()
+        depends.set(ChangelogGeneratorProtocol, changelog_generator)
+
+        regex_patterns = RegexPatternsService()
+        depends.set(RegexPatternsProtocol, regex_patterns)
+
+    def _register_monitoring_services(self) -> None:
+        """Register performance monitoring and benchmarking services."""
+        from acb.depends import depends
+        from acb.logger import Logger
+
+        from crackerjack.models.protocols import PerformanceBenchmarkProtocol
+        from crackerjack.services.monitoring.performance_benchmarks import (
+            PerformanceBenchmarkService,
+        )
+
+        performance_benchmarks = PerformanceBenchmarkService(
+            console=self.console,
+            logger=depends.get_sync(Logger),
+            pkg_path=self.pkg_path,
+        )
+        depends.set(PerformanceBenchmarkProtocol, performance_benchmarks)
+
+    def _setup_event_system(self) -> None:
+        """Setup event bus and telemetry."""
+        from acb.depends import depends
+
+        from crackerjack.events import (
+            WorkflowEventBus,
+            WorkflowEventTelemetry,
+            register_default_subscribers,
+        )
+
+        default_state_dir = Path.home() / ".crackerjack" / "state"
+        default_state_dir.mkdir(parents=True, exist_ok=True)
+
+        event_bus = WorkflowEventBus()
+        telemetry_state_file = default_state_dir / "workflow_events.json"
+        telemetry = WorkflowEventTelemetry(state_file=telemetry_state_file)
+        register_default_subscribers(event_bus, telemetry)
+
+        depends.set(WorkflowEventBus, event_bus)
+        depends.set(WorkflowEventTelemetry, telemetry)
+
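Conceptually, register_default_subscribers attaches telemetry handlers to the bus so that every published workflow event gets recorded under the state directory. A toy bus showing the same publish/subscribe shape (this is not WorkflowEventBus, whose internals are outside this diff):

    import asyncio
    import typing as t

    Handler = t.Callable[[dict[str, t.Any]], t.Awaitable[None]]

    class TinyEventBus:
        def __init__(self) -> None:
            self._subscribers: dict[str, list[Handler]] = {}

        def subscribe(self, event: str, handler: Handler) -> None:
            self._subscribers.setdefault(event, []).append(handler)

        async def publish(self, event: str, payload: dict[str, t.Any]) -> None:
            for handler in self._subscribers.get(event, []):
                await handler(payload)

    async def main() -> None:
        bus = TinyEventBus()
        seen: list[dict[str, t.Any]] = []

        async def record(payload: dict[str, t.Any]) -> None:
            seen.append(payload)  # a telemetry sink would persist this to disk

        bus.subscribe("workflow.completed", record)
        await bus.publish("workflow.completed", {"workflow_id": "abc", "success": True})
        assert seen[0]["success"] is True

    asyncio.run(main())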
2076
|
+
+    def _initialize_logging(self) -> None:
+        from crackerjack.services.log_manager import get_log_manager
+        from crackerjack.services.logging import setup_structured_logging
+
+        log_manager = get_log_manager()
+        session_id = getattr(self, "web_job_id", None) or str(int(time.time()))[:8]
+        debug_log_file = log_manager.create_debug_log_file(session_id)
+
+        log_level = "DEBUG" if self.debug else "INFO"
+        setup_structured_logging(
+            level=log_level, json_output=False, log_file=debug_log_file
+        )
+
+        temp_logger = depends.get_sync(LoggerProtocol)
+        temp_logger.debug(
+            "Structured logging initialized",
+            log_file=str(debug_log_file),
+            log_directory=str(log_manager.log_dir),
+            package_path=str(self.pkg_path),
+            dry_run=self.dry_run,
+        )
+
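Note how the logger call binds its context (`log_file`, `dry_run`, ...) as keyword arguments instead of formatting them into the message string: that is the structured-logging idiom, which lets processors render the same event as plain text or JSON. Here is a self-contained sketch with the structlog library; that `setup_structured_logging` wraps something similar is an assumption, since its internals are not part of this hunk:

```python
# Structured-logging sketch using structlog (assumed, not confirmed by this diff).
import structlog

structlog.configure(
    processors=[
        structlog.processors.TimeStamper(fmt="iso"),
        structlog.processors.KeyValueRenderer(),
    ]
)

log = structlog.get_logger()
# Context rides along as key-value pairs, so a JSON renderer could be swapped
# in without touching any call sites.
log.debug("structured_logging_initialized", log_file="debug.log", dry_run=False)
```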
+    def _initialize_session_tracking(self, options: OptionsProtocol) -> None:
+        self.session.initialize_session_tracking(options)
+
+    def _track_task(self, task_id: str, task_name: str) -> None:
+        self.session.track_task(task_id, task_name)
+
+    def _complete_task(self, task_id: str, details: str | None = None) -> None:
+        self.session.complete_task(task_id, details)
+
+    def _fail_task(self, task_id: str, error: str) -> None:
+        self.session.fail_task(task_id, error)
+
+    def run_cleaning_phase(self, options: OptionsProtocol) -> bool:
+        result: bool = self.phases.run_cleaning_phase(options)  # type: ignore[arg-type,assignment]
+        return result
+
+    def run_fast_hooks_only(self, options: OptionsProtocol) -> bool:
+        result: bool = self.phases.run_fast_hooks_only(options)  # type: ignore[arg-type,assignment]
+        return result
+
+    def run_comprehensive_hooks_only(self, options: OptionsProtocol) -> bool:
+        result: bool = self.phases.run_comprehensive_hooks_only(options)  # type: ignore[arg-type,assignment]
+        return result
+
+    def run_hooks_phase(self, options: OptionsProtocol) -> bool:
+        result: bool = self.phases.run_hooks_phase(options)  # type: ignore[arg-type,assignment]
+        return result
+
+    def run_testing_phase(self, options: OptionsProtocol) -> bool:
+        result: bool = self.phases.run_testing_phase(options)  # type: ignore[arg-type,assignment]
+        return result
+
+    def run_publishing_phase(self, options: OptionsProtocol) -> bool:
+        result: bool = self.phases.run_publishing_phase(options)  # type: ignore[arg-type,assignment]
+        return result
+
+    def run_commit_phase(self, options: OptionsProtocol) -> bool:
+        result: bool = self.phases.run_commit_phase(options)  # type: ignore[arg-type,assignment]
+        return result
+
+    def run_configuration_phase(self, options: OptionsProtocol) -> bool:
+        result: bool = self.phases.run_configuration_phase(options)  # type: ignore[arg-type,assignment]
+        return result
+
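These one-liners form a thin facade: the orchestrator exposes each phase as its own entry point while delegating to `self.phases`, and the `type: ignore[arg-type,assignment]` pragmas paper over the delegate declaring a different options type than the facade's `OptionsProtocol`. A small sketch of that shape, with hypothetical names:

```python
# Facade/delegation sketch (hypothetical names, mirroring the methods above).
import typing as t


class OptionsProtocol(t.Protocol):
    verbose: bool


class Phases:
    # The real delegate declares a different options type than the facade,
    # which is what forces the `# type: ignore` pragmas in the diff above.
    def run_testing_phase(self, options: t.Any) -> bool:
        return True


class Orchestrator:
    def __init__(self, phases: Phases) -> None:
        self.phases = phases

    def run_testing_phase(self, options: OptionsProtocol) -> bool:
        # Thin facade: forward the call, return the delegate's verdict.
        return self.phases.run_testing_phase(options)
```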
+    async def run_complete_workflow(self, options: OptionsProtocol) -> bool:
+        result: bool = await self.pipeline.run_complete_workflow(options)
+        # Ensure we properly clean up any pending tasks before finishing.
+        await self._cleanup_pending_tasks()
+        return result
+
+    async def _cleanup_pending_tasks(self) -> None:
+        """Clean up any remaining asyncio tasks before the event loop closes."""
+        # First call the pipeline cleanup methods if they exist.
+        await self._cleanup_pipeline_executors()
+
+        # Then handle general asyncio task cleanup.
+        await self._cleanup_remaining_tasks()
+
+    async def _cleanup_pipeline_executors(self) -> None:
+        """Clean up specific pipeline executors."""
+        with suppress(Exception):
+            # Call the executors' async cleanup methods if they exist.
+            if hasattr(self, "pipeline") and hasattr(self.pipeline, "phases"):
+                await self._cleanup_executor_if_exists(
+                    self.pipeline.phases, "_parallel_executor"
+                )
+                await self._cleanup_executor_if_exists(
+                    self.pipeline.phases, "_async_executor"
+                )
+
+    async def _cleanup_executor_if_exists(
+        self, phases_obj: t.Any, executor_attr: str
+    ) -> None:
+        """Clean up an executor if it exists and has the required cleanup method."""
+        if hasattr(phases_obj, executor_attr):
+            executor = getattr(phases_obj, executor_attr)
+            if hasattr(executor, "async_cleanup"):
+                await executor.async_cleanup()
+
+    async def _cleanup_remaining_tasks(self) -> None:
+        """Clean up any remaining asyncio tasks."""
+        with suppress(RuntimeError):
+            loop = asyncio.get_running_loop()
+            # Collect every task that has not finished yet.
+            pending_tasks = [
+                task for task in asyncio.all_tasks(loop) if not task.done()
+            ]
+            await self._cancel_pending_tasks(pending_tasks)
+
+    async def _cancel_pending_tasks(self, pending_tasks: list) -> None:
+        """Cancel pending tasks with proper error handling."""
+        for task in pending_tasks:
+            if not task.done():
+                try:
+                    task.cancel()
+                    # Wait briefly for the cancellation to complete.
+                    await asyncio.wait_for(task, timeout=0.1)
+                except (TimeoutError, asyncio.CancelledError):
+                    # The task was cancelled or could not finish in time; continue.
+                    pass
+                except RuntimeError as e:
+                    if "Event loop is closed" in str(e):
+                        # The loop closed while we were cancelling; nothing left to do.
+                        return
+                    # Re-raise any other RuntimeError.
+                    raise
+
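The cancel-then-wait dance above is the standard way to drain stragglers: `Task.cancel()` only *requests* cancellation, so the code must await the task (bounded by `wait_for`) for the `CancelledError` to actually propagate, and a loop that closes mid-cancellation surfaces as the `"Event loop is closed"` `RuntimeError` handled above. A standalone demonstration of the same pattern:

```python
# Standalone demo of the cancel-and-drain pattern used by _cancel_pending_tasks.
import asyncio


async def straggler() -> None:
    await asyncio.sleep(60)  # Simulates a task that would outlive the workflow.


async def main() -> None:
    task = asyncio.create_task(straggler())
    await asyncio.sleep(0)  # Give the task a chance to start.

    task.cancel()  # Only *requests* cancellation...
    try:
        # ...so await it (bounded) for CancelledError to propagate.
        await asyncio.wait_for(task, timeout=0.1)
    except (TimeoutError, asyncio.CancelledError):
        pass  # Cancelled, or stuck past the timeout; either way, move on.

    assert task.cancelled()


asyncio.run(main())
```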
+    def run_complete_workflow_sync(self, options: OptionsProtocol) -> bool:
+        """Sync wrapper for run_complete_workflow."""
+        return asyncio.run(self.run_complete_workflow(options))
+
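`asyncio.run()` creates a fresh event loop, drives the coroutine to completion, and then closes the loop; that closing step is exactly why the cleanup code above must tolerate "Event loop is closed" errors. A generic sketch of the sync-wrapper pattern:

```python
# Generic sync-wrapper sketch: each call owns (and closes) its own event loop.
import asyncio


async def run_complete_workflow() -> bool:
    await asyncio.sleep(0)  # Stand-in for the real async pipeline.
    return True


def run_complete_workflow_sync() -> bool:
    # No loop leaks into the caller; asyncio.run() tears it down on exit.
    return asyncio.run(run_complete_workflow())


print(run_complete_workflow_sync())  # True
```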
+    def _cleanup_resources(self) -> None:
+        self.session.cleanup_resources()
+
+    def _register_cleanup(self, cleanup_handler: t.Callable[[], None]) -> None:
+        self.session.register_cleanup(cleanup_handler)
+
+    def _track_lock_file(self, lock_file_path: Path) -> None:
+        self.session.track_lock_file(lock_file_path)
+
+    def _get_version(self) -> str:
+        try:
+            return version("crackerjack")
+        except Exception:
+            return "unknown"
+
+    async def process(self, options: OptionsProtocol) -> bool:
+        self.session.start_session("process_workflow")
+
+        try:
+            result = await self.run_complete_workflow(options)
+            return self._finalize_session_with_result(result)
+        except Exception:
+            return self._finalize_session_on_exception()
+
+    def _finalize_session_with_result(self, result: bool) -> bool:
+        """Finalize session with the workflow result."""
+        self.session.end_session(success=result)
+        return result
+
+    def _finalize_session_on_exception(self) -> bool:
+        """Finalize session when an exception occurs."""
+        self.session.end_session(success=False)
+        return False
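`_get_version` leans on `importlib.metadata.version` (the `version` import sits earlier in the file, outside this hunk) and degrades to `"unknown"` rather than crashing when the distribution cannot be found. A standalone equivalent, narrowed to the specific `PackageNotFoundError` instead of the bare `Exception` the diff uses:

```python
# Version lookup with graceful fallback, mirroring _get_version above.
from importlib.metadata import PackageNotFoundError, version


def get_version(package: str) -> str:
    try:
        return version(package)
    except PackageNotFoundError:
        return "unknown"


print(get_version("pip"))           # e.g. "24.0" when pip is installed
print(get_version("no-such-dist"))  # "unknown"
```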