crackerjack 0.37.9__py3-none-any.whl → 0.45.2__py3-none-any.whl
This diff compares publicly released package versions as they appear in their public registry. It is provided for informational purposes only.
- crackerjack/README.md +19 -0
- crackerjack/__init__.py +30 -1
- crackerjack/__main__.py +342 -1263
- crackerjack/adapters/README.md +18 -0
- crackerjack/adapters/__init__.py +27 -5
- crackerjack/adapters/_output_paths.py +167 -0
- crackerjack/adapters/_qa_adapter_base.py +309 -0
- crackerjack/adapters/_tool_adapter_base.py +706 -0
- crackerjack/adapters/ai/README.md +65 -0
- crackerjack/adapters/ai/__init__.py +5 -0
- crackerjack/adapters/ai/claude.py +853 -0
- crackerjack/adapters/complexity/README.md +53 -0
- crackerjack/adapters/complexity/__init__.py +10 -0
- crackerjack/adapters/complexity/complexipy.py +641 -0
- crackerjack/adapters/dependency/__init__.py +22 -0
- crackerjack/adapters/dependency/pip_audit.py +418 -0
- crackerjack/adapters/format/README.md +72 -0
- crackerjack/adapters/format/__init__.py +11 -0
- crackerjack/adapters/format/mdformat.py +313 -0
- crackerjack/adapters/format/ruff.py +516 -0
- crackerjack/adapters/lint/README.md +47 -0
- crackerjack/adapters/lint/__init__.py +11 -0
- crackerjack/adapters/lint/codespell.py +273 -0
- crackerjack/adapters/lsp/README.md +49 -0
- crackerjack/adapters/lsp/__init__.py +27 -0
- crackerjack/adapters/{rust_tool_manager.py → lsp/_manager.py} +3 -3
- crackerjack/adapters/{skylos_adapter.py → lsp/skylos.py} +59 -7
- crackerjack/adapters/{zuban_adapter.py → lsp/zuban.py} +3 -6
- crackerjack/adapters/refactor/README.md +59 -0
- crackerjack/adapters/refactor/__init__.py +12 -0
- crackerjack/adapters/refactor/creosote.py +318 -0
- crackerjack/adapters/refactor/refurb.py +406 -0
- crackerjack/adapters/refactor/skylos.py +494 -0
- crackerjack/adapters/sast/README.md +132 -0
- crackerjack/adapters/sast/__init__.py +32 -0
- crackerjack/adapters/sast/_base.py +201 -0
- crackerjack/adapters/sast/bandit.py +423 -0
- crackerjack/adapters/sast/pyscn.py +405 -0
- crackerjack/adapters/sast/semgrep.py +241 -0
- crackerjack/adapters/security/README.md +111 -0
- crackerjack/adapters/security/__init__.py +17 -0
- crackerjack/adapters/security/gitleaks.py +339 -0
- crackerjack/adapters/type/README.md +52 -0
- crackerjack/adapters/type/__init__.py +12 -0
- crackerjack/adapters/type/pyrefly.py +402 -0
- crackerjack/adapters/type/ty.py +402 -0
- crackerjack/adapters/type/zuban.py +522 -0
- crackerjack/adapters/utility/README.md +51 -0
- crackerjack/adapters/utility/__init__.py +10 -0
- crackerjack/adapters/utility/checks.py +884 -0
- crackerjack/agents/README.md +264 -0
- crackerjack/agents/__init__.py +40 -12
- crackerjack/agents/base.py +1 -0
- crackerjack/agents/claude_code_bridge.py +641 -0
- crackerjack/agents/coordinator.py +49 -53
- crackerjack/agents/dry_agent.py +187 -3
- crackerjack/agents/enhanced_coordinator.py +279 -0
- crackerjack/agents/enhanced_proactive_agent.py +185 -0
- crackerjack/agents/error_middleware.py +53 -0
- crackerjack/agents/formatting_agent.py +6 -8
- crackerjack/agents/helpers/__init__.py +9 -0
- crackerjack/agents/helpers/performance/__init__.py +22 -0
- crackerjack/agents/helpers/performance/performance_ast_analyzer.py +357 -0
- crackerjack/agents/helpers/performance/performance_pattern_detector.py +909 -0
- crackerjack/agents/helpers/performance/performance_recommender.py +572 -0
- crackerjack/agents/helpers/refactoring/__init__.py +22 -0
- crackerjack/agents/helpers/refactoring/code_transformer.py +536 -0
- crackerjack/agents/helpers/refactoring/complexity_analyzer.py +344 -0
- crackerjack/agents/helpers/refactoring/dead_code_detector.py +437 -0
- crackerjack/agents/helpers/test_creation/__init__.py +19 -0
- crackerjack/agents/helpers/test_creation/test_ast_analyzer.py +216 -0
- crackerjack/agents/helpers/test_creation/test_coverage_analyzer.py +643 -0
- crackerjack/agents/helpers/test_creation/test_template_generator.py +1031 -0
- crackerjack/agents/performance_agent.py +121 -1152
- crackerjack/agents/refactoring_agent.py +156 -655
- crackerjack/agents/semantic_agent.py +479 -0
- crackerjack/agents/semantic_helpers.py +356 -0
- crackerjack/agents/test_creation_agent.py +19 -1605
- crackerjack/api.py +5 -7
- crackerjack/cli/README.md +394 -0
- crackerjack/cli/__init__.py +1 -1
- crackerjack/cli/cache_handlers.py +23 -18
- crackerjack/cli/cache_handlers_enhanced.py +1 -4
- crackerjack/cli/facade.py +70 -8
- crackerjack/cli/formatting.py +13 -0
- crackerjack/cli/handlers/__init__.py +85 -0
- crackerjack/cli/handlers/advanced.py +103 -0
- crackerjack/cli/handlers/ai_features.py +62 -0
- crackerjack/cli/handlers/analytics.py +479 -0
- crackerjack/cli/handlers/changelog.py +271 -0
- crackerjack/cli/handlers/config_handlers.py +16 -0
- crackerjack/cli/handlers/coverage.py +84 -0
- crackerjack/cli/handlers/documentation.py +280 -0
- crackerjack/cli/handlers/main_handlers.py +497 -0
- crackerjack/cli/handlers/monitoring.py +371 -0
- crackerjack/cli/handlers.py +249 -49
- crackerjack/cli/interactive.py +8 -5
- crackerjack/cli/options.py +203 -110
- crackerjack/cli/semantic_handlers.py +292 -0
- crackerjack/cli/version.py +19 -0
- crackerjack/code_cleaner.py +60 -24
- crackerjack/config/README.md +472 -0
- crackerjack/config/__init__.py +256 -0
- crackerjack/config/global_lock_config.py +191 -54
- crackerjack/config/hooks.py +188 -16
- crackerjack/config/loader.py +239 -0
- crackerjack/config/settings.py +141 -0
- crackerjack/config/tool_commands.py +331 -0
- crackerjack/core/README.md +393 -0
- crackerjack/core/async_workflow_orchestrator.py +79 -53
- crackerjack/core/autofix_coordinator.py +22 -9
- crackerjack/core/container.py +10 -9
- crackerjack/core/enhanced_container.py +9 -9
- crackerjack/core/performance.py +1 -1
- crackerjack/core/performance_monitor.py +5 -3
- crackerjack/core/phase_coordinator.py +1018 -634
- crackerjack/core/proactive_workflow.py +3 -3
- crackerjack/core/retry.py +275 -0
- crackerjack/core/service_watchdog.py +167 -23
- crackerjack/core/session_coordinator.py +187 -382
- crackerjack/core/timeout_manager.py +161 -44
- crackerjack/core/workflow/__init__.py +21 -0
- crackerjack/core/workflow/workflow_ai_coordinator.py +863 -0
- crackerjack/core/workflow/workflow_event_orchestrator.py +1107 -0
- crackerjack/core/workflow/workflow_issue_parser.py +714 -0
- crackerjack/core/workflow/workflow_phase_executor.py +1158 -0
- crackerjack/core/workflow/workflow_security_gates.py +400 -0
- crackerjack/core/workflow_orchestrator.py +1247 -953
- crackerjack/data/README.md +11 -0
- crackerjack/data/__init__.py +8 -0
- crackerjack/data/models.py +79 -0
- crackerjack/data/repository.py +210 -0
- crackerjack/decorators/README.md +180 -0
- crackerjack/decorators/__init__.py +35 -0
- crackerjack/decorators/error_handling.py +649 -0
- crackerjack/decorators/error_handling_decorators.py +334 -0
- crackerjack/decorators/helpers.py +58 -0
- crackerjack/decorators/patterns.py +281 -0
- crackerjack/decorators/utils.py +58 -0
- crackerjack/docs/README.md +11 -0
- crackerjack/docs/generated/api/CLI_REFERENCE.md +1 -1
- crackerjack/documentation/README.md +11 -0
- crackerjack/documentation/ai_templates.py +1 -1
- crackerjack/documentation/dual_output_generator.py +11 -9
- crackerjack/documentation/reference_generator.py +104 -59
- crackerjack/dynamic_config.py +52 -61
- crackerjack/errors.py +1 -1
- crackerjack/events/README.md +11 -0
- crackerjack/events/__init__.py +16 -0
- crackerjack/events/telemetry.py +175 -0
- crackerjack/events/workflow_bus.py +346 -0
- crackerjack/exceptions/README.md +301 -0
- crackerjack/exceptions/__init__.py +5 -0
- crackerjack/exceptions/config.py +4 -0
- crackerjack/exceptions/tool_execution_error.py +245 -0
- crackerjack/executors/README.md +591 -0
- crackerjack/executors/__init__.py +2 -0
- crackerjack/executors/async_hook_executor.py +539 -77
- crackerjack/executors/cached_hook_executor.py +3 -3
- crackerjack/executors/hook_executor.py +967 -102
- crackerjack/executors/hook_lock_manager.py +31 -22
- crackerjack/executors/individual_hook_executor.py +66 -32
- crackerjack/executors/lsp_aware_hook_executor.py +136 -57
- crackerjack/executors/progress_hook_executor.py +282 -0
- crackerjack/executors/tool_proxy.py +23 -7
- crackerjack/hooks/README.md +485 -0
- crackerjack/hooks/lsp_hook.py +8 -9
- crackerjack/intelligence/README.md +557 -0
- crackerjack/interactive.py +37 -10
- crackerjack/managers/README.md +369 -0
- crackerjack/managers/async_hook_manager.py +41 -57
- crackerjack/managers/hook_manager.py +449 -79
- crackerjack/managers/publish_manager.py +81 -36
- crackerjack/managers/test_command_builder.py +290 -12
- crackerjack/managers/test_executor.py +93 -8
- crackerjack/managers/test_manager.py +1082 -75
- crackerjack/managers/test_progress.py +118 -26
- crackerjack/mcp/README.md +374 -0
- crackerjack/mcp/cache.py +25 -2
- crackerjack/mcp/client_runner.py +35 -18
- crackerjack/mcp/context.py +9 -9
- crackerjack/mcp/dashboard.py +24 -8
- crackerjack/mcp/enhanced_progress_monitor.py +34 -23
- crackerjack/mcp/file_monitor.py +27 -6
- crackerjack/mcp/progress_components.py +45 -34
- crackerjack/mcp/progress_monitor.py +6 -9
- crackerjack/mcp/rate_limiter.py +11 -7
- crackerjack/mcp/server.py +2 -0
- crackerjack/mcp/server_core.py +187 -55
- crackerjack/mcp/service_watchdog.py +12 -9
- crackerjack/mcp/task_manager.py +2 -2
- crackerjack/mcp/tools/README.md +27 -0
- crackerjack/mcp/tools/__init__.py +2 -0
- crackerjack/mcp/tools/core_tools.py +75 -52
- crackerjack/mcp/tools/execution_tools.py +87 -31
- crackerjack/mcp/tools/intelligence_tools.py +2 -2
- crackerjack/mcp/tools/proactive_tools.py +1 -1
- crackerjack/mcp/tools/semantic_tools.py +584 -0
- crackerjack/mcp/tools/utility_tools.py +180 -132
- crackerjack/mcp/tools/workflow_executor.py +87 -46
- crackerjack/mcp/websocket/README.md +31 -0
- crackerjack/mcp/websocket/app.py +11 -1
- crackerjack/mcp/websocket/event_bridge.py +188 -0
- crackerjack/mcp/websocket/jobs.py +27 -4
- crackerjack/mcp/websocket/monitoring/__init__.py +25 -0
- crackerjack/mcp/websocket/monitoring/api/__init__.py +19 -0
- crackerjack/mcp/websocket/monitoring/api/dependencies.py +141 -0
- crackerjack/mcp/websocket/monitoring/api/heatmap.py +154 -0
- crackerjack/mcp/websocket/monitoring/api/intelligence.py +199 -0
- crackerjack/mcp/websocket/monitoring/api/metrics.py +203 -0
- crackerjack/mcp/websocket/monitoring/api/telemetry.py +101 -0
- crackerjack/mcp/websocket/monitoring/dashboard.py +18 -0
- crackerjack/mcp/websocket/monitoring/factory.py +109 -0
- crackerjack/mcp/websocket/monitoring/filters.py +10 -0
- crackerjack/mcp/websocket/monitoring/metrics.py +64 -0
- crackerjack/mcp/websocket/monitoring/models.py +90 -0
- crackerjack/mcp/websocket/monitoring/utils.py +171 -0
- crackerjack/mcp/websocket/monitoring/websocket_manager.py +78 -0
- crackerjack/mcp/websocket/monitoring/websockets/__init__.py +17 -0
- crackerjack/mcp/websocket/monitoring/websockets/dependencies.py +126 -0
- crackerjack/mcp/websocket/monitoring/websockets/heatmap.py +176 -0
- crackerjack/mcp/websocket/monitoring/websockets/intelligence.py +291 -0
- crackerjack/mcp/websocket/monitoring/websockets/metrics.py +291 -0
- crackerjack/mcp/websocket/monitoring_endpoints.py +16 -2930
- crackerjack/mcp/websocket/server.py +1 -3
- crackerjack/mcp/websocket/websocket_handler.py +107 -6
- crackerjack/models/README.md +308 -0
- crackerjack/models/__init__.py +10 -1
- crackerjack/models/config.py +639 -22
- crackerjack/models/config_adapter.py +6 -6
- crackerjack/models/protocols.py +1167 -23
- crackerjack/models/pydantic_models.py +320 -0
- crackerjack/models/qa_config.py +145 -0
- crackerjack/models/qa_results.py +134 -0
- crackerjack/models/results.py +35 -0
- crackerjack/models/semantic_models.py +258 -0
- crackerjack/models/task.py +19 -3
- crackerjack/models/test_models.py +60 -0
- crackerjack/monitoring/README.md +11 -0
- crackerjack/monitoring/ai_agent_watchdog.py +5 -4
- crackerjack/monitoring/metrics_collector.py +4 -3
- crackerjack/monitoring/regression_prevention.py +4 -3
- crackerjack/monitoring/websocket_server.py +4 -241
- crackerjack/orchestration/README.md +340 -0
- crackerjack/orchestration/__init__.py +43 -0
- crackerjack/orchestration/advanced_orchestrator.py +20 -67
- crackerjack/orchestration/cache/README.md +312 -0
- crackerjack/orchestration/cache/__init__.py +37 -0
- crackerjack/orchestration/cache/memory_cache.py +338 -0
- crackerjack/orchestration/cache/tool_proxy_cache.py +340 -0
- crackerjack/orchestration/config.py +297 -0
- crackerjack/orchestration/coverage_improvement.py +13 -6
- crackerjack/orchestration/execution_strategies.py +6 -6
- crackerjack/orchestration/hook_orchestrator.py +1398 -0
- crackerjack/orchestration/strategies/README.md +401 -0
- crackerjack/orchestration/strategies/__init__.py +39 -0
- crackerjack/orchestration/strategies/adaptive_strategy.py +630 -0
- crackerjack/orchestration/strategies/parallel_strategy.py +237 -0
- crackerjack/orchestration/strategies/sequential_strategy.py +299 -0
- crackerjack/orchestration/test_progress_streamer.py +1 -1
- crackerjack/plugins/README.md +11 -0
- crackerjack/plugins/hooks.py +3 -2
- crackerjack/plugins/loader.py +3 -3
- crackerjack/plugins/managers.py +1 -1
- crackerjack/py313.py +191 -0
- crackerjack/security/README.md +11 -0
- crackerjack/services/README.md +374 -0
- crackerjack/services/__init__.py +8 -21
- crackerjack/services/ai/README.md +295 -0
- crackerjack/services/ai/__init__.py +7 -0
- crackerjack/services/ai/advanced_optimizer.py +878 -0
- crackerjack/services/{contextual_ai_assistant.py → ai/contextual_ai_assistant.py} +5 -3
- crackerjack/services/ai/embeddings.py +444 -0
- crackerjack/services/ai/intelligent_commit.py +328 -0
- crackerjack/services/ai/predictive_analytics.py +510 -0
- crackerjack/services/api_extractor.py +5 -3
- crackerjack/services/bounded_status_operations.py +45 -5
- crackerjack/services/cache.py +249 -318
- crackerjack/services/changelog_automation.py +7 -3
- crackerjack/services/command_execution_service.py +305 -0
- crackerjack/services/config_integrity.py +83 -39
- crackerjack/services/config_merge.py +9 -6
- crackerjack/services/config_service.py +198 -0
- crackerjack/services/config_template.py +13 -26
- crackerjack/services/coverage_badge_service.py +6 -4
- crackerjack/services/coverage_ratchet.py +53 -27
- crackerjack/services/debug.py +18 -7
- crackerjack/services/dependency_analyzer.py +4 -4
- crackerjack/services/dependency_monitor.py +13 -13
- crackerjack/services/documentation_generator.py +4 -2
- crackerjack/services/documentation_service.py +62 -33
- crackerjack/services/enhanced_filesystem.py +81 -27
- crackerjack/services/enterprise_optimizer.py +1 -1
- crackerjack/services/error_pattern_analyzer.py +10 -10
- crackerjack/services/file_filter.py +221 -0
- crackerjack/services/file_hasher.py +5 -7
- crackerjack/services/file_io_service.py +361 -0
- crackerjack/services/file_modifier.py +615 -0
- crackerjack/services/filesystem.py +80 -109
- crackerjack/services/git.py +99 -5
- crackerjack/services/health_metrics.py +4 -6
- crackerjack/services/heatmap_generator.py +12 -3
- crackerjack/services/incremental_executor.py +380 -0
- crackerjack/services/initialization.py +101 -49
- crackerjack/services/log_manager.py +2 -2
- crackerjack/services/logging.py +120 -68
- crackerjack/services/lsp_client.py +12 -12
- crackerjack/services/memory_optimizer.py +27 -22
- crackerjack/services/monitoring/README.md +30 -0
- crackerjack/services/monitoring/__init__.py +9 -0
- crackerjack/services/monitoring/dependency_monitor.py +678 -0
- crackerjack/services/monitoring/error_pattern_analyzer.py +676 -0
- crackerjack/services/monitoring/health_metrics.py +716 -0
- crackerjack/services/monitoring/metrics.py +587 -0
- crackerjack/services/{performance_benchmarks.py → monitoring/performance_benchmarks.py} +100 -14
- crackerjack/services/{performance_cache.py → monitoring/performance_cache.py} +21 -15
- crackerjack/services/{performance_monitor.py → monitoring/performance_monitor.py} +10 -6
- crackerjack/services/parallel_executor.py +166 -55
- crackerjack/services/patterns/__init__.py +142 -0
- crackerjack/services/patterns/agents.py +107 -0
- crackerjack/services/patterns/code/__init__.py +15 -0
- crackerjack/services/patterns/code/detection.py +118 -0
- crackerjack/services/patterns/code/imports.py +107 -0
- crackerjack/services/patterns/code/paths.py +159 -0
- crackerjack/services/patterns/code/performance.py +119 -0
- crackerjack/services/patterns/code/replacement.py +36 -0
- crackerjack/services/patterns/core.py +212 -0
- crackerjack/services/patterns/documentation/__init__.py +14 -0
- crackerjack/services/patterns/documentation/badges_markdown.py +96 -0
- crackerjack/services/patterns/documentation/comments_blocks.py +83 -0
- crackerjack/services/patterns/documentation/docstrings.py +89 -0
- crackerjack/services/patterns/formatting.py +226 -0
- crackerjack/services/patterns/operations.py +339 -0
- crackerjack/services/patterns/security/__init__.py +23 -0
- crackerjack/services/patterns/security/code_injection.py +122 -0
- crackerjack/services/patterns/security/credentials.py +190 -0
- crackerjack/services/patterns/security/path_traversal.py +221 -0
- crackerjack/services/patterns/security/unsafe_operations.py +216 -0
- crackerjack/services/patterns/templates.py +62 -0
- crackerjack/services/patterns/testing/__init__.py +18 -0
- crackerjack/services/patterns/testing/error_patterns.py +107 -0
- crackerjack/services/patterns/testing/pytest_output.py +126 -0
- crackerjack/services/patterns/tool_output/__init__.py +16 -0
- crackerjack/services/patterns/tool_output/bandit.py +72 -0
- crackerjack/services/patterns/tool_output/other.py +97 -0
- crackerjack/services/patterns/tool_output/pyright.py +67 -0
- crackerjack/services/patterns/tool_output/ruff.py +44 -0
- crackerjack/services/patterns/url_sanitization.py +114 -0
- crackerjack/services/patterns/utilities.py +42 -0
- crackerjack/services/patterns/utils.py +339 -0
- crackerjack/services/patterns/validation.py +46 -0
- crackerjack/services/patterns/versioning.py +62 -0
- crackerjack/services/predictive_analytics.py +21 -8
- crackerjack/services/profiler.py +280 -0
- crackerjack/services/quality/README.md +415 -0
- crackerjack/services/quality/__init__.py +11 -0
- crackerjack/services/quality/anomaly_detector.py +392 -0
- crackerjack/services/quality/pattern_cache.py +333 -0
- crackerjack/services/quality/pattern_detector.py +479 -0
- crackerjack/services/quality/qa_orchestrator.py +491 -0
- crackerjack/services/{quality_baseline.py → quality/quality_baseline.py} +163 -2
- crackerjack/services/{quality_baseline_enhanced.py → quality/quality_baseline_enhanced.py} +4 -1
- crackerjack/services/{quality_intelligence.py → quality/quality_intelligence.py} +180 -16
- crackerjack/services/regex_patterns.py +58 -2987
- crackerjack/services/regex_utils.py +55 -29
- crackerjack/services/secure_status_formatter.py +42 -15
- crackerjack/services/secure_subprocess.py +35 -2
- crackerjack/services/security.py +16 -8
- crackerjack/services/server_manager.py +40 -51
- crackerjack/services/smart_scheduling.py +46 -6
- crackerjack/services/status_authentication.py +3 -3
- crackerjack/services/thread_safe_status_collector.py +1 -0
- crackerjack/services/tool_filter.py +368 -0
- crackerjack/services/tool_version_service.py +9 -5
- crackerjack/services/unified_config.py +43 -351
- crackerjack/services/vector_store.py +689 -0
- crackerjack/services/version_analyzer.py +6 -4
- crackerjack/services/version_checker.py +14 -8
- crackerjack/services/zuban_lsp_service.py +5 -4
- crackerjack/slash_commands/README.md +11 -0
- crackerjack/slash_commands/init.md +2 -12
- crackerjack/slash_commands/run.md +84 -50
- crackerjack/tools/README.md +11 -0
- crackerjack/tools/__init__.py +30 -0
- crackerjack/tools/_git_utils.py +105 -0
- crackerjack/tools/check_added_large_files.py +139 -0
- crackerjack/tools/check_ast.py +105 -0
- crackerjack/tools/check_json.py +103 -0
- crackerjack/tools/check_jsonschema.py +297 -0
- crackerjack/tools/check_toml.py +103 -0
- crackerjack/tools/check_yaml.py +110 -0
- crackerjack/tools/codespell_wrapper.py +72 -0
- crackerjack/tools/end_of_file_fixer.py +202 -0
- crackerjack/tools/format_json.py +128 -0
- crackerjack/tools/mdformat_wrapper.py +114 -0
- crackerjack/tools/trailing_whitespace.py +198 -0
- crackerjack/tools/validate_regex_patterns.py +7 -3
- crackerjack/ui/README.md +11 -0
- crackerjack/ui/dashboard_renderer.py +28 -0
- crackerjack/ui/templates/README.md +11 -0
- crackerjack/utils/console_utils.py +13 -0
- crackerjack/utils/dependency_guard.py +230 -0
- crackerjack/utils/retry_utils.py +275 -0
- crackerjack/workflows/README.md +590 -0
- crackerjack/workflows/__init__.py +46 -0
- crackerjack/workflows/actions.py +811 -0
- crackerjack/workflows/auto_fix.py +444 -0
- crackerjack/workflows/container_builder.py +499 -0
- crackerjack/workflows/definitions.py +443 -0
- crackerjack/workflows/engine.py +177 -0
- crackerjack/workflows/event_bridge.py +242 -0
- {crackerjack-0.37.9.dist-info → crackerjack-0.45.2.dist-info}/METADATA +678 -98
- crackerjack-0.45.2.dist-info/RECORD +478 -0
- {crackerjack-0.37.9.dist-info → crackerjack-0.45.2.dist-info}/WHEEL +1 -1
- crackerjack/managers/test_manager_backup.py +0 -1075
- crackerjack/mcp/tools/execution_tools_backup.py +0 -1011
- crackerjack/mixins/__init__.py +0 -3
- crackerjack/mixins/error_handling.py +0 -145
- crackerjack/services/config.py +0 -358
- crackerjack/ui/server_panels.py +0 -125
- crackerjack-0.37.9.dist-info/RECORD +0 -231
- /crackerjack/adapters/{rust_tool_adapter.py → lsp/_base.py} +0 -0
- /crackerjack/adapters/{lsp_client.py → lsp/_client.py} +0 -0
- {crackerjack-0.37.9.dist-info → crackerjack-0.45.2.dist-info}/entry_points.txt +0 -0
- {crackerjack-0.37.9.dist-info → crackerjack-0.45.2.dist-info}/licenses/LICENSE +0 -0

crackerjack/core/workflow_orchestrator.py +1247 -953

```diff
@@ -1,91 +1,103 @@
+"""Workflow Orchestrator for ACB integration.
+
+ACB-powered orchestration layer managing workflow lifecycle, dependency resolution,
+and execution strategies. Supports dual execution modes for gradual migration.
+
+ACB Patterns:
+- MODULE_ID and MODULE_STATUS at module level
+- depends.set() registration after class definition
+- Structured logging with context fields
+- Protocol-based interfaces
+"""
+
+from __future__ import annotations
+
 import asyncio
 import time
 import typing as t
+from contextlib import suppress
+from importlib.metadata import version
 from pathlib import Path
 
-from
+from acb.config import Config
+from acb.console import Console
+from acb.depends import Inject, depends
+from acb.events import Event, EventHandlerResult
 
 from crackerjack.agents.base import AgentContext, Issue, IssueType, Priority
-from crackerjack.agents.
-from crackerjack.
-from crackerjack.
-
-
-
-
-
-
-
-
-)
-from crackerjack.services.memory_optimizer import get_memory_optimizer, memory_optimized
-from crackerjack.services.performance_benchmarks import PerformanceBenchmarkService
-from crackerjack.services.performance_cache import get_performance_cache
-from crackerjack.services.performance_monitor import (
-    get_performance_monitor,
-    phase_monitor,
-)
-from crackerjack.services.quality_baseline_enhanced import (
-    EnhancedQualityBaselineService,
+from crackerjack.agents.enhanced_coordinator import EnhancedAgentCoordinator
+from crackerjack.events import WorkflowEvent, WorkflowEventBus
+from crackerjack.models.protocols import (
+    DebugServiceProtocol,
+    LoggerProtocol,
+    MemoryOptimizerProtocol,
+    OptionsProtocol,
+    PerformanceBenchmarkProtocol,
+    PerformanceCacheProtocol,
+    PerformanceMonitorProtocol,
+    QualityIntelligenceProtocol,
 )
-from crackerjack.services.
+from crackerjack.services.logging import LoggingContext
+from crackerjack.services.memory_optimizer import memory_optimized
 
 from .phase_coordinator import PhaseCoordinator
-from .session_coordinator import SessionCoordinator
-
-
-def version() -> str:
-    try:
-        import importlib.metadata
-
-        return importlib.metadata.version("crackerjack")
-    except Exception:
-        return "unknown"
+from .session_coordinator import SessionController, SessionCoordinator
+from .workflow import WorkflowPhaseExecutor
 
 
 class WorkflowPipeline:
+    @depends.inject
     def __init__(
         self,
-        console: Console,
-
-
-
+        console: Inject[Console],
+        config: Inject[Config],
+        performance_monitor: Inject[PerformanceMonitorProtocol],
+        memory_optimizer: Inject[MemoryOptimizerProtocol],
+        performance_cache: Inject[PerformanceCacheProtocol],
+        debugger: Inject[DebugServiceProtocol],
+        logger: Inject[LoggerProtocol],
+        session: Inject[SessionCoordinator],
+        phases: Inject[PhaseCoordinator],
+        phase_executor: Inject[WorkflowPhaseExecutor],
+        quality_intelligence: Inject[QualityIntelligenceProtocol] | None = None,
+        performance_benchmarks: Inject[PerformanceBenchmarkProtocol] | None = None,
     ) -> None:
         self.console = console
-        self.
+        self.config = config
+        self.pkg_path = config.root_path
         self.session = session
         self.phases = phases
         self._mcp_state_manager: t.Any = None
         self._last_security_audit: t.Any = None
 
-
-        self._debugger
-
-        self.
-        self.
-        self.
-
-
-        try:
-            quality_baseline = EnhancedQualityBaselineService()
-            self._quality_intelligence = QualityIntelligenceService(quality_baseline)
-        except Exception:
-            # Fallback gracefully if quality intelligence is not available
-            self._quality_intelligence = None
+        # Services injected via ACB DI
+        self._debugger = debugger
+        self._performance_monitor = performance_monitor
+        self._memory_optimizer = memory_optimizer
+        self._cache = performance_cache
+        self._quality_intelligence = quality_intelligence
+        self._performance_benchmarks = performance_benchmarks
+        self.logger = logger
 
-        #
+        # Event bus with graceful fallback
         try:
-            self.
-
+            self._event_bus: WorkflowEventBus | None = depends.get_sync(
+                WorkflowEventBus
             )
-        except Exception:
-
-            self.
+        except Exception as e:
+            print(f"WARNING: WorkflowEventBus not available: {type(e).__name__}: {e}")
+            self._event_bus = None
+
+        # Phase executor for workflow execution
+        self._phase_executor = phase_executor
+        self._phase_executor.configure(session, phases, self._event_bus)
+        self._phase_executor._mcp_state_manager = self._mcp_state_manager
+
+        self._session_controller = SessionController(self)
 
     @property
-    def debugger(self) ->
-
-        self._debugger = get_ai_agent_debugger()
+    def debugger(self) -> DebugServiceProtocol:
+        """Get debug service (already injected via DI)."""
         return self._debugger
 
     def _should_debug(self) -> bool:
```
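The constructor rewrite above replaces manual service construction with ACB-style dependency injection: `__init__` is decorated with `@depends.inject`, and each collaborator is declared as an `Inject[...]` parameter resolved from a registry populated via `depends.set()`. The sketch below is a toy re-implementation of that mechanic, not ACB's actual API; the names `register`, `inject`, and the `Inject` marker are illustrative stand-ins that only show how annotation-driven injection can fill constructor arguments from a type-keyed registry.

```python
import typing as t


class Console:
    """Stand-in service for the demo."""

    def print(self, msg: str) -> None:
        print(msg)


class Inject:
    """Toy marker: Inject[T] resolves to T, flagging the parameter for lookup."""

    def __class_getitem__(cls, item: type) -> type:
        return item


_registry: dict[type, t.Any] = {}


def register(cls: type, instance: t.Any) -> None:
    """Toy analogue of depends.set(): bind a type to a singleton instance."""
    _registry[cls] = instance


def inject(init: t.Callable[..., None]) -> t.Callable[..., None]:
    """Toy analogue of @depends.inject: fill missing kwargs from the registry."""

    def wrapper(self: t.Any, **overrides: t.Any) -> None:
        hints = t.get_type_hints(init)
        filled = {
            name: _registry[hint]
            for name, hint in hints.items()
            if name != "return" and hint in _registry and name not in overrides
        }
        init(self, **filled | overrides)

    return wrapper


class Pipeline:
    @inject
    def __init__(self, console: Inject[Console]) -> None:
        self.console = console


register(Console, Console())
pipeline = Pipeline()  # console resolved from the registry, no argument passed
pipeline.console.print("injected!")
```

One payoff visible in the diff: optional services such as `quality_intelligence` and `performance_benchmarks` now default to `None` instead of being constructed inside a try/except, so wiring failures surface at registration time rather than mid-constructor.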
```diff
@@ -96,141 +108,515 @@ class WorkflowPipeline:
     @memory_optimized
     async def run_complete_workflow(self, options: OptionsProtocol) -> bool:
         workflow_id = f"workflow_{int(time.time())}"
+        event_context = self._workflow_context(workflow_id, options)
+        start_time = time.time()
 
         self._performance_monitor.start_workflow(workflow_id)
-
         await self._cache.start()
+        await self._publish_event(WorkflowEvent.WORKFLOW_STARTED, event_context)
 
-
-
-
-
-
-
-
-
-
-        success = await self._execute_workflow_with_timing(
-            options, start_time, workflow_id
+        success = False
+        try:
+            with LoggingContext(
+                "workflow_execution",
+                testing=getattr(options, "test", False),
+                skip_hooks=getattr(options, "skip_hooks", False),
+            ):
+                success = await self._execute_workflow(
+                    options, workflow_id, event_context, start_time
         )
+            return success
+        except KeyboardInterrupt:
+            return await self._handle_keyboard_interrupt(workflow_id, event_context)
+        except Exception as e:
+            return await self._handle_general_exception(e, workflow_id, event_context)
+        finally:
+            await self._cleanup_workflow_resources()
 
-
-
-
-
-
-
-
+    async def _execute_workflow(
+        self,
+        options: OptionsProtocol,
+        workflow_id: str,
+        event_context: dict[str, t.Any],
+        start_time: float,
+    ) -> bool:
+        """Execute the workflow either event-driven or sequentially."""
+        if self._event_bus:
+            return await self._run_event_driven_workflow(
+                options, workflow_id, event_context, start_time
+            )
+        return await self._run_sequential_workflow(
+            options, workflow_id, event_context, start_time
+        )
 
-
+    async def _run_sequential_workflow(
+        self,
+        options: OptionsProtocol,
+        workflow_id: str,
+        event_context: dict[str, t.Any],
+        start_time: float,
+    ) -> bool:
+        """Execute the workflow sequentially."""
+        await self._publish_event(
+            WorkflowEvent.WORKFLOW_SESSION_INITIALIZING,
+            event_context,
+        )
+        self._session_controller.initialize(options)
+        await self._publish_event(
+            WorkflowEvent.WORKFLOW_SESSION_READY,
+            event_context,
+        )
+        success = await self._execute_workflow_with_timing(
+            options, start_time, workflow_id
+        )
+        final_event = (
+            WorkflowEvent.WORKFLOW_COMPLETED
+            if success
+            else WorkflowEvent.WORKFLOW_FAILED
+        )
+        await self._publish_event(
+            final_event,
+            event_context | {"success": success},
+        )
+        self._performance_monitor.end_workflow(workflow_id, success)
+        return success
 
-
-
-
+    async def _handle_keyboard_interrupt(
+        self, workflow_id: str, event_context: dict[str, t.Any]
+    ) -> bool:
+        """Handle keyboard interrupt during workflow execution."""
+        self._performance_monitor.end_workflow(workflow_id, False)
+        await self._publish_event(
+            WorkflowEvent.WORKFLOW_INTERRUPTED,
+            event_context,
+        )
+        return self._handle_user_interruption()
 
-
-
-
+    async def _handle_general_exception(
+        self, e: Exception, workflow_id: str, event_context: dict[str, t.Any]
+    ) -> bool:
+        """Handle general exceptions during workflow execution."""
+        self._performance_monitor.end_workflow(workflow_id, False)
+        await self._publish_event(
+            WorkflowEvent.WORKFLOW_FAILED,
+            event_context
+            | {
+                "error": str(e),
+                "error_type": type(e).__name__,
+            },
+        )
+        return self._handle_workflow_exception(e)
 
-
-
+    async def _cleanup_workflow_resources(self) -> None:
+        """Clean up workflow resources in the finally block."""
+        self.session.cleanup_resources()
+        self._memory_optimizer.optimize_memory()
+        await self._cache.stop()
 
-
-
+    def _unsubscribe_all_subscriptions(self, subscriptions: list[str]) -> None:
+        """Unsubscribe from all event subscriptions."""
+        for subscription_id in subscriptions.copy():
+            if self._event_bus:
+                self._event_bus.unsubscribe(subscription_id)
+                subscriptions.remove(subscription_id)
 
-    def
-        self
-
+    async def _finalize_workflow(
+        self,
+        start_time: float,
+        workflow_id: str,
+        success: bool,
+        completion_future: asyncio.Future[bool],
+        subscriptions: list[str],
+        payload: dict[str, t.Any] | None = None,
+    ) -> EventHandlerResult:
+        """Finalize the workflow execution."""
+        if completion_future.done():
+            return EventHandlerResult(success=success)
 
-        self.
-
-        self.
-        self.
-        self._register_lsp_cleanup_handler(options)
-        self._log_workflow_startup_info(options)
+        self.session.finalize_session(start_time, success)
+        duration = time.time() - start_time
+        self._log_workflow_completion(success, duration)
+        self._log_workflow_completion_debug(success, duration)
 
-
-
-
+        workflow_perf = self._performance_monitor.end_workflow(workflow_id, success)
+        self.logger.info(
+            f"Workflow performance: {workflow_perf.performance_score: .1f} score, "
+            f"{workflow_perf.total_duration_seconds: .2f}s duration"
+        )
 
-        self.
-
-            "started",
-            details={
-                "testing": getattr(options, "test", False),
-                "skip_hooks": getattr(options, "skip_hooks", False),
-                "ai_agent": getattr(options, "ai_agent", False),
-            },
+        await self._generate_performance_benchmark_report(
+            workflow_id, duration, success
         )
 
-
-
-        self.session.set_cleanup_config(options.cleanup)
+        self._unsubscribe_all_subscriptions(subscriptions)
+        completion_future.set_result(success)
 
-
-        """Initialize Zuban LSP server if not disabled."""
-        # Check if LSP is disabled via CLI flag or configuration
-        if getattr(options, "no_zuban_lsp", False):
-            self.logger.debug("Zuban LSP server disabled by --no-zuban-lsp flag")
-            return
+        return EventHandlerResult(success=success)
 
-
-
-
-
-
+    async def _publish_workflow_failure(
+        self,
+        event_context: dict[str, t.Any],
+        stage: str,
+        error: Exception | None = None,
+    ) -> None:
+        """Publish workflow failure event."""
+        payload: dict[str, t.Any] = event_context | {"stage": stage}
+        if error is not None:
+            payload["error"] = str(error)
+            payload["error_type"] = type(error).__name__
 
-
-        self.logger.debug("Zuban LSP server auto-start disabled in configuration")
-        return
+        await self._publish_event(WorkflowEvent.WORKFLOW_FAILED, payload)
 
-
-
+    async def _handle_session_ready(
+        self,
+        event: Event,
+        state_flags: dict[str, bool],
+        workflow_id: str,
+        options: OptionsProtocol,
+    ) -> EventHandlerResult:
+        """Handle session ready event."""
+        if state_flags["configuration"]:
+            return EventHandlerResult(success=True)
+        state_flags["configuration"] = True
 
-
-
-
-
+        try:
+            await self._publish_event(
+                WorkflowEvent.CONFIG_PHASE_STARTED,
+                {"workflow_id": workflow_id},
             )
-
+            config_success = await asyncio.to_thread(
+                self.phases.run_configuration_phase,
+                options,
+            )
+            await self._publish_event(
+                WorkflowEvent.CONFIG_PHASE_COMPLETED,
+                {
+                    "workflow_id": workflow_id,
+                    "success": config_success,
+                },
+            )
+            if not config_success:
+                await self._publish_workflow_failure(
+                    {"workflow_id": workflow_id}, "configuration"
+                )
+            return EventHandlerResult(success=config_success)
+        except Exception as exc:  # pragma: no cover - defensive
+            await self._publish_workflow_failure(
+                {"workflow_id": workflow_id}, "configuration", exc
+            )
+            return EventHandlerResult(success=False, error_message=str(exc))
+
+    async def _handle_config_completed(
+        self,
+        event: Event,
+        state_flags: dict[str, bool],
+        workflow_id: str,
+        options: OptionsProtocol,
+    ) -> EventHandlerResult:
+        """Handle configuration completed event."""
+        if not event.payload.get("success", False):
+            return EventHandlerResult(success=False)
+        if state_flags["quality"]:
+            return EventHandlerResult(success=True)
+        state_flags["quality"] = True
 
-        # Auto-start LSP server in background
         try:
-
-
+            await self._publish_event(
+                WorkflowEvent.QUALITY_PHASE_STARTED,
+                {"workflow_id": workflow_id},
+            )
+            quality_success = await self._execute_quality_phase(options, workflow_id)
+            await self._publish_event(
+                WorkflowEvent.QUALITY_PHASE_COMPLETED,
+                {
+                    "workflow_id": workflow_id,
+                    "success": quality_success,
+                },
+            )
+            if not quality_success:
+                await self._publish_workflow_failure(
+                    {"workflow_id": workflow_id}, "quality"
+                )
+            return EventHandlerResult(success=quality_success)
+        except Exception as exc:  # pragma: no cover - defensive
+            await self._publish_workflow_failure(
+                {"workflow_id": workflow_id}, "quality", exc
+            )
+            return EventHandlerResult(success=False, error_message=str(exc))
+
+    async def _handle_quality_completed(
+        self,
+        event: Event,
+        state_flags: dict[str, bool],
+        workflow_id: str,
+        options: OptionsProtocol,
+        publish_requested: bool,
+    ) -> EventHandlerResult:
+        """Handle quality phase completed event."""
+        if not event.payload.get("success", False):
+            return EventHandlerResult(success=False)
+        if state_flags["publishing"]:
+            return EventHandlerResult(success=True)
+        state_flags["publishing"] = True
 
-
-        if
-
-
+        try:
+            if publish_requested:
+                await self._publish_event(
+                    WorkflowEvent.PUBLISH_PHASE_STARTED,
+                    {"workflow_id": workflow_id},
+                )
+                publishing_success = await self._execute_publishing_workflow(
+                    options, workflow_id
+                )
+                await self._publish_event(
+                    WorkflowEvent.PUBLISH_PHASE_COMPLETED,
+                    {
+                        "workflow_id": workflow_id,
+                        "success": publishing_success,
+                    },
+                )
+                if not publishing_success:
+                    await self._publish_workflow_failure(
+                        {"workflow_id": workflow_id}, "publishing"
+                    )
+                    return EventHandlerResult(success=False)
             else:
-
-
-
-
-
-
-
-
-
-
-
-
-
+                await self._publish_event(
+                    WorkflowEvent.PUBLISH_PHASE_COMPLETED,
+                    {
+                        "workflow_id": workflow_id,
+                        "success": True,
+                        "skipped": True,
+                    },
+                )
+            return EventHandlerResult(success=True)
+        except Exception as exc:  # pragma: no cover - defensive
+            await self._publish_workflow_failure(
+                {"workflow_id": workflow_id}, "publishing", exc
+            )
+            return EventHandlerResult(success=False, error_message=str(exc))
+
+    async def _handle_publish_completed(
+        self,
+        event: Event,
+        state_flags: dict[str, bool],
+        workflow_id: str,
+        options: OptionsProtocol,
+        commit_requested: bool,
+        publish_requested: bool,
+        event_context: dict[str, t.Any],
+    ) -> EventHandlerResult:
+        """Handle publishing completed event."""
+        if publish_requested and not event.payload.get("success", False):
+            return EventHandlerResult(success=False)
+        if state_flags["commit"]:
+            return EventHandlerResult(success=True)
+        state_flags["commit"] = True
+
+        try:
+            if commit_requested:
+                await self._publish_event(
+                    WorkflowEvent.COMMIT_PHASE_STARTED,
+                    {"workflow_id": workflow_id},
+                )
+                commit_success = await self._execute_commit_workflow(
+                    options, workflow_id
+                )
+                await self._publish_event(
+                    WorkflowEvent.COMMIT_PHASE_COMPLETED,
+                    {
+                        "workflow_id": workflow_id,
+                        "success": commit_success,
+                    },
+                )
+                if not commit_success:
+                    await self._publish_workflow_failure(
+                        {"workflow_id": workflow_id}, "commit"
+                    )
+                    return EventHandlerResult(success=False)
+            else:
+                await self._publish_event(
+                    WorkflowEvent.COMMIT_PHASE_COMPLETED,
+                    {
+                        "workflow_id": workflow_id,
+                        "success": True,
+                        "skipped": True,
+                    },
+                )
 
-
-
-
-            stderr=subprocess.DEVNULL,
-            start_new_session=True,
+            await self._publish_event(
+                WorkflowEvent.WORKFLOW_COMPLETED,
+                event_context | {"success": True},
             )
+            return EventHandlerResult(success=True)
+        except Exception as exc:  # pragma: no cover - defensive
+            await self._publish_workflow_failure(
+                {"workflow_id": workflow_id}, "commit", exc
+            )
+            return EventHandlerResult(success=False, error_message=str(exc))
 
-
-
+    async def _handle_workflow_completed(
+        self,
+        event: Event,
+        start_time: float,
+        workflow_id: str,
+        completion_future: asyncio.Future[bool],
+        subscriptions: list[str],
+    ) -> EventHandlerResult:
+        """Handle workflow completed event."""
+        return await self._finalize_workflow(
+            start_time,
+            workflow_id,
+            True,
+            completion_future,
+            subscriptions,
+            event.payload,
+        )
+
+    async def _handle_workflow_failed(
+        self,
+        event: Event,
+        start_time: float,
+        workflow_id: str,
+        completion_future: asyncio.Future[bool],
+        subscriptions: list[str],
+    ) -> EventHandlerResult:
+        """Handle workflow failed event."""
+        return await self._finalize_workflow(
+            start_time,
+            workflow_id,
+            False,
+            completion_future,
+            subscriptions,
+            event.payload,
+        )
+
+    async def _run_event_driven_workflow(
+        self,
+        options: OptionsProtocol,
+        workflow_id: str,
+        event_context: dict[str, t.Any],
+        start_time: float,
+    ) -> bool:
+        if not self._event_bus:
+            raise RuntimeError("Workflow event bus is not configured.")
+
+        loop = asyncio.get_running_loop()
+        completion_future: asyncio.Future[bool] = loop.create_future()
+        subscriptions: list[str] = []
+
+        publish_requested = bool(
+            getattr(options, "publish", False) or getattr(options, "all", False)
+        )
+        commit_requested = bool(getattr(options, "commit", False))
+
+        state_flags = {
+            "configuration": False,
+            "quality": False,
+            "publishing": False,
+            "commit": False,
+        }
+
+        # Subscribe to events
+        async def on_session_ready(event: Event) -> EventHandlerResult:
+            return await self._handle_session_ready(
+                event, state_flags, workflow_id, options
             )
 
-
-        self.
+        async def on_config_completed(event: Event) -> EventHandlerResult:
+            return await self._handle_config_completed(
+                event, state_flags, workflow_id, options
+            )
+
+        async def on_quality_completed(event: Event) -> EventHandlerResult:
+            return await self._handle_quality_completed(
+                event, state_flags, workflow_id, options, publish_requested
+            )
+
+        async def on_publish_completed(event: Event) -> EventHandlerResult:
+            return await self._handle_publish_completed(
+                event,
+                state_flags,
+                workflow_id,
+                options,
+                commit_requested,
+                publish_requested,
+                event_context,
+            )
+
+        async def on_workflow_completed(event: Event) -> EventHandlerResult:
+            return await self._handle_workflow_completed(
+                event, start_time, workflow_id, completion_future, subscriptions
+            )
+
+        async def on_workflow_failed(event: Event) -> EventHandlerResult:
+            return await self._handle_workflow_failed(
+                event, start_time, workflow_id, completion_future, subscriptions
+            )
+
+        subscriptions.extend(
+            (
+                self._event_bus.subscribe(
+                    WorkflowEvent.WORKFLOW_SESSION_READY,
+                    on_session_ready,
+                ),
+                self._event_bus.subscribe(
+                    WorkflowEvent.CONFIG_PHASE_COMPLETED,
+                    on_config_completed,
+                ),
+                self._event_bus.subscribe(
+                    WorkflowEvent.QUALITY_PHASE_COMPLETED,
+                    on_quality_completed,
+                ),
+                self._event_bus.subscribe(
+                    WorkflowEvent.PUBLISH_PHASE_COMPLETED,
+                    on_publish_completed,
+                ),
+                self._event_bus.subscribe(
+                    WorkflowEvent.WORKFLOW_COMPLETED,
+                    on_workflow_completed,
+                ),
+                self._event_bus.subscribe(
+                    WorkflowEvent.WORKFLOW_FAILED,
+                    on_workflow_failed,
+                ),
+            )
+        )
+
+        try:
+            await self._publish_event(
+                WorkflowEvent.WORKFLOW_SESSION_INITIALIZING,
+                event_context,
+            )
+            self._session_controller.initialize(options)
+            await self._publish_event(
+                WorkflowEvent.WORKFLOW_SESSION_READY,
+                event_context,
+            )
+        except Exception as exc:  # pragma: no cover - defensive
+            await self._publish_workflow_failure(
+                event_context, "session_initialization", exc
+            )
+            await self._finalize_workflow(
+                start_time, workflow_id, False, completion_future, subscriptions
+            )
+            return False
+
+        return await completion_future
+
+    def _log_workflow_startup_debug(self, options: OptionsProtocol) -> None:
+        if not self._should_debug():
+            return
+
+        self.debugger.log_workflow_phase(
+            "workflow_execution",
+            "started",
+            details={
+                "testing": getattr(options, "test", False),
+                "skip_hooks": getattr(options, "skip_hooks", False),
+                "ai_agent": getattr(options, "ai_agent", False),
+            },
+        )
 
     def _log_zuban_lsp_status(self) -> None:
         """Display current Zuban LSP server status during workflow startup."""
```
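`_run_event_driven_workflow` coordinates phases through the event bus: each handler triggers the next phase, `state_flags` guards against duplicate execution, and the caller awaits an `asyncio.Future` that the terminal `WORKFLOW_COMPLETED`/`WORKFLOW_FAILED` handlers resolve. Below is a minimal self-contained sketch of that completion-future pattern, using a toy bus rather than crackerjack's `WorkflowEventBus`.

```python
import asyncio
import typing as t

Handler = t.Callable[[str, dict[str, t.Any]], t.Awaitable[None]]


class TinyBus:
    """Minimal pub/sub bus to illustrate the completion-future pattern."""

    def __init__(self) -> None:
        self._handlers: dict[str, list[Handler]] = {}

    def subscribe(self, event: str, handler: Handler) -> None:
        self._handlers.setdefault(event, []).append(handler)

    async def publish(self, event: str, payload: dict[str, t.Any]) -> None:
        for handler in self._handlers.get(event, []):
            await handler(event, payload)


async def main() -> bool:
    bus = TinyBus()
    loop = asyncio.get_running_loop()
    done: asyncio.Future[bool] = loop.create_future()

    async def on_config_completed(event: str, payload: dict[str, t.Any]) -> None:
        # The next phase runs only when the previous one reports success.
        if payload["success"]:
            await bus.publish("quality_completed", {"success": True})
        else:
            await bus.publish("workflow_failed", {})

    async def on_quality_completed(event: str, payload: dict[str, t.Any]) -> None:
        await bus.publish("workflow_completed", {"success": payload["success"]})

    async def on_terminal(event: str, payload: dict[str, t.Any]) -> None:
        # Resolve the future exactly once; the caller is awaiting it.
        if not done.done():
            done.set_result(event == "workflow_completed")

    bus.subscribe("config_completed", on_config_completed)
    bus.subscribe("quality_completed", on_quality_completed)
    bus.subscribe("workflow_completed", on_terminal)
    bus.subscribe("workflow_failed", on_terminal)

    await bus.publish("config_completed", {"success": True})
    return await done


print(asyncio.run(main()))  # True
```

Guarding `done.done()` before `set_result` mirrors the `completion_future.done()` check in `_finalize_workflow` above: both terminal events may fire, but the future must be resolved exactly once.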
```diff
@@ -251,54 +637,6 @@ class WorkflowPipeline:
         except Exception as e:
             self.logger.debug(f"Failed to check Zuban LSP status: {e}")
 
-    def _configure_hook_manager_lsp(self, options: OptionsProtocol) -> None:
-        """Configure hook manager with LSP optimization settings."""
-        # Check if LSP hooks are enabled
-        enable_lsp_hooks = getattr(options, "enable_lsp_hooks", False)
-
-        # Configure the hook manager
-        hook_manager = self.phases.hook_manager
-        if hasattr(hook_manager, "configure_lsp_optimization"):
-            hook_manager.configure_lsp_optimization(enable_lsp_hooks)
-
-        if enable_lsp_hooks and not getattr(options, "no_zuban_lsp", False):
-            self.console.print(
-                "🔍 LSP-optimized hook execution enabled for faster type checking",
-                style="blue",
-            )
-
-    def _register_lsp_cleanup_handler(self, options: OptionsProtocol) -> None:
-        """Register cleanup handler to stop LSP server when workflow completes."""
-        # Get configuration to check if we should handle LSP cleanup
-        config = getattr(options, "zuban_lsp", None)
-        if config and not config.enabled:
-            return
-
-        if getattr(options, "no_zuban_lsp", False):
-            return
-
-        def cleanup_lsp_server() -> None:
-            """Cleanup function to gracefully stop LSP server if it was auto-started."""
-            try:
-                from crackerjack.services.server_manager import (
-                    find_zuban_lsp_processes,
-                    stop_process,
-                )
-
-                lsp_processes = find_zuban_lsp_processes()
-                if lsp_processes:
-                    for proc in lsp_processes:
-                        self.logger.debug(
-                            f"Stopping auto-started Zuban LSP server (PID: {proc['pid']})"
-                        )
-                        stop_process(proc["pid"])
-
-            except Exception as e:
-                self.logger.debug(f"Error during LSP cleanup: {e}")
-
-        # Register the cleanup handler with the session
-        self.session.register_cleanup(cleanup_lsp_server)
-
     def _log_workflow_startup_info(self, options: OptionsProtocol) -> None:
         self.logger.info(
             "Starting complete workflow execution",
```
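The `LoggingContext("workflow_execution", ...)` wrapper introduced in `run_complete_workflow` scopes structured log fields to the duration of the workflow. Its implementation lives in `crackerjack/services/logging.py` and is not shown in this diff; the sketch below is only a plausible shape for such a helper, inferred from the call site (hypothetical, not the actual crackerjack code).

```python
import logging
import time
import typing as t

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("crackerjack")


class LoggingContext:
    """Hypothetical sketch: log entry/exit of an operation with context fields."""

    def __init__(self, operation: str, **fields: t.Any) -> None:
        self.operation = operation
        self.fields = fields
        self.start = 0.0

    def __enter__(self) -> "LoggingContext":
        self.start = time.monotonic()
        logger.info("start %s %s", self.operation, self.fields)
        return self

    def __exit__(
        self, exc_type: type | None, exc: BaseException | None, tb: t.Any
    ) -> None:
        duration = time.monotonic() - self.start
        status = "failed" if exc else "completed"
        logger.info("%s %s in %.2fs %s", self.operation, status, duration, self.fields)


with LoggingContext("workflow_execution", testing=False, skip_hooks=False):
    pass  # workflow body goes here
```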
```diff
@@ -313,7 +651,9 @@ class WorkflowPipeline:
     async def _execute_workflow_with_timing(
         self, options: OptionsProtocol, start_time: float, workflow_id: str
     ) -> bool:
-        success = await self._execute_workflow_phases(
+        success = await self._phase_executor._execute_workflow_phases(
+            options, workflow_id
+        )
         self.session.finalize_session(start_time, success)
 
         duration = time.time() - start_time
```
```diff
@@ -351,36 +691,9 @@ class WorkflowPipeline:
             return
 
         try:
-
-            {
-                "workflow_id": workflow_id,
-                "total_duration": duration,
-                "success": success,
-                "cache_metrics": self._cache.get_stats() if self._cache else {},
-                "memory_metrics": self._memory_optimizer.get_stats()
-                if hasattr(self._memory_optimizer, "get_stats")
-                else {},
-            }
-
-            # Generate benchmark comparison
+            self._gather_performance_metrics(workflow_id, duration, success)
             benchmark_results = await self._performance_benchmarks.run_benchmark_suite()
-
-            # Display compact performance summary
-            if benchmark_results:
-                self.console.print("\n[cyan]📊 Performance Benchmark Summary[/cyan]")
-                self.console.print(f"Workflow Duration: [bold]{duration:.2f}s[/bold]")
-
-                # Show key performance improvements if available
-                for result in benchmark_results.results[:3]:  # Top 3 results
-                    if result.time_improvement_percentage > 0:
-                        self.console.print(
-                            f"[green]⚡[/green] {result.test_name}: {result.time_improvement_percentage:.1f}% faster"
-                        )
-
-                    if result.cache_hit_ratio > 0:
-                        self.console.print(
-                            f"[blue]🎯[/blue] Cache efficiency: {result.cache_hit_ratio:.0%}"
-                        )
+            self._display_benchmark_results(benchmark_results, duration)
 
         except Exception as e:
             self.console.print(
```
@@ -390,6 +703,53 @@ class WorkflowPipeline:
|
|
|
390
703
|
if self.debugger.enabled:
|
|
391
704
|
self.debugger.print_debug_summary()
|
|
392
705
|
|
|
706
|
+
def _gather_performance_metrics(
|
|
707
|
+
self, workflow_id: str, duration: float, success: bool
|
|
708
|
+
) -> dict[str, t.Any]:
|
|
709
|
+
"""Gather performance metrics from workflow execution."""
|
|
710
|
+
return {
|
|
711
|
+
"workflow_id": workflow_id,
|
|
712
|
+
"total_duration": duration,
|
|
713
|
+
"success": success,
|
|
714
|
+
"cache_metrics": self._cache.get_stats() if self._cache else {},
|
|
715
|
+
"memory_metrics": self._memory_optimizer.get_stats()
|
|
716
|
+
if hasattr(self._memory_optimizer, "get_stats")
|
|
717
|
+
else {},
|
|
718
|
+
}
|
|
719
|
+
|
|
720
|
+
def _display_benchmark_results(
|
|
721
|
+
self, benchmark_results: t.Any, duration: float
|
|
722
|
+
) -> None:
|
|
723
|
+
"""Display compact performance summary."""
|
|
724
|
+
if not benchmark_results:
|
|
725
|
+
return
|
|
726
|
+
|
|
727
|
+
self.console.print("\n[cyan]📊 Performance Benchmark Summary[/cyan]")
|
|
728
|
+
self.console.print(f"Workflow Duration: [bold]{duration:.2f}s[/bold]")
|
|
729
|
+
|
|
730
|
+
self._show_performance_improvements(benchmark_results)
|
|
731
|
+
|
|
732
|
+
def _show_performance_improvements(self, benchmark_results: t.Any) -> None:
|
|
733
|
+
"""Show key performance improvements from benchmark results."""
|
|
734
|
+
for result in benchmark_results.results[:3]: # Top 3 results
|
|
735
|
+
self._display_time_improvement(result)
|
|
736
|
+
self._display_cache_efficiency(result)
|
|
737
|
+
|
|
738
|
+
def _display_time_improvement(self, result: t.Any) -> None:
|
|
739
|
+
"""Display time improvement percentage if available."""
|
|
740
|
+
if result.time_improvement_percentage > 0:
|
|
741
|
+
self.console.print(
|
|
742
|
+
f"[green]⚡[/green] {result.test_name}:"
|
|
743
|
+
f" {result.time_improvement_percentage:.1f}% faster"
|
|
744
|
+
)
|
|
745
|
+
|
|
746
|
+
def _display_cache_efficiency(self, result: t.Any) -> None:
|
|
747
|
+
"""Display cache hit ratio if available."""
|
|
748
|
+
if result.cache_hit_ratio > 0:
|
|
749
|
+
self.console.print(
|
|
750
|
+
f"[blue]🎯[/blue] Cache efficiency: {result.cache_hit_ratio:.0%}"
|
|
751
|
+
)
|
|
752
|
+
|
|
393
753
|
def _handle_user_interruption(self) -> bool:
|
|
394
754
|
self.console.print("Interrupted by user")
|
|
395
755
|
self.session.fail_task("workflow", "Interrupted by user")
|
|
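
The hunk above splits an inline benchmark summary into small single-purpose display helpers. A minimal standalone sketch of the same decomposition, with plain print standing in for the Rich console (the BenchmarkResult fields mirror the attributes referenced in the diff; everything else here is illustrative):

import typing as t
from dataclasses import dataclass, field


@dataclass
class BenchmarkResult:
    test_name: str
    time_improvement_percentage: float
    cache_hit_ratio: float


@dataclass
class BenchmarkSuite:
    results: list[BenchmarkResult] = field(default_factory=list)


def display_benchmark_results(suite: BenchmarkSuite | None, duration: float) -> None:
    # Guard clause keeps the caller free of None checks
    if not suite:
        return
    print("📊 Performance Benchmark Summary")
    print(f"Workflow Duration: {duration:.2f}s")
    for result in suite.results[:3]:  # top 3, as in the diff
        if result.time_improvement_percentage > 0:
            print(f"⚡ {result.test_name}: {result.time_improvement_percentage:.1f}% faster")
        if result.cache_hit_ratio > 0:
            print(f"🎯 Cache efficiency: {result.cache_hit_ratio:.0%}")


display_benchmark_results(
    BenchmarkSuite([BenchmarkResult("hook_cache", 12.5, 0.84)]), duration=3.42
)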
@@ -406,45 +766,6 @@ class WorkflowPipeline:
             )
             return False
 
-    async def _execute_workflow_phases(
-        self, options: OptionsProtocol, workflow_id: str
-    ) -> bool:
-        success = True
-
-        with phase_monitor(workflow_id, "configuration"):
-            config_success = self.phases.run_configuration_phase(options)
-            success = success and config_success
-
-        quality_success = await self._execute_quality_phase(options, workflow_id)
-        success = success and quality_success
-
-        # If quality phase failed and we're in publishing mode, stop here
-        if not quality_success and self._is_publishing_workflow(options):
-            return False
-
-        # Execute publishing workflow if requested
-        publishing_success = await self._execute_publishing_workflow(
-            options, workflow_id
-        )
-        if not publishing_success:
-            success = False
-
-        # Execute commit workflow independently if requested
-        # Note: Commit workflow runs regardless of publish success to ensure
-        # version bump changes are always committed when requested
-        commit_success = await self._execute_commit_workflow(options, workflow_id)
-        if not commit_success:
-            success = False
-
-        # Only fail the overall workflow if publishing was explicitly requested and failed
-        if not publishing_success and (options.publish or options.all):
-            self.console.print(
-                "[red]❌ Publishing failed - overall workflow marked as failed[/red]"
-            )
-            return False
-
-        return success
-
     def _handle_quality_phase_result(
         self, success: bool, quality_success: bool, options: OptionsProtocol
     ) -> bool:
@@ -469,222 +790,14 @@ class WorkflowPipeline:
             return False
         return success
 
-    def _is_publishing_workflow(self, options: OptionsProtocol) -> bool:
-        return bool(options.publish or options.all)
-
-    async def _execute_publishing_workflow(
-        self, options: OptionsProtocol, workflow_id: str
-    ) -> bool:
-        if not options.publish and not options.all:
-            return True
-
-        with phase_monitor(workflow_id, "publishing"):
-            if not self.phases.run_publishing_phase(options):
-                self.session.fail_task("workflow", "Publishing failed")
-                return False
-        return True
-
-    async def _execute_commit_workflow(
-        self, options: OptionsProtocol, workflow_id: str
-    ) -> bool:
-        if not options.commit:
-            return True
-
-        with phase_monitor(workflow_id, "commit"):
-            if not self.phases.run_commit_phase(options):
-                return False
-        return True
-
-    async def _execute_quality_phase(
-        self, options: OptionsProtocol, workflow_id: str
-    ) -> bool:
-        # Use quality intelligence to make informed decisions about quality phase
-        if self._quality_intelligence:
-            quality_decision = await self._make_quality_intelligence_decision(options)
-            self.console.print(
-                f"[dim]🧠 Quality Intelligence: {quality_decision}[/dim]"
-            )
-
-        if hasattr(options, "fast") and options.fast:
-            return await self._run_fast_hooks_phase_monitored(options, workflow_id)
-        if hasattr(options, "comp") and options.comp:
-            return await self._run_comprehensive_hooks_phase_monitored(
-                options, workflow_id
-            )
-        if getattr(options, "test", False):
-            return await self._execute_test_workflow(options, workflow_id)
-        return await self._execute_standard_hooks_workflow_monitored(
-            options, workflow_id
-        )
-
-    async def _make_quality_intelligence_decision(
-        self, options: OptionsProtocol
-    ) -> str:
-        """Use quality intelligence to make informed decisions about workflow execution."""
-        try:
-            if not self._quality_intelligence:
-                return "Quality intelligence not available"
-
-            # Analyze recent quality trends and anomalies
-            anomalies = self._quality_intelligence.detect_anomalies()
-            patterns = self._quality_intelligence.identify_patterns()
-
-            # Make intelligent recommendations based on current state
-            recommendations = []
-            if anomalies:
-                high_severity_anomalies = [
-                    a for a in anomalies if a.severity.name in ("CRITICAL", "HIGH")
-                ]
-                if high_severity_anomalies:
-                    recommendations.append(
-                        "comprehensive analysis recommended due to quality anomalies"
-                    )
-                else:
-                    recommendations.append("standard quality checks sufficient")
-
-            if patterns:
-                improving_patterns = [
-                    p for p in patterns if p.trend_direction.name == "IMPROVING"
-                ]
-                if improving_patterns:
-                    recommendations.append("quality trending upward")
-                else:
-                    recommendations.append("quality monitoring active")
-
-            if not recommendations:
-                recommendations.append("baseline quality analysis active")
-
-            return "; ".join(recommendations)
-
-        except Exception as e:
-            return f"Quality intelligence analysis failed: {str(e)[:50]}..."
-
-    async def _execute_test_workflow(
-        self, options: OptionsProtocol, workflow_id: str
-    ) -> bool:
-        iteration = self._start_iteration_tracking(options)
-
-        if not await self._execute_initial_phases(options, workflow_id, iteration):
-            return False
-
-        (
-            testing_passed,
-            comprehensive_passed,
-        ) = await self._run_main_quality_phases_async(options, workflow_id)
-
-        return await self._handle_workflow_completion(
-            options, iteration, testing_passed, comprehensive_passed, workflow_id
-        )
-
-    async def _execute_initial_phases(
-        self, options: OptionsProtocol, workflow_id: str, iteration: int
-    ) -> bool:
-        with phase_monitor(workflow_id, "fast_hooks") as monitor:
-            if not await self._run_initial_fast_hooks_async(
-                options, iteration, monitor
-            ):
-                return False
-
-        return self._execute_optional_cleaning_phase(options)
-
-    def _execute_optional_cleaning_phase(self, options: OptionsProtocol) -> bool:
-        if not getattr(options, "clean", False):
-            return True
-
-        if not self._run_code_cleaning_phase(options):
-            return False
-
-        if not self._run_post_cleaning_fast_hooks(options):
-            return False
-
-        self._mark_code_cleaning_complete()
-        return True
-
-    async def _handle_workflow_completion(
-        self,
-        options: OptionsProtocol,
-        iteration: int,
-        testing_passed: bool,
-        comprehensive_passed: bool,
-        workflow_id: str = "unknown",
-    ) -> bool:
-        if options.ai_agent:
-            return await self._handle_ai_agent_workflow(
-                options, iteration, testing_passed, comprehensive_passed, workflow_id
-            )
-
-        return await self._handle_standard_workflow(
-            options, iteration, testing_passed, comprehensive_passed
-        )
-
-    def _start_iteration_tracking(self, options: OptionsProtocol) -> int:
-        iteration = 1
-        if options.ai_agent and self._should_debug():
-            self.debugger.log_iteration_start(iteration)
-        return iteration
-
     def _run_initial_fast_hooks(self, options: OptionsProtocol, iteration: int) -> bool:
-        fast_hooks_passed = self._run_fast_hooks_phase(options)
+        fast_hooks_passed = self._phase_executor._run_fast_hooks_phase(options)
         if not fast_hooks_passed:
             if options.ai_agent and self._should_debug():
                 self.debugger.log_iteration_end(iteration, False)
             return False
         return True
 
-    async def _run_main_quality_phases_async(
-        self, options: OptionsProtocol, workflow_id: str
-    ) -> tuple[bool, bool]:
-        testing_task = asyncio.create_task(
-            self._run_testing_phase_async(options, workflow_id)
-        )
-        comprehensive_task = asyncio.create_task(
-            self._run_comprehensive_hooks_phase_monitored(options, workflow_id)
-        )
-
-        results = await asyncio.gather(
-            testing_task, comprehensive_task, return_exceptions=True
-        )
-
-        testing_result, comprehensive_result = results
-
-        if isinstance(testing_result, Exception):
-            self.logger.error(f"Testing phase failed with exception: {testing_result}")
-            testing_passed = False
-        else:
-            testing_passed = bool(testing_result)
-
-        if isinstance(comprehensive_result, Exception):
-            self.logger.error(
-                f"Comprehensive hooks failed with exception: {comprehensive_result}"
-            )
-            comprehensive_passed = False
-        else:
-            comprehensive_passed = bool(comprehensive_result)
-
-        return testing_passed, comprehensive_passed
-
-    async def _handle_ai_agent_workflow(
-        self,
-        options: OptionsProtocol,
-        iteration: int,
-        testing_passed: bool,
-        comprehensive_passed: bool,
-        workflow_id: str = "unknown",
-    ) -> bool:
-        if not await self._process_security_gates(options):
-            return False
-
-        needs_ai_fixing = self._determine_ai_fixing_needed(
-            testing_passed, comprehensive_passed, bool(options.publish or options.all)
-        )
-
-        if needs_ai_fixing:
-            return await self._execute_ai_fixing_workflow(options, iteration)
-
-        return self._finalize_ai_workflow_success(
-            options, iteration, testing_passed, comprehensive_passed
-        )
-
     async def _process_security_gates(self, options: OptionsProtocol) -> bool:
         publishing_requested, security_blocks = (
             self._check_security_gates_for_publishing(options)
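
Among the methods removed above, `_run_main_quality_phases_async` ran the testing and comprehensive-hook phases concurrently with `asyncio.gather(..., return_exceptions=True)`, so one failing phase could not cancel the other. A minimal sketch of that pattern (the phase bodies are stand-ins, not the package's code):

import asyncio


async def run_phase(name: str, ok: bool) -> bool:
    await asyncio.sleep(0)  # stand-in for real phase work
    if not ok:
        raise RuntimeError(f"{name} failed")
    return True


async def run_main_quality_phases() -> tuple[bool, bool]:
    # return_exceptions=True turns raised exceptions into returned results,
    # so both phases always run to completion
    testing, comprehensive = await asyncio.gather(
        run_phase("testing", True),
        run_phase("comprehensive_hooks", False),
        return_exceptions=True,
    )
    return (
        not isinstance(testing, BaseException) and bool(testing),
        not isinstance(comprehensive, BaseException) and bool(comprehensive),
    )


print(asyncio.run(run_main_quality_phases()))  # (True, False)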
@@ -698,240 +811,180 @@ class WorkflowPipeline:
         )
         return security_fix_result
 
-
-        self
-    ) -> bool:
-        success = await self._run_ai_agent_fixing_phase(options)
-        if self._should_debug():
-            self.debugger.log_iteration_end(iteration, success)
-        return success
-
-    def _finalize_ai_workflow_success(
-        self,
-        options: OptionsProtocol,
-        iteration: int,
-        testing_passed: bool,
-        comprehensive_passed: bool,
-    ) -> bool:
-        publishing_requested = bool(options.publish or options.all)
-
-        final_success = self._determine_workflow_success(
-            testing_passed, comprehensive_passed, publishing_requested
-        )
-
-        self._show_partial_success_warning_if_needed(
-            publishing_requested, final_success, testing_passed, comprehensive_passed
-        )
-
-        if self._should_debug():
-            self.debugger.log_iteration_end(iteration, final_success)
-
-        return final_success
-
-    def _show_partial_success_warning_if_needed(
-        self,
-        publishing_requested: bool,
-        final_success: bool,
-        testing_passed: bool,
-        comprehensive_passed: bool,
-    ) -> None:
-        should_show_warning = (
-            publishing_requested
-            and final_success
-            and not (testing_passed and comprehensive_passed)
-        )
-
-        if should_show_warning:
-            self._show_security_audit_warning()
+    def _execute_standard_hooks_workflow(self, options: OptionsProtocol) -> bool:
+        self._phase_executor._update_hooks_status_running()
 
-
-
-
-        iteration: int,
-        testing_passed: bool,
-        comprehensive_passed: bool,
-    ) -> bool:
-        publishing_requested, security_blocks = (
-            self._check_security_gates_for_publishing(options)
-        )
+        if not self._execute_fast_hooks_workflow(options):
+            self._phase_executor._handle_hooks_completion(False)
+            return False
 
-        if
-
+        if not self._execute_cleaning_workflow_if_needed(options):
+            self._phase_executor._handle_hooks_completion(False)
+            return False
 
-
-
-            comprehensive_passed,
-            publishing_requested,
+        comprehensive_success = self._phase_executor._run_comprehensive_hooks_phase(
+            options
         )
+        self._phase_executor._handle_hooks_completion(comprehensive_success)
 
-
-            publishing_requested
-            and success
-            and not (testing_passed and comprehensive_passed)
-        ):
-            self._show_security_audit_warning()
-        elif publishing_requested and not success:
-            self.console.print(
-                "[red]❌ Quality checks failed - cannot proceed to publishing[/red]"
-            )
+        return comprehensive_success
 
-
-
+    def _execute_fast_hooks_workflow(self, options: OptionsProtocol) -> bool:
+        """Execute fast hooks phase."""
+        return self._phase_executor._run_fast_hooks_phase(options)
 
-
-
-
+    def _execute_cleaning_workflow_if_needed(self, options: OptionsProtocol) -> bool:
+        """Execute cleaning workflow if requested."""
+        if not getattr(options, "clean", False):
+            return True
 
-
-
+        if not self._phase_executor._run_code_cleaning_phase(options):
+            return False
 
-        if not self.
-            self.session.fail_task("workflow", "Fast hooks failed")
-            self._update_mcp_status("fast", "failed")
+        if not self._phase_executor._run_post_cleaning_fast_hooks(options):
             return False
 
-        self.
+        self._phase_executor._mark_code_cleaning_complete()
         return True
 
-    def
-
-
-
-        if not success:
-            self.session.fail_task("workflow", "Testing failed")
-            self._handle_test_failures()
-            self._update_mcp_status("tests", "failed")
-
-        else:
-            self._update_mcp_status("tests", "completed")
-
-        return success
-
-    def _run_comprehensive_hooks_phase(self, options: OptionsProtocol) -> bool:
-        self._update_mcp_status("comprehensive", "running")
-
-        success = self.phases.run_comprehensive_hooks_only(options)
-        if not success:
-            self.session.fail_task("comprehensive_hooks", "Comprehensive hooks failed")
-            self._update_mcp_status("comprehensive", "failed")
-
-        else:
-            self._update_mcp_status("comprehensive", "completed")
-
-        return success
-
-    def _update_mcp_status(self, stage: str, status: str) -> None:
-        if hasattr(self, "_mcp_state_manager") and self._mcp_state_manager:
-            self._mcp_state_manager.update_stage_status(stage, status)
-
-    def _run_code_cleaning_phase(self, options: OptionsProtocol) -> bool:
-        self.console.print("\n[bold blue]🧹 Running Code Cleaning Phase...[/bold blue]")
-
-        success = self.phases.run_cleaning_phase(options)
-        if success:
-            self.console.print("[green]✅ Code cleaning completed successfully[/green]")
-        else:
-            self.console.print("[red]❌ Code cleaning failed[/red]")
-            self.session.fail_task("workflow", "Code cleaning phase failed")
-
-        return success
-
-    def _run_post_cleaning_fast_hooks(self, options: OptionsProtocol) -> bool:
-        self.console.print(
-            "\n[bold cyan]🔍 Running Post-Cleaning Fast Hooks Sanity Check...[/bold cyan]"
+    def _is_publishing_workflow(self, options: OptionsProtocol) -> bool:
+        """Check if this is a publishing workflow."""
+        return bool(
+            getattr(options, "publish", False) or getattr(options, "all", False)
         )
 
-
-
-
-
-
-
-
-
-
-
-        return getattr(self, "_code_cleaning_complete", False)
-
-    def _mark_code_cleaning_complete(self) -> None:
-        self._code_cleaning_complete = True
-
-    def _handle_test_failures(self) -> None:
-        if not (hasattr(self, "_mcp_state_manager") and self._mcp_state_manager):
-            return
-
-        test_manager = self.phases.test_manager
-        if not hasattr(test_manager, "get_test_failures"):
-            return
-
-        failures = test_manager.get_test_failures()
-
-        if self._should_debug():
-            self.debugger.log_test_failures(len(failures))
-
-        from crackerjack.mcp.state import Issue, Priority
+    def _update_mcp_status(self, phase: str, status: str) -> None:
+        """Update MCP (Model Context Protocol) status."""
+        # Check if _mcp_state_manager exists and is not None
+        mcp_state_manager = getattr(self, "_mcp_state_manager", None)
+        if mcp_state_manager:
+            try:
+                mcp_state_manager.update_status(phase, status)
+            except (AttributeError, TypeError, RuntimeError) as e:
+                # If MCP is not available or fails, continue without error
+                self.logger.debug(f"MCP status update failed: {e}")
 
-
-
-
-
-
-
-
-
-
-        )
-
+    async def _execute_quality_phase(
+        self, options: OptionsProtocol, workflow_id: str
+    ) -> bool:
+        """Execute the quality phase of the workflow."""
+        try:
+            # Check if this is a publishing workflow
+            is_publishing = self._is_publishing_workflow(options)
+
+            # Run fast hooks phase first
+            fast_success = self.phases.run_fast_hooks_only(options)
+            if not fast_success and is_publishing:
+                return False  # For publishing workflows, fast hook failures should stop execution
+
+            # Run comprehensive hooks phase
+            comprehensive_success = self.phases.run_comprehensive_hooks_only(options)
+            if not comprehensive_success and is_publishing:
+                return False  # For publishing workflows, comprehensive hook failures should stop execution
+
+            # Both fast and comprehensive hooks must pass for success
+            quality_success = fast_success and comprehensive_success
+
+            # Run testing phase if requested
+            if getattr(options, "test", False):
+                testing_success = self.phases.run_testing_phase(options)
+                if not testing_success and is_publishing:
+                    return False  # For publishing workflows, test failures should stop execution
+                # For non-publishing workflows, testing failures should factor into overall success too
+                quality_success = quality_success and testing_success
+
+            return quality_success
+        except Exception as e:
+            self.logger.error(f"Quality phase execution failed: {e}")
+            return False
 
-    def
-        self
+    async def _execute_publishing_workflow(
+        self, options: OptionsProtocol, workflow_id: str
+    ) -> bool:
+        """Execute the publishing workflow phase."""
+        try:
+            # Run publishing phase
+            publishing_success = self.phases.run_publishing_phase(options)
+            return publishing_success
+        except Exception as e:
+            self.logger.error(f"Publishing workflow execution failed: {e}")
+            return False
 
-
-
-
+    async def _execute_commit_workflow(
+        self, options: OptionsProtocol, workflow_id: str
+    ) -> bool:
+        """Execute the commit workflow phase."""
+        try:
+            # Run commit phase
+            commit_success = self.phases.run_commit_phase(options)
+            return commit_success
+        except Exception as e:
+            self.logger.error(f"Commit workflow execution failed: {e}")
             return False
 
-
-
-
-
+    def _has_code_cleaning_run(self) -> bool:
+        """Check if code cleaning has already run in this session."""
+        # Check session metadata or a dedicated flag
+        if (
+            self.session.session_tracker
+            and "code_cleaning_completed" in self.session.session_tracker.metadata
+        ):
+            return bool(
+                self.session.session_tracker.metadata["code_cleaning_completed"]
+            )
+        return False
 
-
-
-
-        self.
+    def _mark_code_cleaning_complete(self) -> None:
+        """Mark that code cleaning has been completed."""
+        if self.session.session_tracker:
+            self.session.session_tracker.metadata["code_cleaning_completed"] = True
 
-
+    def _run_code_cleaning_phase(self, options: OptionsProtocol) -> bool:
+        """Execute code cleaning phase - wrapper for ACB workflow compatibility."""
+        result: bool = self.phases.run_cleaning_phase(options)  # type: ignore[arg-type,assignment]
+        return result
 
-
-
+    def _run_post_cleaning_fast_hooks(self, options: OptionsProtocol) -> bool:
+        """Run fast hooks after code cleaning phase."""
+        result: bool = self.phases.run_fast_hooks_only(options)  # type: ignore[arg-type,assignment]
+        return result
 
-
+    def _run_fast_hooks_phase(self, options: OptionsProtocol) -> bool:
+        """Execute fast hooks phase - wrapper for ACB workflow compatibility."""
+        result: bool = self.phases.run_fast_hooks_only(options)  # type: ignore[arg-type,assignment]
+        return result
 
-    def
-
-
-
+    def _run_comprehensive_hooks_phase(self, options: OptionsProtocol) -> bool:
+        """Execute comprehensive hooks phase - wrapper for ACB workflow compatibility."""
+        result: bool = self.phases.run_comprehensive_hooks_only(options)  # type: ignore[arg-type,assignment]
+        return result
 
-    def
-
-
-
-
-
+    def _run_testing_phase(self, options: OptionsProtocol) -> bool:
+        """Execute testing phase - wrapper for ACB workflow compatibility."""
+        result: bool = self.phases.run_testing_phase(options)  # type: ignore[arg-type,assignment]
+        return result
+
+    def _configure_session_cleanup(self, options: OptionsProtocol) -> None:
+        """Configure session cleanup handlers."""
+        # Add any necessary session cleanup configuration here
+        self.session.register_cleanup(self._cleanup_workflow_resources)
+        if hasattr(self, "_mcp_state_manager") and self._mcp_state_manager:
+            self.session.register_cleanup(self._mcp_state_manager.cleanup)
 
-    def
-
+    def _initialize_zuban_lsp(self, options: OptionsProtocol) -> None:
+        """Initialize Zuban LSP server if needed."""
+        # Placeholder implementation - actual LSP initialization would go here
+        pass
 
-    def
-
-
-
+    def _configure_hook_manager_lsp(self, options: OptionsProtocol) -> None:
+        """Configure hook manager LSP settings."""
+        # Placeholder implementation - actual hook manager LSP configuration would go here
+        pass
 
-    def
-
-
-
+    def _register_lsp_cleanup_handler(self, options: OptionsProtocol) -> None:
+        """Register LSP cleanup handler."""
+        # Placeholder implementation - actual LSP cleanup handler would go here
+        pass
 
     async def _run_ai_agent_fixing_phase(self, options: OptionsProtocol) -> bool:
         self._initialize_ai_fixing_phase(options)
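
The rewritten `_update_mcp_status` above treats status reporting as best-effort: `getattr` tolerates a manager that was never assigned, and a narrow `except` keeps a broken manager from failing the workflow. The same shape in isolation (class and attribute names are illustrative, not the package's API):

import logging
import typing as t

logger = logging.getLogger(__name__)


class PhaseStatusReporter:
    _state_manager: t.Any = None  # optional collaborator; may be left unset or None

    def update_status(self, phase: str, status: str) -> None:
        manager = getattr(self, "_state_manager", None)
        if not manager:
            return
        try:
            manager.update_status(phase, status)
        except (AttributeError, TypeError, RuntimeError) as exc:
            # Reporting is optional; log at debug level and carry on
            logger.debug("status update failed: %s", exc)


PhaseStatusReporter().update_status("tests", "running")  # silently a no-op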
@@ -952,6 +1005,12 @@ class WorkflowPipeline:
     def _initialize_ai_fixing_phase(self, options: OptionsProtocol) -> None:
         self._update_mcp_status("ai_fixing", "running")
         self.logger.info("Starting AI agent fixing phase")
+        # Always log this important phase start for AI consumption
+        self.logger.info(
+            "AI agent fixing phase started",
+            ai_agent_fixing=True,
+            event_type="ai_fix_init",
+        )
         self._log_debug_phase_start()
 
     def _prepare_ai_fixing_environment(self, options: OptionsProtocol) -> None:
@@ -972,7 +1031,7 @@ class WorkflowPipeline:
 
     async def _setup_ai_fixing_workflow(
         self,
-    ) -> tuple[
+    ) -> tuple[EnhancedAgentCoordinator, list[t.Any]]:
         agent_coordinator = self._setup_agent_coordinator()
         issues = await self._collect_issues_from_failures()
         return agent_coordinator, issues
@@ -980,7 +1039,7 @@ class WorkflowPipeline:
     async def _execute_ai_fixes(
         self,
         options: OptionsProtocol,
-        agent_coordinator:
+        agent_coordinator: EnhancedAgentCoordinator,
         issues: list[t.Any],
     ) -> bool:
         self.logger.info(f"AI agents will attempt to fix {len(issues)} issues")
@@ -994,16 +1053,25 @@ class WorkflowPipeline:
             "started",
             details={"ai_agent": True},
         )
+        # Log structured data to stderr for AI consumption
+        self.logger.info(
+            "AI agent fixing phase started",
+            ai_agent_fixing=True,
+            event_type="ai_fix_start",
+        )
 
-    def _setup_agent_coordinator(self) ->
-        from crackerjack.agents.
+    def _setup_agent_coordinator(self) -> EnhancedAgentCoordinator:
+        from crackerjack.agents.enhanced_coordinator import create_enhanced_coordinator
 
         agent_context = AgentContext(
            project_path=self.pkg_path,
            session_id=getattr(self.session, "session_id", None),
        )
 
-
+        # Use enhanced coordinator with Claude Code agent integration
+        agent_coordinator = create_enhanced_coordinator(
+            context=agent_context, enable_external_agents=True
+        )
         agent_coordinator.initialize_agents()
         return agent_coordinator
 
@@ -1058,6 +1126,16 @@ class WorkflowPipeline:
             self.debugger.log_test_fixes(test_fixes)
             self.debugger.log_hook_fixes(hook_fixes)
 
+        # Log structured data to stderr for AI consumption
+        self.logger.info(
+            "AI fixes applied",
+            ai_agent_fixing=True,
+            event_type="ai_fix_counts",
+            total_fixes=total_fixes,
+            test_fixes=test_fixes,
+            hook_fixes=hook_fixes,
+        )
+
     def _log_debug_phase_completion(self, success: bool, fix_result: t.Any) -> None:
         if self._should_debug():
             self.debugger.log_workflow_phase(
@@ -1069,6 +1147,16 @@ class WorkflowPipeline:
                     "remaining_issues": len(fix_result.remaining_issues),
                 },
             )
+        # Log structured data to stderr for AI consumption
+        self.logger.info(
+            f"AI agent fixing phase {'completed' if success else 'failed'}",
+            ai_agent_fixing=True,
+            event_type="ai_fix_completion",
+            success=success,
+            confidence=fix_result.confidence,
+            fixes_applied=len(fix_result.fixes_applied),
+            remaining_issues=len(fix_result.remaining_issues),
+        )
 
     def _handle_fixing_phase_error(self, error: Exception) -> bool:
         self.logger.exception(f"AI agent fixing phase failed: {error}")
@@ -1081,6 +1169,14 @@ class WorkflowPipeline:
                 "failed",
                 details={"error": str(error)},
             )
+        # Log structured data to stderr for AI consumption
+        self.logger.error(
+            "AI agent fixing phase failed",
+            ai_agent_fixing=True,
+            event_type="ai_fix_error",
+            error=str(error),
+            error_type=type(error).__name__,
+        )
 
         return False
 
@@ -1116,15 +1212,16 @@ class WorkflowPipeline:
         return test_success
 
     def _should_verify_hook_fixes(self, fixes_applied: list[str]) -> bool:
-        hook_fixes = [
-            f
-            for f in fixes_applied
-            if "hook" not in f.lower()
-            or "complexity" in f.lower()
-            or "type" in f.lower()
-        ]
+        hook_fixes = [fix for fix in fixes_applied if self._is_hook_related_fix(fix)]
         return bool(hook_fixes)
 
+    def _is_hook_related_fix(self, fix: str) -> bool:
+        """Check if a fix is related to hooks and should trigger hook verification."""
+        fix_lower = fix.lower()
+        return (
+            "hook" not in fix_lower or "complexity" in fix_lower or "type" in fix_lower
+        )
+
     async def _verify_hook_fixes(self, options: OptionsProtocol) -> bool:
         self.logger.info("Re-running comprehensive hooks to verify hook fixes")
         hook_success = self.phases.run_comprehensive_hooks_only(options)
@@ -1299,35 +1396,30 @@ class WorkflowPipeline:
         return issues
 
     def _parse_comprehensive_hook_errors(self, error_msg: str) -> list[Issue]:
-        issues: list[Issue] = []
         error_lower = error_msg.lower()
+        error_checkers = self._get_comprehensive_error_checkers()
 
-
-
-
-
-
-        if type_error_issue:
-            issues.append(type_error_issue)
-
-        security_issue = self._check_security_error(error_lower)
-        if security_issue:
-            issues.append(security_issue)
-
-        performance_issue = self._check_performance_error(error_lower)
-        if performance_issue:
-            issues.append(performance_issue)
-
-        dead_code_issue = self._check_dead_code_error(error_lower)
-        if dead_code_issue:
-            issues.append(dead_code_issue)
-
-        regex_issue = self._check_regex_validation_error(error_lower)
-        if regex_issue:
-            issues.append(regex_issue)
+        issues = []
+        for check_func in error_checkers:
+            issue = check_func(error_lower)
+            if issue:
+                issues.append(issue)
 
         return issues
 
+    def _get_comprehensive_error_checkers(
+        self,
+    ) -> list[t.Callable[[str], Issue | None]]:
+        """Get list of error checking functions for comprehensive hooks."""
+        return [
+            self._check_complexity_error,
+            self._check_type_error,
+            self._check_security_error,
+            self._check_performance_error,
+            self._check_dead_code_error,
+            self._check_regex_validation_error,
+        ]
+
     def _check_complexity_error(self, error_lower: str) -> Issue | None:
         if "complexipy" in error_lower or "c901" in error_lower:
             return Issue(
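
`_parse_comprehensive_hook_errors` above now iterates a table of checker callables instead of repeating five near-identical if-blocks, so adding a checker becomes a one-line change. A self-contained sketch of the pattern (checker names and keywords are illustrative, not the package's):

from dataclasses import dataclass
from typing import Callable


@dataclass
class Issue:  # stand-in for the package's Issue model
    kind: str
    message: str


def check_complexity(error_lower: str) -> Issue | None:
    return Issue("complexity", "function too complex") if "c901" in error_lower else None


def check_security(error_lower: str) -> Issue | None:
    return Issue("security", "bandit finding") if "bandit" in error_lower else None


ERROR_CHECKERS: list[Callable[[str], Issue | None]] = [check_complexity, check_security]


def parse_hook_errors(error_msg: str) -> list[Issue]:
    # One loop over the dispatch table replaces a chain of check/append blocks
    lowered = error_msg.lower()
    return [issue for check in ERROR_CHECKERS if (issue := check(lowered)) is not None]


print(parse_hook_errors("Bandit flagged B602; C901 too complex"))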
@@ -1426,24 +1518,66 @@ class WorkflowPipeline:
     def _classify_issue(self, issue_str: str) -> tuple[IssueType, Priority]:
         issue_lower = issue_str.lower()
 
-
-
-        if
-            return
-        if self._is_complexity_issue(issue_lower):
-            return IssueType.COMPLEXITY, Priority.HIGH
-        if self._is_regex_validation_issue(issue_lower):
-            return IssueType.REGEX_VALIDATION, Priority.HIGH
-
-        if self._is_dead_code_issue(issue_lower):
-            return IssueType.DEAD_CODE, Priority.MEDIUM
-        if self._is_performance_issue(issue_lower):
-            return IssueType.PERFORMANCE, Priority.MEDIUM
-        if self._is_import_error(issue_lower):
-            return IssueType.IMPORT_ERROR, Priority.MEDIUM
+        # Check high priority issues first
+        high_priority_result = self._check_high_priority_issues(issue_lower)
+        if high_priority_result:
+            return high_priority_result
 
+        # Check medium priority issues
+        medium_priority_result = self._check_medium_priority_issues(issue_lower)
+        if medium_priority_result:
+            return medium_priority_result
+
+        # Default to formatting issue
         return IssueType.FORMATTING, Priority.MEDIUM
 
+    def _check_high_priority_issues(
+        self, issue_lower: str
+    ) -> tuple[IssueType, Priority] | None:
+        """Check for high priority issue types.
+
+        Args:
+            issue_lower: Lowercase issue string
+
+        Returns:
+            Tuple of issue type and priority if found, None otherwise
+        """
+        high_priority_checks = [
+            (self._is_type_error, IssueType.TYPE_ERROR),
+            (self._is_security_issue, IssueType.SECURITY),
+            (self._is_complexity_issue, IssueType.COMPLEXITY),
+            (self._is_regex_validation_issue, IssueType.REGEX_VALIDATION),
+        ]
+
+        for check_func, issue_type in high_priority_checks:
+            if check_func(issue_lower):
+                return issue_type, Priority.HIGH
+
+        return None
+
+    def _check_medium_priority_issues(
+        self, issue_lower: str
+    ) -> tuple[IssueType, Priority] | None:
+        """Check for medium priority issue types.
+
+        Args:
+            issue_lower: Lowercase issue string
+
+        Returns:
+            Tuple of issue type and priority if found, None otherwise
+        """
+        medium_priority_checks = [
+            (self._is_dead_code_issue, IssueType.DEAD_CODE),
+            (self._is_performance_issue, IssueType.PERFORMANCE),
+            (self._is_import_error, IssueType.IMPORT_ERROR),
+        ]
+
+        for check_func, issue_type in medium_priority_checks:
+            if check_func(issue_lower):
+                return issue_type, Priority.MEDIUM
+
+        return None
+
     def _is_type_error(self, issue_lower: str) -> bool:
         return any(
             keyword in issue_lower for keyword in ("type", "annotation", "pyright")
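
The `_classify_issue` rewrite above replaces a flat run of if-statements with two (predicate, issue type) tables checked in priority order, falling back to a formatting default. The same first-match-wins structure in miniature (keywords are illustrative):

from enum import Enum, auto


class IssueType(Enum):
    TYPE_ERROR = auto()
    SECURITY = auto()
    DEAD_CODE = auto()
    FORMATTING = auto()


class Priority(Enum):
    HIGH = auto()
    MEDIUM = auto()


HIGH_PRIORITY_CHECKS = [
    (lambda s: "annotation" in s, IssueType.TYPE_ERROR),
    (lambda s: "bandit" in s, IssueType.SECURITY),
]
MEDIUM_PRIORITY_CHECKS = [
    (lambda s: "unused" in s, IssueType.DEAD_CODE),
]


def classify_issue(issue: str) -> tuple[IssueType, Priority]:
    lowered = issue.lower()
    # Tiers are scanned in order, so a high-priority match always wins
    for checks, priority in (
        (HIGH_PRIORITY_CHECKS, Priority.HIGH),
        (MEDIUM_PRIORITY_CHECKS, Priority.MEDIUM),
    ):
        for predicate, issue_type in checks:
            if predicate(lowered):
                return issue_type, priority
    return IssueType.FORMATTING, Priority.MEDIUM  # default, as in the diff


print(classify_issue("Unused variable detected"))  # (IssueType.DEAD_CODE, Priority.MEDIUM)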
@@ -1512,78 +1646,76 @@ class WorkflowPipeline:
     async def _handle_security_gate_failure(
         self, options: OptionsProtocol, allow_ai_fixing: bool = False
     ) -> bool:
+        self._display_security_gate_failure_message()
+
+        if allow_ai_fixing:
+            return await self._attempt_ai_assisted_security_fix(options)
+        return self._handle_manual_security_fix()
+
+    def _display_security_gate_failure_message(self) -> None:
+        """Display initial security gate failure message."""
         self.console.print(
             "[red]🔒 SECURITY GATE: Critical security checks failed[/red]"
         )
 
-
-
-            "[red]Security-critical hooks (bandit, pyright, gitleaks) must pass before publishing[/red]"
-        )
-        self.console.print(
-            "[yellow]🤖 Attempting AI-assisted security issue resolution...[/yellow]"
-        )
+    async def _attempt_ai_assisted_security_fix(self, options: OptionsProtocol) -> bool:
+        """Attempt to fix security issues using AI assistance.
 
-
-
-        try:
-            security_still_blocks = self._check_security_critical_failures()
-            if not security_still_blocks:
-                self.console.print(
-                    "[green]✅ AI agents resolved security issues - publishing allowed[/green]"
-                )
-                return True
-            else:
-                self.console.print(
-                    "[red]🔒 Security issues persist after AI fixing - publishing still BLOCKED[/red]"
-                )
-                return False
-        except Exception as e:
-            self.logger.warning(
-                f"Security re-check failed: {e} - blocking publishing"
-            )
-            return False
-        return False
-        else:
-            self.console.print(
-                "[red]Security-critical hooks (bandit, pyright, gitleaks) must pass before publishing[/red]"
-            )
-            return False
+        Args:
+            options: Configuration options
 
-
-
-
-
-        publishing_requested: bool,
-    ) -> bool:
-        if publishing_requested:
-            return not testing_passed or not comprehensive_passed
+        Returns:
+            True if security issues were resolved, False otherwise
+        """
+        self._display_ai_fixing_messages()
 
-
+        ai_fix_success = await self._run_ai_agent_fixing_phase(options)
+        if ai_fix_success:
+            return self._verify_security_fix_success()
 
-
-
-
-
-
-
-
-
+        return False
+
+    def _display_ai_fixing_messages(self) -> None:
+        """Display messages about AI-assisted security fixing."""
+        self.console.print(
+            "[red]Security-critical hooks (bandit, pyright, gitleaks) must pass before publishing[/red]"
+        )
+        self.console.print(
+            "[yellow]🤖 Attempting AI-assisted security issue resolution...[/yellow]"
+        )
 
-
+    def _verify_security_fix_success(self) -> bool:
+        """Verify that AI fixes resolved the security issues.
 
-
-
-
+        Returns:
+            True if security issues were resolved, False otherwise
+        """
+        try:
+            security_still_blocks = self._check_security_critical_failures()
+            if not security_still_blocks:
+                self.console.print(
+                    "[green]✅ AI agents resolved security issues - publishing allowed[/green]"
+                )
+                return True
+            else:
+                self.console.print(
+                    "[red]🔒 Security issues persist after AI fixing - publishing still BLOCKED[/red]"
+                )
+                return False
+        except Exception as e:
+            self.logger.warning(f"Security re-check failed: {e} - blocking publishing")
+            return False
+
+    def _handle_manual_security_fix(self) -> bool:
+        """Handle security fix when AI assistance is not allowed.
+
+        Returns:
+            Always False since manual intervention is required
+        """
         self.console.print(
-
+            "[red]Security-critical hooks (bandit, pyright, gitleaks) must pass before publishing[/red]"
         )
-
-        self.console.print("[yellow] → Tests reported failure[/yellow]")
-        if not comprehensive_passed:
-            self.console.print(
-                "[yellow] → Comprehensive hooks reported failure[/yellow]"
-            )
+        return False
 
     def _check_security_critical_failures(self) -> bool:
         try:
@@ -1680,141 +1812,60 @@ class WorkflowPipeline:
 
         return hook_name in security_critical_hooks and is_failed
 
-    def
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        if audit_report.recommendations:
-            self.console.print("[bold]Security Recommendations: [/bold]")
-            for rec in audit_report.recommendations[:3]:
-                self.console.print(f"[dim]{rec}[/dim]")
-        else:
-            self.console.print(
-                "[yellow]⚠️ SECURITY AUDIT: Proceeding with partial quality success[/yellow]"
-            )
-            self.console.print(
-                "[yellow]✅ Security-critical checks (bandit, pyright, gitleaks) have passed[/yellow]"
-            )
-            self.console.print(
-                "[yellow]⚠️ Some non-critical quality checks failed - consider reviewing before production deployment[/yellow]"
-            )
-
-    async def _run_initial_fast_hooks_async(
-        self, options: OptionsProtocol, iteration: int, monitor: t.Any
-    ) -> bool:
-        monitor.record_sequential_op()
-        fast_hooks_passed = self._run_fast_hooks_phase(options)
-        if not fast_hooks_passed:
-            if options.ai_agent and self._should_debug():
-                self.debugger.log_iteration_end(iteration, False)
-            return False
-        return True
-
-    async def _run_fast_hooks_phase_monitored(
-        self, options: OptionsProtocol, workflow_id: str
-    ) -> bool:
-        with phase_monitor(workflow_id, "fast_hooks") as monitor:
-            monitor.record_sequential_op()
-            return self._run_fast_hooks_phase(options)
-
-    async def _run_comprehensive_hooks_phase_monitored(
-        self, options: OptionsProtocol, workflow_id: str
-    ) -> bool:
-        with phase_monitor(workflow_id, "comprehensive_hooks") as monitor:
-            monitor.record_sequential_op()
-            return self._run_comprehensive_hooks_phase(options)
-
-    async def _run_testing_phase_async(
-        self, options: OptionsProtocol, workflow_id: str
-    ) -> bool:
-        with phase_monitor(workflow_id, "testing") as monitor:
-            monitor.record_sequential_op()
-            return self._run_testing_phase(options)
-
-    async def _execute_standard_hooks_workflow_monitored(
-        self, options: OptionsProtocol, workflow_id: str
-    ) -> bool:
-        with phase_monitor(workflow_id, "hooks") as monitor:
-            self._update_hooks_status_running()
-
-            fast_hooks_success = self._execute_monitored_fast_hooks_phase(
-                options, monitor
-            )
-            if not fast_hooks_success:
-                self._handle_hooks_completion(False)
-                return False
+    def _workflow_context(
+        self,
+        workflow_id: str,
+        options: OptionsProtocol,
+    ) -> dict[str, t.Any]:
+        """Build a consistent payload for workflow-level events."""
+        return {
+            "workflow_id": workflow_id,
+            "test_mode": getattr(options, "test", False),
+            "skip_hooks": getattr(options, "skip_hooks", False),
+            "publish": getattr(options, "publish", False),
+            "all": getattr(options, "all", False),
+            "commit": getattr(options, "commit", False),
+            "ai_agent": getattr(options, "ai_agent", False),
+        }
 
-
-
-
+    async def _publish_event(
+        self, event: WorkflowEvent, payload: dict[str, t.Any]
+    ) -> None:
+        """Publish workflow events when the bus is available."""
+        if not getattr(self, "_event_bus", None):
+            return
 
-
-
+        try:
+            await self._event_bus.publish(event, payload)  # type: ignore[union-attr]
+        except Exception as exc:  # pragma: no cover - logging only
+            self.logger.debug(
+                "Failed to publish workflow event",
+                extra={"event": event.value, "error": str(exc)},
            )
 
-        hooks_success = fast_hooks_success and comprehensive_success
-        self._handle_hooks_completion(hooks_success)
-        return hooks_success
-
-    def _execute_monitored_fast_hooks_phase(
-        self, options: OptionsProtocol, monitor: t.Any
-    ) -> bool:
-        fast_hooks_success = self._run_fast_hooks_phase(options)
-        if fast_hooks_success:
-            monitor.record_sequential_op()
-        return fast_hooks_success
-
-    def _execute_monitored_cleaning_phase(self, options: OptionsProtocol) -> bool:
-        if not getattr(options, "clean", False):
-            return True
-
-        if not self._run_code_cleaning_phase(options):
-            return False
-
-        if not self._run_post_cleaning_fast_hooks(options):
-            return False
-
-        self._mark_code_cleaning_complete()
-        return True
-
-    def _execute_monitored_comprehensive_phase(
-        self, options: OptionsProtocol, monitor: t.Any
-    ) -> bool:
-        comprehensive_success = self._run_comprehensive_hooks_phase(options)
-        if comprehensive_success:
-            monitor.record_sequential_op()
-        return comprehensive_success
-
 
1847
|
class WorkflowOrchestrator:
|
|
1802
1848
|
def __init__(
|
|
1803
1849
|
self,
|
|
1804
|
-
console: Console | None = None,
|
|
1805
1850
|
pkg_path: Path | None = None,
|
|
1806
1851
|
dry_run: bool = False,
|
|
1807
1852
|
web_job_id: str | None = None,
|
|
1808
1853
|
verbose: bool = False,
|
|
1809
1854
|
debug: bool = False,
|
|
1855
|
+
changed_only: bool = False,
|
|
1810
1856
|
) -> None:
|
|
1811
|
-
|
|
1857
|
+
# Initialize console and pkg_path first
|
|
1858
|
+
from acb.console import Console
|
|
1859
|
+
|
|
1860
|
+
self.console = depends.get_sync(Console)
|
|
1812
1861
|
self.pkg_path = pkg_path or Path.cwd()
|
|
1813
1862
|
self.dry_run = dry_run
|
|
1814
1863
|
self.web_job_id = web_job_id
|
|
1815
1864
|
self.verbose = verbose
|
|
1816
1865
|
self.debug = debug
|
|
1866
|
+
self.changed_only = changed_only
|
|
1817
1867
|
|
|
1868
|
+
# Import protocols for retrieving dependencies via ACB
|
|
1818
1869
|
from crackerjack.models.protocols import (
|
|
1819
1870
|
ConfigMergeServiceProtocol,
|
|
1820
1871
|
FileSystemInterface,
|
|
@@ -1824,41 +1875,207 @@ class WorkflowOrchestrator:
|
|
|
1824
1875
|
TestManagerProtocol,
|
|
1825
1876
|
)
|
|
1826
1877
|
|
|
1878
|
+
# Setup services with ACB DI
|
|
1879
|
+
self._setup_acb_services()
|
|
1880
|
+
|
|
1827
1881
|
self._initialize_logging()
|
|
1828
1882
|
|
|
1829
|
-
self.logger =
|
|
1883
|
+
self.logger = depends.get_sync(LoggerProtocol)
|
|
1830
1884
|
|
|
1831
|
-
|
|
1885
|
+
# Create coordinators - dependencies retrieved via ACB's depends.get_sync()
|
|
1886
|
+
self.session = SessionCoordinator(self.console, self.pkg_path, self.web_job_id)
|
|
1832
1887
|
|
|
1833
|
-
|
|
1834
|
-
|
|
1835
|
-
pkg_path=self.pkg_path,
|
|
1836
|
-
dry_run=self.dry_run,
|
|
1837
|
-
verbose=self.verbose,
|
|
1838
|
-
)
|
|
1888
|
+
# Register SessionCoordinator in DI for WorkflowPipeline injection
|
|
1889
|
+
depends.set(SessionCoordinator, self.session)
|
|
1839
1890
|
|
|
1840
|
-
self.session = SessionCoordinator(self.console, self.pkg_path, self.web_job_id)
|
|
1841
1891
|
self.phases = PhaseCoordinator(
|
|
1842
1892
|
console=self.console,
|
|
1843
1893
|
pkg_path=self.pkg_path,
|
|
1844
1894
|
session=self.session,
|
|
1845
|
-
filesystem=
|
|
1846
|
-
git_service=
|
|
1847
|
-
hook_manager=
|
|
1848
|
-
test_manager=
|
|
1849
|
-
publish_manager=
|
|
1850
|
-
config_merge_service=
|
|
1895
|
+
filesystem=depends.get_sync(FileSystemInterface),
|
|
1896
|
+
git_service=depends.get_sync(GitInterface),
|
|
1897
|
+
hook_manager=depends.get_sync(HookManager),
|
|
1898
|
+
test_manager=depends.get_sync(TestManagerProtocol),
|
|
1899
|
+
publish_manager=depends.get_sync(PublishManager),
|
|
1900
|
+
config_merge_service=depends.get_sync(ConfigMergeServiceProtocol),
|
|
1901
|
+
)
|
|
1902
|
+
|
|
1903
|
+
# Register PhaseCoordinator in DI for WorkflowPipeline injection
|
|
1904
|
+
depends.set(PhaseCoordinator, self.phases)
|
|
1905
|
+
|
|
1906
|
+
# WorkflowPipeline uses @depends.inject, so all parameters are auto-injected
|
|
1907
|
+
self.pipeline = WorkflowPipeline()
|
|
1908
|
+
|
|
1909
|
+
def _setup_acb_services(self) -> None:
|
|
1910
|
+
"""Setup all services using ACB dependency injection."""
|
|
1911
|
+
self._register_filesystem_and_git_services()
|
|
1912
|
+
self._register_manager_services()
|
|
1913
|
+
self._register_core_services()
|
|
1914
|
+
self._register_quality_services()
|
|
1915
|
+
self._register_monitoring_services()
|
|
1916
|
+
self._setup_event_system()
|
|
1917
|
+
|
|
1918
|
+
def _register_filesystem_and_git_services(self) -> None:
|
|
1919
|
+
"""Register filesystem and git services."""
|
|
1920
|
+
from acb.depends import depends
|
|
1921
|
+
|
|
1922
|
+
from crackerjack.models.protocols import (
|
|
1923
|
+
FileSystemInterface,
|
|
1924
|
+
GitInterface,
|
|
1925
|
+
GitServiceProtocol,
|
|
1851
1926
|
)
|
|
1927
|
+
from crackerjack.services.enhanced_filesystem import EnhancedFileSystemService
|
|
1928
|
+
from crackerjack.services.git import GitService
|
|
1852
1929
|
|
|
1853
|
-
|
|
1930
|
+
filesystem = EnhancedFileSystemService()
|
|
1931
|
+
depends.set(FileSystemInterface, filesystem)
|
|
1932
|
+
|
|
1933
|
+
git_service = GitService(self.pkg_path)
|
|
1934
|
+
depends.set(GitInterface, git_service)
|
|
1935
|
+
depends.set(GitServiceProtocol, git_service)
|
|
1936
|
+
|
|
1937
|
+
def _register_manager_services(self) -> None:
|
|
1938
|
+
"""Register hook, test, and publish managers."""
|
|
1939
|
+
from acb.depends import depends
|
|
1940
|
+
|
|
1941
|
+
from crackerjack.managers.hook_manager import HookManagerImpl
|
|
1942
|
+
from crackerjack.managers.publish_manager import PublishManagerImpl
|
|
1943
|
+
from crackerjack.managers.test_manager import TestManager
|
|
1944
|
+
from crackerjack.models.protocols import (
|
|
1945
|
+
HookManager,
|
|
1946
|
+
PublishManager,
|
|
1947
|
+
TestManagerProtocol,
|
|
1948
|
+
)
|
|
1949
|
+
|
|
1950
|
+
hook_manager = HookManagerImpl(
|
|
1951
|
+
self.pkg_path,
|
|
1952
|
+
verbose=self.verbose,
|
|
1953
|
+
debug=self.debug,
|
|
1954
|
+
use_incremental=self.changed_only,
|
|
1955
|
+
)
|
|
1956
|
+
depends.set(HookManager, hook_manager)
|
|
1957
|
+
|
|
1958
|
+
test_manager = TestManager()
|
|
1959
|
+
depends.set(TestManagerProtocol, test_manager)
|
|
1960
|
+
|
|
1961
|
+
publish_manager = PublishManagerImpl()
|
|
1962
|
+
depends.set(PublishManager, publish_manager)
|
|
1963
|
+
|
|
1964
|
+
def _register_core_services(self) -> None:
|
|
1965
|
+
"""Register core configuration and security services."""
|
|
1966
|
+
from acb.depends import depends
|
|
1967
|
+
|
|
1968
|
+
from crackerjack.executors.hook_lock_manager import HookLockManager
|
|
1969
|
+
from crackerjack.models.protocols import (
|
|
1970
|
+
ConfigIntegrityServiceProtocol,
|
|
1971
|
+
ConfigMergeServiceProtocol,
|
|
1972
|
+
EnhancedFileSystemServiceProtocol,
|
|
1973
|
+
HookLockManagerProtocol,
|
|
1974
|
+
SecurityServiceProtocol,
|
|
1975
|
+
SmartSchedulingServiceProtocol,
|
|
1976
|
+
UnifiedConfigurationServiceProtocol,
|
|
1977
|
+
)
|
|
1978
|
+
from crackerjack.services.cache import CrackerjackCache
|
|
1979
|
+
from crackerjack.services.config_integrity import ConfigIntegrityService
|
|
1980
|
+
from crackerjack.services.config_merge import ConfigMergeService
|
|
1981
|
+
from crackerjack.services.enhanced_filesystem import EnhancedFileSystemService
|
|
1982
|
+
from crackerjack.services.security import SecurityService
|
|
1983
|
+
from crackerjack.services.smart_scheduling import SmartSchedulingService
|
|
1984
|
+
from crackerjack.services.unified_config import UnifiedConfigurationService
|
|
1985
|
+
|
|
1986
|
+
depends.set(
|
|
1987
|
+
UnifiedConfigurationServiceProtocol,
|
|
1988
|
+
UnifiedConfigurationService(pkg_path=self.pkg_path),
|
|
1989
|
+
)
|
|
1990
|
+
depends.set(
|
|
1991
|
+
ConfigIntegrityServiceProtocol,
|
|
1992
|
+
ConfigIntegrityService(project_path=self.pkg_path),
|
|
1993
|
+
)
|
|
1994
|
+
depends.set(ConfigMergeServiceProtocol, ConfigMergeService())
|
|
1995
|
+
depends.set(
|
|
1996
|
+
SmartSchedulingServiceProtocol,
|
|
1997
|
+
SmartSchedulingService(project_path=self.pkg_path),
|
|
1998
|
+
)
|
|
1999
|
+
depends.set(EnhancedFileSystemServiceProtocol, EnhancedFileSystemService())
|
|
2000
|
+
depends.set(SecurityServiceProtocol, SecurityService())
|
|
2001
|
+
depends.set(HookLockManagerProtocol, HookLockManager())
|
|
2002
|
+
depends.set(CrackerjackCache, CrackerjackCache())
|
|
2003
|
+
|
|
+    def _register_quality_services(self) -> None:
+        """Register coverage, version analysis, and code quality services."""
+        from acb.depends import depends
+
+        from crackerjack.models.protocols import (
+            ChangelogGeneratorProtocol,
+            CoverageBadgeServiceProtocol,
+            CoverageRatchetProtocol,
+            GitInterface,
+            RegexPatternsProtocol,
+            VersionAnalyzerProtocol,
+        )
+        from crackerjack.services.changelog_automation import ChangelogGenerator
+        from crackerjack.services.coverage_badge_service import CoverageBadgeService
+        from crackerjack.services.coverage_ratchet import CoverageRatchetService
+        from crackerjack.services.regex_patterns import RegexPatternsService
+        from crackerjack.services.version_analyzer import VersionAnalyzer
+
+        coverage_ratchet = CoverageRatchetService(self.pkg_path)
+        depends.set(CoverageRatchetProtocol, coverage_ratchet)
+
+        coverage_badge = CoverageBadgeService(project_root=self.pkg_path)
+        depends.set(CoverageBadgeServiceProtocol, coverage_badge)
+
+        git_service = depends.get_sync(GitInterface)
+        version_analyzer = VersionAnalyzer(git_service=git_service)
+        depends.set(VersionAnalyzerProtocol, version_analyzer)
+
+        changelog_generator = ChangelogGenerator()
+        depends.set(ChangelogGeneratorProtocol, changelog_generator)
+
+        regex_patterns = RegexPatternsService()
+        depends.set(RegexPatternsProtocol, regex_patterns)
+
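One detail worth noting above: `VersionAnalyzer` is built from a dependency resolved out of the container (`GitInterface`), so registration order matters here. A tiny sketch of that resolve-then-inject step, using a plain dict as a stand-in container (all names illustrative):

```python
# Hypothetical mini-registry showing the resolve-then-inject step used for
# VersionAnalyzer: a previously registered dependency is fetched and passed
# into a new service's constructor before that service is registered itself.
import typing as t

registry: dict[type, t.Any] = {}


class GitInterface:
    def current_commit(self) -> str:
        return "abc123"  # placeholder value


class VersionAnalyzer:
    def __init__(self, git_service: GitInterface) -> None:
        self.git_service = git_service


registry[GitInterface] = GitInterface()  # must happen first
analyzer = VersionAnalyzer(git_service=registry[GitInterface])
registry[VersionAnalyzer] = analyzer     # now others can resolve it
```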
+    def _register_monitoring_services(self) -> None:
+        """Register performance monitoring and benchmarking services."""
+        from acb.depends import depends
+        from acb.logger import Logger
+
+        from crackerjack.models.protocols import PerformanceBenchmarkProtocol
+        from crackerjack.services.monitoring.performance_benchmarks import (
+            PerformanceBenchmarkService,
+        )
+
+        performance_benchmarks = PerformanceBenchmarkService(
             console=self.console,
+            logger=depends.get_sync(Logger),
             pkg_path=self.pkg_path,
-            session=self.session,
-            phases=self.phases,
         )
+        depends.set(PerformanceBenchmarkProtocol, performance_benchmarks)
+
+    def _setup_event_system(self) -> None:
+        """Setup event bus and telemetry."""
+        from acb.depends import depends
+
+        from crackerjack.events import (
+            WorkflowEventBus,
+            WorkflowEventTelemetry,
+            register_default_subscribers,
+        )
+
+        default_state_dir = Path.home() / ".crackerjack" / "state"
+        default_state_dir.mkdir(parents=True, exist_ok=True)
+
+        event_bus = WorkflowEventBus()
+        telemetry_state_file = default_state_dir / "workflow_events.json"
+        telemetry = WorkflowEventTelemetry(state_file=telemetry_state_file)
+        register_default_subscribers(event_bus, telemetry)
+
+        depends.set(WorkflowEventBus, event_bus)
+        depends.set(WorkflowEventTelemetry, telemetry)
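`_setup_event_system` follows a conventional publish/subscribe wiring: create the bus, create a telemetry sink persisting to `~/.crackerjack/state/workflow_events.json`, attach subscribers, then register both for later resolution. A minimal self-contained sketch of that shape (class internals and the event name are assumptions, not crackerjack's actual API):

```python
import json
from pathlib import Path
from typing import Any, Callable


class EventBus:
    def __init__(self) -> None:
        self._subscribers: dict[str, list[Callable[[dict[str, Any]], None]]] = {}

    def subscribe(self, event: str, handler: Callable[[dict[str, Any]], None]) -> None:
        self._subscribers.setdefault(event, []).append(handler)

    def publish(self, event: str, payload: dict[str, Any]) -> None:
        # Fan the payload out to every handler registered for this event.
        for handler in self._subscribers.get(event, []):
            handler(payload)


class Telemetry:
    def __init__(self, state_file: Path) -> None:
        self.state_file = state_file
        self.events: list[dict[str, Any]] = []

    def record(self, payload: dict[str, Any]) -> None:
        # Persist every observed event to the JSON state file.
        self.events.append(payload)
        self.state_file.write_text(json.dumps(self.events))


state_dir = Path.home() / ".crackerjack" / "state"
state_dir.mkdir(parents=True, exist_ok=True)

bus = EventBus()
telemetry = Telemetry(state_dir / "workflow_events.json")
bus.subscribe("phase_completed", telemetry.record)
bus.publish("phase_completed", {"phase": "tests", "success": True})
```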
 
     def _initialize_logging(self) -> None:
         from crackerjack.services.log_manager import get_log_manager
+        from crackerjack.services.logging import setup_structured_logging
 
         log_manager = get_log_manager()
         session_id = getattr(self, "web_job_id", None) or str(int(time.time()))[:8]
@@ -1869,7 +2086,7 @@ class WorkflowOrchestrator:
             level=log_level, json_output=False, log_file=debug_log_file
         )
 
-        temp_logger =
+        temp_logger = depends.get_sync(LoggerProtocol)
         temp_logger.debug(
             "Structured logging initialized",
             log_file=str(debug_log_file),
@@ -1891,33 +2108,102 @@ class WorkflowOrchestrator:
         self.session.fail_task(task_id, error)
 
     def run_cleaning_phase(self, options: OptionsProtocol) -> bool:
-
+        result: bool = self.phases.run_cleaning_phase(options)  # type: ignore[arg-type,assignment]
+        return result
 
     def run_fast_hooks_only(self, options: OptionsProtocol) -> bool:
-
+        result: bool = self.phases.run_fast_hooks_only(options)  # type: ignore[arg-type,assignment]
+        return result
 
     def run_comprehensive_hooks_only(self, options: OptionsProtocol) -> bool:
-
+        result: bool = self.phases.run_comprehensive_hooks_only(options)  # type: ignore[arg-type,assignment]
+        return result
 
     def run_hooks_phase(self, options: OptionsProtocol) -> bool:
-
+        result: bool = self.phases.run_hooks_phase(options)  # type: ignore[arg-type,assignment]
+        return result
 
     def run_testing_phase(self, options: OptionsProtocol) -> bool:
-
+        result: bool = self.phases.run_testing_phase(options)  # type: ignore[arg-type,assignment]
+        return result
 
     def run_publishing_phase(self, options: OptionsProtocol) -> bool:
-
+        result: bool = self.phases.run_publishing_phase(options)  # type: ignore[arg-type,assignment]
+        return result
 
     def run_commit_phase(self, options: OptionsProtocol) -> bool:
-
+        result: bool = self.phases.run_commit_phase(options)  # type: ignore[arg-type,assignment]
+        return result
 
     def run_configuration_phase(self, options: OptionsProtocol) -> bool:
-
+        result: bool = self.phases.run_configuration_phase(options)  # type: ignore[arg-type,assignment]
+        return result
 
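Each wrapper above replaces a bare delegation with a two-step body: bind the delegated call to a `bool`-annotated local, then return it. The intermediate variable gives the type checker one explicit point where the loosely typed `self.phases` return value is pinned to `bool`, which is what the `# type: ignore[arg-type,assignment]` comments are scoped to. A stripped-down illustration (hypothetical classes):

```python
from typing import Any


class Phases:
    def run_testing_phase(self, options: Any) -> Any:
        return True  # untyped boundary: the checker only sees Any


class Orchestrator:
    def __init__(self) -> None:
        self.phases = Phases()

    def run_testing_phase(self, options: Any) -> bool:
        # The annotated local pins down the return type in one place
        # instead of letting Any leak through the public signature.
        result: bool = self.phases.run_testing_phase(options)
        return result


assert Orchestrator().run_testing_phase(options=None) is True
```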
     async def run_complete_workflow(self, options: OptionsProtocol) -> bool:
         result: bool = await self.pipeline.run_complete_workflow(options)
+        # Ensure we properly clean up any pending tasks before finishing
+        await self._cleanup_pending_tasks()
         return result
 
+    async def _cleanup_pending_tasks(self) -> None:
+        """Clean up any remaining asyncio tasks before event loop closes."""
+        # First call the pipeline cleanup methods if they exist
+        await self._cleanup_pipeline_executors()
+
+        # Then handle general asyncio task cleanup
+        await self._cleanup_remaining_tasks()
+
+    async def _cleanup_pipeline_executors(self) -> None:
+        """Clean up specific pipeline executors."""
+        with suppress(Exception):
+            # Try to call specific async cleanup methods on executors/pipeline if they exist
+            if hasattr(self, "pipeline") and hasattr(self.pipeline, "phases"):
+                await self._cleanup_executor_if_exists(
+                    self.pipeline.phases, "_parallel_executor"
+                )
+                await self._cleanup_executor_if_exists(
+                    self.pipeline.phases, "_async_executor"
+                )
+
+    async def _cleanup_executor_if_exists(
+        self, phases_obj: t.Any, executor_attr: str
+    ) -> None:
+        """Clean up an executor if it exists and has the required cleanup method."""
+        if hasattr(phases_obj, executor_attr):
+            executor = getattr(phases_obj, executor_attr)
+            if hasattr(executor, "async_cleanup"):
+                await executor.async_cleanup()
+
+    async def _cleanup_remaining_tasks(self) -> None:
+        """Clean up any remaining asyncio tasks."""
+        with suppress(RuntimeError):
+            loop = asyncio.get_running_loop()
+            # Get all pending tasks
+            pending_tasks = [
+                task for task in asyncio.all_tasks(loop) if not task.done()
+            ]
+            await self._cancel_pending_tasks(pending_tasks)
+
+    async def _cancel_pending_tasks(self, pending_tasks: list) -> None:
+        """Cancel pending tasks with proper error handling."""
+        for task in pending_tasks:
+            if not task.done():
+                try:
+                    task.cancel()
+                    # Wait a short time for cancellation to complete
+                    await asyncio.wait_for(task, timeout=0.1)
+                except (TimeoutError, asyncio.CancelledError):
+                    # Task was cancelled or couldn't finish in time, continue
+                    pass
+                except RuntimeError as e:
+                    # Catch the specific error when event loop is closed during task cancellation
+                    if "Event loop is closed" in str(e):
+                        # Event loop was closed while trying to cancel tasks, just return
+                        return
+                    else:
+                        # Re-raise other RuntimeErrors
+                        raise
+
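The cleanup helpers above follow the standard shutdown recipe for asyncio: enumerate pending tasks, cancel each one, and give it a short grace period to unwind before the loop closes. The same pattern in a runnable, self-contained form:

```python
import asyncio


async def cancel_pending(timeout: float = 0.1) -> None:
    """Cancel all tasks except the current one, waiting briefly for each."""
    current = asyncio.current_task()
    for task in asyncio.all_tasks():
        if task is current or task.done():
            continue
        task.cancel()
        try:
            # Give the task a short window to process the cancellation.
            await asyncio.wait_for(task, timeout=timeout)
        except (TimeoutError, asyncio.CancelledError):
            pass  # cancelled or too slow - either way, move on


async def main() -> None:
    asyncio.ensure_future(asyncio.sleep(3600))  # a task that would outlive us
    await cancel_pending()


asyncio.run(main())
```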
     def run_complete_workflow_sync(self, options: OptionsProtocol) -> bool:
         """Sync wrapper for run_complete_workflow."""
         return asyncio.run(self.run_complete_workflow(options))
@@ -1933,7 +2219,7 @@ class WorkflowOrchestrator:
 
     def _get_version(self) -> str:
         try:
-            return version()
+            return version("crackerjack")
         except Exception:
             return "unknown"
 
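The `_get_version` change is a genuine bug fix: assuming `version` here is `importlib.metadata.version`, the zero-argument call unconditionally raised `TypeError`, so the `except Exception` branch always returned `"unknown"`. Passing the distribution name makes the lookup work:

```python
from importlib.metadata import PackageNotFoundError, version

try:
    print(version("crackerjack"))  # the installed distribution's version string
except PackageNotFoundError:
    print("unknown")
```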
@@ -1942,8 +2228,16 @@ class WorkflowOrchestrator:
 
         try:
             result = await self.run_complete_workflow(options)
-            self.
-            return result
+            return self._finalize_session_with_result(result)
         except Exception:
-            self.
-
+            return self._finalize_session_on_exception()
+
+    def _finalize_session_with_result(self, result: bool) -> bool:
+        """Finalize session with the workflow result."""
+        self.session.end_session(success=result)
+        return result
+
+    def _finalize_session_on_exception(self) -> bool:
+        """Finalize session when an exception occurs."""
+        self.session.end_session(success=False)
+        return False
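The two extracted helpers guarantee `end_session` runs on both the success and the exception path. In miniature, the control flow looks like this (placeholder classes; only `end_session(success=...)` mirrors the diff):

```python
from typing import Callable


class Session:
    def end_session(self, success: bool) -> None:
        print(f"session ended, success={success}")


def execute(session: Session, workflow: Callable[[], object]) -> bool:
    try:
        result = bool(workflow())
        session.end_session(success=result)   # success path finalizer
        return result
    except Exception:
        session.end_session(success=False)    # exception path finalizer
        return False


execute(Session(), lambda: True)   # -> session ended, success=True
execute(Session(), lambda: 1 / 0)  # -> session ended, success=False
```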