crackerjack 0.18.2__py3-none-any.whl → 0.45.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- crackerjack/README.md +19 -0
- crackerjack/__init__.py +96 -2
- crackerjack/__main__.py +637 -138
- crackerjack/adapters/README.md +18 -0
- crackerjack/adapters/__init__.py +39 -0
- crackerjack/adapters/_output_paths.py +167 -0
- crackerjack/adapters/_qa_adapter_base.py +309 -0
- crackerjack/adapters/_tool_adapter_base.py +706 -0
- crackerjack/adapters/ai/README.md +65 -0
- crackerjack/adapters/ai/__init__.py +5 -0
- crackerjack/adapters/ai/claude.py +853 -0
- crackerjack/adapters/complexity/README.md +53 -0
- crackerjack/adapters/complexity/__init__.py +10 -0
- crackerjack/adapters/complexity/complexipy.py +641 -0
- crackerjack/adapters/dependency/__init__.py +22 -0
- crackerjack/adapters/dependency/pip_audit.py +418 -0
- crackerjack/adapters/format/README.md +72 -0
- crackerjack/adapters/format/__init__.py +11 -0
- crackerjack/adapters/format/mdformat.py +313 -0
- crackerjack/adapters/format/ruff.py +516 -0
- crackerjack/adapters/lint/README.md +47 -0
- crackerjack/adapters/lint/__init__.py +11 -0
- crackerjack/adapters/lint/codespell.py +273 -0
- crackerjack/adapters/lsp/README.md +49 -0
- crackerjack/adapters/lsp/__init__.py +27 -0
- crackerjack/adapters/lsp/_base.py +194 -0
- crackerjack/adapters/lsp/_client.py +358 -0
- crackerjack/adapters/lsp/_manager.py +193 -0
- crackerjack/adapters/lsp/skylos.py +283 -0
- crackerjack/adapters/lsp/zuban.py +557 -0
- crackerjack/adapters/refactor/README.md +59 -0
- crackerjack/adapters/refactor/__init__.py +12 -0
- crackerjack/adapters/refactor/creosote.py +318 -0
- crackerjack/adapters/refactor/refurb.py +406 -0
- crackerjack/adapters/refactor/skylos.py +494 -0
- crackerjack/adapters/sast/README.md +132 -0
- crackerjack/adapters/sast/__init__.py +32 -0
- crackerjack/adapters/sast/_base.py +201 -0
- crackerjack/adapters/sast/bandit.py +423 -0
- crackerjack/adapters/sast/pyscn.py +405 -0
- crackerjack/adapters/sast/semgrep.py +241 -0
- crackerjack/adapters/security/README.md +111 -0
- crackerjack/adapters/security/__init__.py +17 -0
- crackerjack/adapters/security/gitleaks.py +339 -0
- crackerjack/adapters/type/README.md +52 -0
- crackerjack/adapters/type/__init__.py +12 -0
- crackerjack/adapters/type/pyrefly.py +402 -0
- crackerjack/adapters/type/ty.py +402 -0
- crackerjack/adapters/type/zuban.py +522 -0
- crackerjack/adapters/utility/README.md +51 -0
- crackerjack/adapters/utility/__init__.py +10 -0
- crackerjack/adapters/utility/checks.py +884 -0
- crackerjack/agents/README.md +264 -0
- crackerjack/agents/__init__.py +66 -0
- crackerjack/agents/architect_agent.py +238 -0
- crackerjack/agents/base.py +167 -0
- crackerjack/agents/claude_code_bridge.py +641 -0
- crackerjack/agents/coordinator.py +600 -0
- crackerjack/agents/documentation_agent.py +520 -0
- crackerjack/agents/dry_agent.py +585 -0
- crackerjack/agents/enhanced_coordinator.py +279 -0
- crackerjack/agents/enhanced_proactive_agent.py +185 -0
- crackerjack/agents/error_middleware.py +53 -0
- crackerjack/agents/formatting_agent.py +230 -0
- crackerjack/agents/helpers/__init__.py +9 -0
- crackerjack/agents/helpers/performance/__init__.py +22 -0
- crackerjack/agents/helpers/performance/performance_ast_analyzer.py +357 -0
- crackerjack/agents/helpers/performance/performance_pattern_detector.py +909 -0
- crackerjack/agents/helpers/performance/performance_recommender.py +572 -0
- crackerjack/agents/helpers/refactoring/__init__.py +22 -0
- crackerjack/agents/helpers/refactoring/code_transformer.py +536 -0
- crackerjack/agents/helpers/refactoring/complexity_analyzer.py +344 -0
- crackerjack/agents/helpers/refactoring/dead_code_detector.py +437 -0
- crackerjack/agents/helpers/test_creation/__init__.py +19 -0
- crackerjack/agents/helpers/test_creation/test_ast_analyzer.py +216 -0
- crackerjack/agents/helpers/test_creation/test_coverage_analyzer.py +643 -0
- crackerjack/agents/helpers/test_creation/test_template_generator.py +1031 -0
- crackerjack/agents/import_optimization_agent.py +1181 -0
- crackerjack/agents/performance_agent.py +325 -0
- crackerjack/agents/performance_helpers.py +205 -0
- crackerjack/agents/proactive_agent.py +55 -0
- crackerjack/agents/refactoring_agent.py +511 -0
- crackerjack/agents/refactoring_helpers.py +247 -0
- crackerjack/agents/security_agent.py +793 -0
- crackerjack/agents/semantic_agent.py +479 -0
- crackerjack/agents/semantic_helpers.py +356 -0
- crackerjack/agents/test_creation_agent.py +570 -0
- crackerjack/agents/test_specialist_agent.py +526 -0
- crackerjack/agents/tracker.py +110 -0
- crackerjack/api.py +647 -0
- crackerjack/cli/README.md +394 -0
- crackerjack/cli/__init__.py +24 -0
- crackerjack/cli/cache_handlers.py +209 -0
- crackerjack/cli/cache_handlers_enhanced.py +680 -0
- crackerjack/cli/facade.py +162 -0
- crackerjack/cli/formatting.py +13 -0
- crackerjack/cli/handlers/__init__.py +85 -0
- crackerjack/cli/handlers/advanced.py +103 -0
- crackerjack/cli/handlers/ai_features.py +62 -0
- crackerjack/cli/handlers/analytics.py +479 -0
- crackerjack/cli/handlers/changelog.py +271 -0
- crackerjack/cli/handlers/config_handlers.py +16 -0
- crackerjack/cli/handlers/coverage.py +84 -0
- crackerjack/cli/handlers/documentation.py +280 -0
- crackerjack/cli/handlers/main_handlers.py +497 -0
- crackerjack/cli/handlers/monitoring.py +371 -0
- crackerjack/cli/handlers.py +700 -0
- crackerjack/cli/interactive.py +488 -0
- crackerjack/cli/options.py +1216 -0
- crackerjack/cli/semantic_handlers.py +292 -0
- crackerjack/cli/utils.py +19 -0
- crackerjack/cli/version.py +19 -0
- crackerjack/code_cleaner.py +1307 -0
- crackerjack/config/README.md +472 -0
- crackerjack/config/__init__.py +275 -0
- crackerjack/config/global_lock_config.py +207 -0
- crackerjack/config/hooks.py +390 -0
- crackerjack/config/loader.py +239 -0
- crackerjack/config/settings.py +141 -0
- crackerjack/config/tool_commands.py +331 -0
- crackerjack/core/README.md +393 -0
- crackerjack/core/__init__.py +0 -0
- crackerjack/core/async_workflow_orchestrator.py +738 -0
- crackerjack/core/autofix_coordinator.py +282 -0
- crackerjack/core/container.py +105 -0
- crackerjack/core/enhanced_container.py +583 -0
- crackerjack/core/file_lifecycle.py +472 -0
- crackerjack/core/performance.py +244 -0
- crackerjack/core/performance_monitor.py +357 -0
- crackerjack/core/phase_coordinator.py +1227 -0
- crackerjack/core/proactive_workflow.py +267 -0
- crackerjack/core/resource_manager.py +425 -0
- crackerjack/core/retry.py +275 -0
- crackerjack/core/service_watchdog.py +601 -0
- crackerjack/core/session_coordinator.py +239 -0
- crackerjack/core/timeout_manager.py +563 -0
- crackerjack/core/websocket_lifecycle.py +410 -0
- crackerjack/core/workflow/__init__.py +21 -0
- crackerjack/core/workflow/workflow_ai_coordinator.py +863 -0
- crackerjack/core/workflow/workflow_event_orchestrator.py +1107 -0
- crackerjack/core/workflow/workflow_issue_parser.py +714 -0
- crackerjack/core/workflow/workflow_phase_executor.py +1158 -0
- crackerjack/core/workflow/workflow_security_gates.py +400 -0
- crackerjack/core/workflow_orchestrator.py +2243 -0
- crackerjack/data/README.md +11 -0
- crackerjack/data/__init__.py +8 -0
- crackerjack/data/models.py +79 -0
- crackerjack/data/repository.py +210 -0
- crackerjack/decorators/README.md +180 -0
- crackerjack/decorators/__init__.py +35 -0
- crackerjack/decorators/error_handling.py +649 -0
- crackerjack/decorators/error_handling_decorators.py +334 -0
- crackerjack/decorators/helpers.py +58 -0
- crackerjack/decorators/patterns.py +281 -0
- crackerjack/decorators/utils.py +58 -0
- crackerjack/docs/INDEX.md +11 -0
- crackerjack/docs/README.md +11 -0
- crackerjack/docs/generated/api/API_REFERENCE.md +10895 -0
- crackerjack/docs/generated/api/CLI_REFERENCE.md +109 -0
- crackerjack/docs/generated/api/CROSS_REFERENCES.md +1755 -0
- crackerjack/docs/generated/api/PROTOCOLS.md +3 -0
- crackerjack/docs/generated/api/SERVICES.md +1252 -0
- crackerjack/documentation/README.md +11 -0
- crackerjack/documentation/__init__.py +31 -0
- crackerjack/documentation/ai_templates.py +756 -0
- crackerjack/documentation/dual_output_generator.py +767 -0
- crackerjack/documentation/mkdocs_integration.py +518 -0
- crackerjack/documentation/reference_generator.py +1065 -0
- crackerjack/dynamic_config.py +678 -0
- crackerjack/errors.py +378 -0
- crackerjack/events/README.md +11 -0
- crackerjack/events/__init__.py +16 -0
- crackerjack/events/telemetry.py +175 -0
- crackerjack/events/workflow_bus.py +346 -0
- crackerjack/exceptions/README.md +301 -0
- crackerjack/exceptions/__init__.py +5 -0
- crackerjack/exceptions/config.py +4 -0
- crackerjack/exceptions/tool_execution_error.py +245 -0
- crackerjack/executors/README.md +591 -0
- crackerjack/executors/__init__.py +13 -0
- crackerjack/executors/async_hook_executor.py +938 -0
- crackerjack/executors/cached_hook_executor.py +316 -0
- crackerjack/executors/hook_executor.py +1295 -0
- crackerjack/executors/hook_lock_manager.py +708 -0
- crackerjack/executors/individual_hook_executor.py +739 -0
- crackerjack/executors/lsp_aware_hook_executor.py +349 -0
- crackerjack/executors/progress_hook_executor.py +282 -0
- crackerjack/executors/tool_proxy.py +433 -0
- crackerjack/hooks/README.md +485 -0
- crackerjack/hooks/lsp_hook.py +93 -0
- crackerjack/intelligence/README.md +557 -0
- crackerjack/intelligence/__init__.py +37 -0
- crackerjack/intelligence/adaptive_learning.py +693 -0
- crackerjack/intelligence/agent_orchestrator.py +485 -0
- crackerjack/intelligence/agent_registry.py +377 -0
- crackerjack/intelligence/agent_selector.py +439 -0
- crackerjack/intelligence/integration.py +250 -0
- crackerjack/interactive.py +719 -0
- crackerjack/managers/README.md +369 -0
- crackerjack/managers/__init__.py +11 -0
- crackerjack/managers/async_hook_manager.py +135 -0
- crackerjack/managers/hook_manager.py +585 -0
- crackerjack/managers/publish_manager.py +631 -0
- crackerjack/managers/test_command_builder.py +391 -0
- crackerjack/managers/test_executor.py +474 -0
- crackerjack/managers/test_manager.py +1357 -0
- crackerjack/managers/test_progress.py +187 -0
- crackerjack/mcp/README.md +374 -0
- crackerjack/mcp/__init__.py +0 -0
- crackerjack/mcp/cache.py +352 -0
- crackerjack/mcp/client_runner.py +121 -0
- crackerjack/mcp/context.py +802 -0
- crackerjack/mcp/dashboard.py +657 -0
- crackerjack/mcp/enhanced_progress_monitor.py +493 -0
- crackerjack/mcp/file_monitor.py +394 -0
- crackerjack/mcp/progress_components.py +607 -0
- crackerjack/mcp/progress_monitor.py +1016 -0
- crackerjack/mcp/rate_limiter.py +336 -0
- crackerjack/mcp/server.py +24 -0
- crackerjack/mcp/server_core.py +526 -0
- crackerjack/mcp/service_watchdog.py +505 -0
- crackerjack/mcp/state.py +407 -0
- crackerjack/mcp/task_manager.py +259 -0
- crackerjack/mcp/tools/README.md +27 -0
- crackerjack/mcp/tools/__init__.py +19 -0
- crackerjack/mcp/tools/core_tools.py +469 -0
- crackerjack/mcp/tools/error_analyzer.py +283 -0
- crackerjack/mcp/tools/execution_tools.py +384 -0
- crackerjack/mcp/tools/intelligence_tool_registry.py +46 -0
- crackerjack/mcp/tools/intelligence_tools.py +264 -0
- crackerjack/mcp/tools/monitoring_tools.py +628 -0
- crackerjack/mcp/tools/proactive_tools.py +367 -0
- crackerjack/mcp/tools/progress_tools.py +222 -0
- crackerjack/mcp/tools/semantic_tools.py +584 -0
- crackerjack/mcp/tools/utility_tools.py +358 -0
- crackerjack/mcp/tools/workflow_executor.py +699 -0
- crackerjack/mcp/websocket/README.md +31 -0
- crackerjack/mcp/websocket/__init__.py +14 -0
- crackerjack/mcp/websocket/app.py +54 -0
- crackerjack/mcp/websocket/endpoints.py +492 -0
- crackerjack/mcp/websocket/event_bridge.py +188 -0
- crackerjack/mcp/websocket/jobs.py +406 -0
- crackerjack/mcp/websocket/monitoring/__init__.py +25 -0
- crackerjack/mcp/websocket/monitoring/api/__init__.py +19 -0
- crackerjack/mcp/websocket/monitoring/api/dependencies.py +141 -0
- crackerjack/mcp/websocket/monitoring/api/heatmap.py +154 -0
- crackerjack/mcp/websocket/monitoring/api/intelligence.py +199 -0
- crackerjack/mcp/websocket/monitoring/api/metrics.py +203 -0
- crackerjack/mcp/websocket/monitoring/api/telemetry.py +101 -0
- crackerjack/mcp/websocket/monitoring/dashboard.py +18 -0
- crackerjack/mcp/websocket/monitoring/factory.py +109 -0
- crackerjack/mcp/websocket/monitoring/filters.py +10 -0
- crackerjack/mcp/websocket/monitoring/metrics.py +64 -0
- crackerjack/mcp/websocket/monitoring/models.py +90 -0
- crackerjack/mcp/websocket/monitoring/utils.py +171 -0
- crackerjack/mcp/websocket/monitoring/websocket_manager.py +78 -0
- crackerjack/mcp/websocket/monitoring/websockets/__init__.py +17 -0
- crackerjack/mcp/websocket/monitoring/websockets/dependencies.py +126 -0
- crackerjack/mcp/websocket/monitoring/websockets/heatmap.py +176 -0
- crackerjack/mcp/websocket/monitoring/websockets/intelligence.py +291 -0
- crackerjack/mcp/websocket/monitoring/websockets/metrics.py +291 -0
- crackerjack/mcp/websocket/monitoring_endpoints.py +21 -0
- crackerjack/mcp/websocket/server.py +174 -0
- crackerjack/mcp/websocket/websocket_handler.py +276 -0
- crackerjack/mcp/websocket_server.py +10 -0
- crackerjack/models/README.md +308 -0
- crackerjack/models/__init__.py +40 -0
- crackerjack/models/config.py +730 -0
- crackerjack/models/config_adapter.py +265 -0
- crackerjack/models/protocols.py +1535 -0
- crackerjack/models/pydantic_models.py +320 -0
- crackerjack/models/qa_config.py +145 -0
- crackerjack/models/qa_results.py +134 -0
- crackerjack/models/resource_protocols.py +299 -0
- crackerjack/models/results.py +35 -0
- crackerjack/models/semantic_models.py +258 -0
- crackerjack/models/task.py +173 -0
- crackerjack/models/test_models.py +60 -0
- crackerjack/monitoring/README.md +11 -0
- crackerjack/monitoring/__init__.py +0 -0
- crackerjack/monitoring/ai_agent_watchdog.py +405 -0
- crackerjack/monitoring/metrics_collector.py +427 -0
- crackerjack/monitoring/regression_prevention.py +580 -0
- crackerjack/monitoring/websocket_server.py +406 -0
- crackerjack/orchestration/README.md +340 -0
- crackerjack/orchestration/__init__.py +43 -0
- crackerjack/orchestration/advanced_orchestrator.py +894 -0
- crackerjack/orchestration/cache/README.md +312 -0
- crackerjack/orchestration/cache/__init__.py +37 -0
- crackerjack/orchestration/cache/memory_cache.py +338 -0
- crackerjack/orchestration/cache/tool_proxy_cache.py +340 -0
- crackerjack/orchestration/config.py +297 -0
- crackerjack/orchestration/coverage_improvement.py +180 -0
- crackerjack/orchestration/execution_strategies.py +361 -0
- crackerjack/orchestration/hook_orchestrator.py +1398 -0
- crackerjack/orchestration/strategies/README.md +401 -0
- crackerjack/orchestration/strategies/__init__.py +39 -0
- crackerjack/orchestration/strategies/adaptive_strategy.py +630 -0
- crackerjack/orchestration/strategies/parallel_strategy.py +237 -0
- crackerjack/orchestration/strategies/sequential_strategy.py +299 -0
- crackerjack/orchestration/test_progress_streamer.py +647 -0
- crackerjack/plugins/README.md +11 -0
- crackerjack/plugins/__init__.py +15 -0
- crackerjack/plugins/base.py +200 -0
- crackerjack/plugins/hooks.py +254 -0
- crackerjack/plugins/loader.py +335 -0
- crackerjack/plugins/managers.py +264 -0
- crackerjack/py313.py +191 -0
- crackerjack/security/README.md +11 -0
- crackerjack/security/__init__.py +0 -0
- crackerjack/security/audit.py +197 -0
- crackerjack/services/README.md +374 -0
- crackerjack/services/__init__.py +9 -0
- crackerjack/services/ai/README.md +295 -0
- crackerjack/services/ai/__init__.py +7 -0
- crackerjack/services/ai/advanced_optimizer.py +878 -0
- crackerjack/services/ai/contextual_ai_assistant.py +542 -0
- crackerjack/services/ai/embeddings.py +444 -0
- crackerjack/services/ai/intelligent_commit.py +328 -0
- crackerjack/services/ai/predictive_analytics.py +510 -0
- crackerjack/services/anomaly_detector.py +392 -0
- crackerjack/services/api_extractor.py +617 -0
- crackerjack/services/backup_service.py +467 -0
- crackerjack/services/bounded_status_operations.py +530 -0
- crackerjack/services/cache.py +369 -0
- crackerjack/services/changelog_automation.py +399 -0
- crackerjack/services/command_execution_service.py +305 -0
- crackerjack/services/config_integrity.py +132 -0
- crackerjack/services/config_merge.py +546 -0
- crackerjack/services/config_service.py +198 -0
- crackerjack/services/config_template.py +493 -0
- crackerjack/services/coverage_badge_service.py +173 -0
- crackerjack/services/coverage_ratchet.py +381 -0
- crackerjack/services/debug.py +733 -0
- crackerjack/services/dependency_analyzer.py +460 -0
- crackerjack/services/dependency_monitor.py +622 -0
- crackerjack/services/documentation_generator.py +493 -0
- crackerjack/services/documentation_service.py +704 -0
- crackerjack/services/enhanced_filesystem.py +497 -0
- crackerjack/services/enterprise_optimizer.py +865 -0
- crackerjack/services/error_pattern_analyzer.py +676 -0
- crackerjack/services/file_filter.py +221 -0
- crackerjack/services/file_hasher.py +149 -0
- crackerjack/services/file_io_service.py +361 -0
- crackerjack/services/file_modifier.py +615 -0
- crackerjack/services/filesystem.py +381 -0
- crackerjack/services/git.py +422 -0
- crackerjack/services/health_metrics.py +615 -0
- crackerjack/services/heatmap_generator.py +744 -0
- crackerjack/services/incremental_executor.py +380 -0
- crackerjack/services/initialization.py +823 -0
- crackerjack/services/input_validator.py +668 -0
- crackerjack/services/intelligent_commit.py +327 -0
- crackerjack/services/log_manager.py +289 -0
- crackerjack/services/logging.py +228 -0
- crackerjack/services/lsp_client.py +628 -0
- crackerjack/services/memory_optimizer.py +414 -0
- crackerjack/services/metrics.py +587 -0
- crackerjack/services/monitoring/README.md +30 -0
- crackerjack/services/monitoring/__init__.py +9 -0
- crackerjack/services/monitoring/dependency_monitor.py +678 -0
- crackerjack/services/monitoring/error_pattern_analyzer.py +676 -0
- crackerjack/services/monitoring/health_metrics.py +716 -0
- crackerjack/services/monitoring/metrics.py +587 -0
- crackerjack/services/monitoring/performance_benchmarks.py +410 -0
- crackerjack/services/monitoring/performance_cache.py +388 -0
- crackerjack/services/monitoring/performance_monitor.py +569 -0
- crackerjack/services/parallel_executor.py +527 -0
- crackerjack/services/pattern_cache.py +333 -0
- crackerjack/services/pattern_detector.py +478 -0
- crackerjack/services/patterns/__init__.py +142 -0
- crackerjack/services/patterns/agents.py +107 -0
- crackerjack/services/patterns/code/__init__.py +15 -0
- crackerjack/services/patterns/code/detection.py +118 -0
- crackerjack/services/patterns/code/imports.py +107 -0
- crackerjack/services/patterns/code/paths.py +159 -0
- crackerjack/services/patterns/code/performance.py +119 -0
- crackerjack/services/patterns/code/replacement.py +36 -0
- crackerjack/services/patterns/core.py +212 -0
- crackerjack/services/patterns/documentation/__init__.py +14 -0
- crackerjack/services/patterns/documentation/badges_markdown.py +96 -0
- crackerjack/services/patterns/documentation/comments_blocks.py +83 -0
- crackerjack/services/patterns/documentation/docstrings.py +89 -0
- crackerjack/services/patterns/formatting.py +226 -0
- crackerjack/services/patterns/operations.py +339 -0
- crackerjack/services/patterns/security/__init__.py +23 -0
- crackerjack/services/patterns/security/code_injection.py +122 -0
- crackerjack/services/patterns/security/credentials.py +190 -0
- crackerjack/services/patterns/security/path_traversal.py +221 -0
- crackerjack/services/patterns/security/unsafe_operations.py +216 -0
- crackerjack/services/patterns/templates.py +62 -0
- crackerjack/services/patterns/testing/__init__.py +18 -0
- crackerjack/services/patterns/testing/error_patterns.py +107 -0
- crackerjack/services/patterns/testing/pytest_output.py +126 -0
- crackerjack/services/patterns/tool_output/__init__.py +16 -0
- crackerjack/services/patterns/tool_output/bandit.py +72 -0
- crackerjack/services/patterns/tool_output/other.py +97 -0
- crackerjack/services/patterns/tool_output/pyright.py +67 -0
- crackerjack/services/patterns/tool_output/ruff.py +44 -0
- crackerjack/services/patterns/url_sanitization.py +114 -0
- crackerjack/services/patterns/utilities.py +42 -0
- crackerjack/services/patterns/utils.py +339 -0
- crackerjack/services/patterns/validation.py +46 -0
- crackerjack/services/patterns/versioning.py +62 -0
- crackerjack/services/predictive_analytics.py +523 -0
- crackerjack/services/profiler.py +280 -0
- crackerjack/services/quality/README.md +415 -0
- crackerjack/services/quality/__init__.py +11 -0
- crackerjack/services/quality/anomaly_detector.py +392 -0
- crackerjack/services/quality/pattern_cache.py +333 -0
- crackerjack/services/quality/pattern_detector.py +479 -0
- crackerjack/services/quality/qa_orchestrator.py +491 -0
- crackerjack/services/quality/quality_baseline.py +395 -0
- crackerjack/services/quality/quality_baseline_enhanced.py +649 -0
- crackerjack/services/quality/quality_intelligence.py +949 -0
- crackerjack/services/regex_patterns.py +58 -0
- crackerjack/services/regex_utils.py +483 -0
- crackerjack/services/secure_path_utils.py +524 -0
- crackerjack/services/secure_status_formatter.py +450 -0
- crackerjack/services/secure_subprocess.py +635 -0
- crackerjack/services/security.py +239 -0
- crackerjack/services/security_logger.py +495 -0
- crackerjack/services/server_manager.py +411 -0
- crackerjack/services/smart_scheduling.py +167 -0
- crackerjack/services/status_authentication.py +460 -0
- crackerjack/services/status_security_manager.py +315 -0
- crackerjack/services/terminal_utils.py +0 -0
- crackerjack/services/thread_safe_status_collector.py +441 -0
- crackerjack/services/tool_filter.py +368 -0
- crackerjack/services/tool_version_service.py +43 -0
- crackerjack/services/unified_config.py +115 -0
- crackerjack/services/validation_rate_limiter.py +220 -0
- crackerjack/services/vector_store.py +689 -0
- crackerjack/services/version_analyzer.py +461 -0
- crackerjack/services/version_checker.py +223 -0
- crackerjack/services/websocket_resource_limiter.py +438 -0
- crackerjack/services/zuban_lsp_service.py +391 -0
- crackerjack/slash_commands/README.md +11 -0
- crackerjack/slash_commands/__init__.py +59 -0
- crackerjack/slash_commands/init.md +112 -0
- crackerjack/slash_commands/run.md +197 -0
- crackerjack/slash_commands/status.md +127 -0
- crackerjack/tools/README.md +11 -0
- crackerjack/tools/__init__.py +30 -0
- crackerjack/tools/_git_utils.py +105 -0
- crackerjack/tools/check_added_large_files.py +139 -0
- crackerjack/tools/check_ast.py +105 -0
- crackerjack/tools/check_json.py +103 -0
- crackerjack/tools/check_jsonschema.py +297 -0
- crackerjack/tools/check_toml.py +103 -0
- crackerjack/tools/check_yaml.py +110 -0
- crackerjack/tools/codespell_wrapper.py +72 -0
- crackerjack/tools/end_of_file_fixer.py +202 -0
- crackerjack/tools/format_json.py +128 -0
- crackerjack/tools/mdformat_wrapper.py +114 -0
- crackerjack/tools/trailing_whitespace.py +198 -0
- crackerjack/tools/validate_input_validator_patterns.py +236 -0
- crackerjack/tools/validate_regex_patterns.py +188 -0
- crackerjack/ui/README.md +11 -0
- crackerjack/ui/__init__.py +1 -0
- crackerjack/ui/dashboard_renderer.py +28 -0
- crackerjack/ui/templates/README.md +11 -0
- crackerjack/utils/console_utils.py +13 -0
- crackerjack/utils/dependency_guard.py +230 -0
- crackerjack/utils/retry_utils.py +275 -0
- crackerjack/workflows/README.md +590 -0
- crackerjack/workflows/__init__.py +46 -0
- crackerjack/workflows/actions.py +811 -0
- crackerjack/workflows/auto_fix.py +444 -0
- crackerjack/workflows/container_builder.py +499 -0
- crackerjack/workflows/definitions.py +443 -0
- crackerjack/workflows/engine.py +177 -0
- crackerjack/workflows/event_bridge.py +242 -0
- crackerjack-0.45.2.dist-info/METADATA +1678 -0
- crackerjack-0.45.2.dist-info/RECORD +478 -0
- {crackerjack-0.18.2.dist-info → crackerjack-0.45.2.dist-info}/WHEEL +1 -1
- crackerjack-0.45.2.dist-info/entry_points.txt +2 -0
- crackerjack/.gitignore +0 -14
- crackerjack/.libcst.codemod.yaml +0 -18
- crackerjack/.pdm.toml +0 -1
- crackerjack/.pre-commit-config.yaml +0 -91
- crackerjack/.pytest_cache/.gitignore +0 -2
- crackerjack/.pytest_cache/CACHEDIR.TAG +0 -4
- crackerjack/.pytest_cache/README.md +0 -8
- crackerjack/.pytest_cache/v/cache/nodeids +0 -1
- crackerjack/.pytest_cache/v/cache/stepwise +0 -1
- crackerjack/.ruff_cache/.gitignore +0 -1
- crackerjack/.ruff_cache/0.1.11/3256171999636029978 +0 -0
- crackerjack/.ruff_cache/0.1.14/602324811142551221 +0 -0
- crackerjack/.ruff_cache/0.1.4/10355199064880463147 +0 -0
- crackerjack/.ruff_cache/0.1.6/15140459877605758699 +0 -0
- crackerjack/.ruff_cache/0.1.7/1790508110482614856 +0 -0
- crackerjack/.ruff_cache/0.1.9/17041001205004563469 +0 -0
- crackerjack/.ruff_cache/0.11.2/4070660268492669020 +0 -0
- crackerjack/.ruff_cache/0.11.3/9818742842212983150 +0 -0
- crackerjack/.ruff_cache/0.11.4/9818742842212983150 +0 -0
- crackerjack/.ruff_cache/0.11.6/3557596832929915217 +0 -0
- crackerjack/.ruff_cache/0.11.7/10386934055395314831 +0 -0
- crackerjack/.ruff_cache/0.11.7/3557596832929915217 +0 -0
- crackerjack/.ruff_cache/0.11.8/530407680854991027 +0 -0
- crackerjack/.ruff_cache/0.2.0/10047773857155985907 +0 -0
- crackerjack/.ruff_cache/0.2.1/8522267973936635051 +0 -0
- crackerjack/.ruff_cache/0.2.2/18053836298936336950 +0 -0
- crackerjack/.ruff_cache/0.3.0/12548816621480535786 +0 -0
- crackerjack/.ruff_cache/0.3.3/11081883392474770722 +0 -0
- crackerjack/.ruff_cache/0.3.4/676973378459347183 +0 -0
- crackerjack/.ruff_cache/0.3.5/16311176246009842383 +0 -0
- crackerjack/.ruff_cache/0.5.7/1493622539551733492 +0 -0
- crackerjack/.ruff_cache/0.5.7/6231957614044513175 +0 -0
- crackerjack/.ruff_cache/0.5.7/9932762556785938009 +0 -0
- crackerjack/.ruff_cache/0.6.0/11982804814124138945 +0 -0
- crackerjack/.ruff_cache/0.6.0/12055761203849489982 +0 -0
- crackerjack/.ruff_cache/0.6.2/1206147804896221174 +0 -0
- crackerjack/.ruff_cache/0.6.4/1206147804896221174 +0 -0
- crackerjack/.ruff_cache/0.6.5/1206147804896221174 +0 -0
- crackerjack/.ruff_cache/0.6.7/3657366982708166874 +0 -0
- crackerjack/.ruff_cache/0.6.9/285614542852677309 +0 -0
- crackerjack/.ruff_cache/0.7.1/1024065805990144819 +0 -0
- crackerjack/.ruff_cache/0.7.1/285614542852677309 +0 -0
- crackerjack/.ruff_cache/0.7.3/16061516852537040135 +0 -0
- crackerjack/.ruff_cache/0.8.4/16354268377385700367 +0 -0
- crackerjack/.ruff_cache/0.9.10/12813592349865671909 +0 -0
- crackerjack/.ruff_cache/0.9.10/923908772239632759 +0 -0
- crackerjack/.ruff_cache/0.9.3/13948373885254993391 +0 -0
- crackerjack/.ruff_cache/0.9.9/12813592349865671909 +0 -0
- crackerjack/.ruff_cache/0.9.9/8843823720003377982 +0 -0
- crackerjack/.ruff_cache/CACHEDIR.TAG +0 -1
- crackerjack/crackerjack.py +0 -855
- crackerjack/pyproject.toml +0 -214
- crackerjack-0.18.2.dist-info/METADATA +0 -420
- crackerjack-0.18.2.dist-info/RECORD +0 -59
- crackerjack-0.18.2.dist-info/entry_points.txt +0 -4
- {crackerjack-0.18.2.dist-info → crackerjack-0.45.2.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,1357 @@
|
|
|
1
|
+
import re
|
|
2
|
+
import subprocess
|
|
3
|
+
import time
|
|
4
|
+
import typing as t
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
|
|
7
|
+
from acb.config import root_path
|
|
8
|
+
from acb.console import Console
|
|
9
|
+
from acb.depends import Inject, depends
|
|
10
|
+
from rich import box
|
|
11
|
+
from rich.panel import Panel
|
|
12
|
+
from rich.table import Table
|
|
13
|
+
from rich.text import Text
|
|
14
|
+
|
|
15
|
+
from crackerjack.config import get_console_width
|
|
16
|
+
from crackerjack.models.protocols import (
|
|
17
|
+
CoverageBadgeServiceProtocol,
|
|
18
|
+
CoverageRatchetProtocol,
|
|
19
|
+
OptionsProtocol,
|
|
20
|
+
)
|
|
21
|
+
from crackerjack.models.test_models import TestFailure
|
|
22
|
+
from crackerjack.services.lsp_client import LSPClient
|
|
23
|
+
|
|
24
|
+
from .test_command_builder import TestCommandBuilder
|
|
25
|
+
from .test_executor import TestExecutor
|
|
26
|
+
|
|
27
|
+
# Matches ANSI escape sequences: single-character escapes (ESC @ through ESC _)
# and CSI sequences (ESC [ ... final byte). Presumably used to strip terminal
# color/control codes from captured tool output — usage not visible here; verify.
ANSI_ESCAPE_RE = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class TestManager:
|
|
31
|
+
@depends.inject
def __init__(
    self,
    console: Inject[Console],
    coverage_ratchet: Inject[CoverageRatchetProtocol],
    coverage_badge: Inject[CoverageBadgeServiceProtocol],
    command_builder: Inject[TestCommandBuilder],
    lsp_client: Inject[LSPClient] | None = None,
) -> None:
    """Initialize the test manager with ACB-injected collaborators.

    All parameters are resolved by the ACB dependency-injection container
    via the ``@depends.inject`` decorator; callers normally do not pass
    them explicitly. ``lsp_client`` is optional and may remain ``None``.
    """
    self.console = console
    # Ensure a concrete pathlib.Path instance to avoid async Path behaviors
    # and to guarantee sync filesystem operations in this manager.
    try:
        self.pkg_path = Path(str(root_path))
    except Exception:
        # Fallback in the unlikely event root_path lacks __str__
        self.pkg_path = Path(root_path)

    # Ensure downstream components receive a concrete pathlib.Path
    self.executor = TestExecutor(console, self.pkg_path)
    self.command_builder = command_builder

    # Services injected via ACB DI
    self.coverage_ratchet = coverage_ratchet
    self._coverage_badge_service = coverage_badge
    self._lsp_client = lsp_client

    # Mutable per-session state: failure strings from the most recent run,
    # an optional progress-event callback, and feature toggles (both
    # default to enabled).
    self._last_test_failures: list[str] = []
    self._progress_callback: t.Callable[[dict[str, t.Any]], None] | None = None
    self.coverage_ratchet_enabled = True
    self.use_lsp_diagnostics = True
|
|
62
|
+
|
|
63
|
+
def set_progress_callback(
    self,
    callback: t.Callable[[dict[str, t.Any]], None] | None,
) -> None:
    """Register the progress-event callback, or clear it by passing ``None``."""
    self._progress_callback = callback
|
|
68
|
+
|
|
69
|
+
def set_coverage_ratchet_enabled(self, enabled: bool) -> None:
    """Toggle the coverage ratchet and announce the new state on the console."""
    self.coverage_ratchet_enabled = enabled
    # Pick the announcement matching the new state; messages are unchanged.
    message = (
        "[cyan]📊[/cyan] Coverage ratchet enabled-targeting 100 % coverage"
        if enabled
        else "[yellow]⚠️[/yellow] Coverage ratchet disabled"
    )
    self.console.print(message)
|
|
77
|
+
|
|
78
|
+
def run_tests(self, options: OptionsProtocol) -> bool:
    """Run the full test workflow and return overall success.

    Returns ``True`` immediately when ``options.test`` exists and is
    falsy (tests disabled). Otherwise executes the workflow and routes
    the result to the success or failure handler based on the subprocess
    return code. Any exception raised along the way is funneled into
    ``_handle_test_error`` rather than propagated.
    """
    # Early return if tests are disabled
    if hasattr(options, "test") and not options.test:
        return True

    start_time = time.time()

    try:
        result = self._execute_test_workflow(options)
        duration = time.time() - start_time

        # Get worker count for statistics panel (don't print info messages)
        workers = self.command_builder.get_optimal_workers(
            options, print_info=False
        )

        if result.returncode == 0:
            return self._handle_test_success(
                result.stdout, duration, options, workers
            )
        # Fix: the original guarded ``result.stderr if result else ""`` here,
        # but ``result`` cannot be None at this point — ``result.returncode``
        # above would already have raised (and been caught below) — so the
        # dead conditionals are removed.
        return self._handle_test_failure(
            result.stderr,
            result.stdout,
            duration,
            options,
            workers,
        )

    except Exception as e:
        # Broad by design: any workflow failure is reported, not raised.
        return self._handle_test_error(start_time, e)
|
|
109
|
+
|
|
110
|
+
def run_specific_tests(self, test_pattern: str) -> bool:
    """Run only the tests matching *test_pattern*; return True on success."""
    self.console.print(f"[cyan]🧪[/cyan] Running tests matching: {test_pattern}")

    command = self.command_builder.build_specific_test_command(test_pattern)
    outcome = self.executor.execute_with_progress(command)

    passed = outcome.returncode == 0
    if passed:
        self.console.print("[green]✅[/green] Specific tests passed")
    else:
        self.console.print("[red]❌[/red] Some specific tests failed")
    return passed
|
|
123
|
+
|
|
124
|
+
def validate_test_environment(self) -> bool:
    """Check that the test environment is usable.

    Returns False when no test files exist or the validation command
    exits non-zero; True otherwise. Shows a transient spinner while
    the validation subprocess runs.
    """
    if not self.has_tests():
        self.console.print("[yellow]⚠️[/yellow] No tests found")
        return False

    # Imported locally to keep rich.live/spinner off the import path
    # unless validation is actually requested.
    from rich.live import Live
    from rich.spinner import Spinner

    cmd = self.command_builder.build_validation_command()

    spinner = Spinner("dots", text="[cyan]Validating test environment...[/cyan]")
    # transient=True removes the spinner once the subprocess finishes.
    with Live(spinner, console=self.console, transient=True):
        result = subprocess.run(
            cmd, cwd=self.pkg_path, capture_output=True, text=True
        )

    if result.returncode != 0:
        self.console.print("[red]❌[/red] Test environment validation failed")
        self.console.print(result.stderr)
        return False

    self.console.print("[green]✅[/green] Test environment validated")
    return True
|
|
147
|
+
|
|
148
|
+
def get_coverage_ratchet_status(self) -> dict[str, t.Any]:
    """Return the coverage ratchet's status report (delegates to the service)."""
    return self.coverage_ratchet.get_status_report()
|
|
150
|
+
|
|
151
|
+
def get_test_stats(self) -> dict[str, t.Any]:
    """Summarize test-related state for reporting."""
    stats: dict[str, t.Any] = {}
    stats["has_tests"] = self.has_tests()
    stats["coverage_ratchet_enabled"] = self.coverage_ratchet_enabled
    stats["last_failures_count"] = len(self._last_test_failures)
    return stats
|
|
157
|
+
|
|
158
|
+
def get_test_failures(self) -> list[str]:
    """Return a shallow copy of the most recent test failure lines."""
    return list(self._last_test_failures)
|
|
160
|
+
|
|
161
|
+
def get_test_command(self, options: OptionsProtocol) -> list[str]:
    """Return the test invocation that would be used for *options*."""
    return self.command_builder.build_command(options)
|
|
163
|
+
|
|
164
|
+
def get_coverage_report(self) -> str | None:
    """Return the coverage report text, or None if it cannot be produced."""
    try:
        return self.coverage_ratchet.get_coverage_report()
    except Exception:
        # Best-effort: a missing or broken report is reported as None
        # rather than propagating an error to the caller.
        return None
|
|
169
|
+
|
|
170
|
+
def _get_coverage_from_file(self) -> float | None:
    """Extract coverage from coverage.json file.

    Tries, in order: the "totals.percent_covered" field, a top-level
    "percent_covered" field, then a recomputation from the per-file
    "files" summaries. Returns None when the file is missing or cannot
    be parsed.
    """
    import json

    coverage_json_path = self.pkg_path / "coverage.json"
    if not coverage_json_path.exists():
        return None

    try:
        with coverage_json_path.open() as f:
            coverage_data = json.load(f)

        # Extract coverage percentage from totals
        totals = coverage_data.get("totals", {})
        percent_covered = totals.get("percent_covered", None)

        if percent_covered is not None:
            return float(percent_covered)

        # Alternative extraction methods for different coverage formats
        if "percent_covered" in coverage_data:
            return float(coverage_data["percent_covered"])

        # Check for coverage in files section
        files = coverage_data.get("files", {})
        if files:
            total_lines = 0
            covered_lines = 0
            for file_data in files.values():
                summary = file_data.get("summary", {})
                total_lines += summary.get("num_statements", 0)
                covered_lines += summary.get("covered_lines", 0)

            if total_lines > 0:
                return (covered_lines / total_lines) * 100

        return None

    except (json.JSONDecodeError, ValueError, KeyError, TypeError):
        # Malformed JSON or unexpected field types: treat as no data.
        return None
|
|
210
|
+
|
|
211
|
+
def _handle_no_ratchet_status(
|
|
212
|
+
self, direct_coverage: float | None
|
|
213
|
+
) -> dict[str, t.Any]:
|
|
214
|
+
"""Handle case when ratchet is not initialized."""
|
|
215
|
+
if direct_coverage is not None:
|
|
216
|
+
return {
|
|
217
|
+
"status": "coverage_available",
|
|
218
|
+
"coverage_percent": direct_coverage,
|
|
219
|
+
"message": "Coverage data available from coverage.json",
|
|
220
|
+
"source": "coverage.json",
|
|
221
|
+
}
|
|
222
|
+
|
|
223
|
+
return {
|
|
224
|
+
"status": "not_initialized",
|
|
225
|
+
"coverage_percent": 0.0,
|
|
226
|
+
"message": "Coverage ratchet not initialized",
|
|
227
|
+
}
|
|
228
|
+
|
|
229
|
+
def _get_final_coverage(
|
|
230
|
+
self, ratchet_coverage: float, direct_coverage: float | None
|
|
231
|
+
) -> float:
|
|
232
|
+
"""Determine final coverage value."""
|
|
233
|
+
return direct_coverage if direct_coverage is not None else ratchet_coverage
|
|
234
|
+
|
|
235
|
+
def get_coverage(self) -> dict[str, t.Any]:
    """Return a coverage status dict combining ratchet and coverage.json data.

    Prefers a fresh value read from coverage.json over the ratchet's
    stored value. Never raises: any error becomes a dict with
    ``"status": "error"``.
    """
    try:
        status = self.coverage_ratchet.get_status_report()

        # Check if we have actual coverage data from coverage.json even if ratchet is not initialized
        direct_coverage = self._get_coverage_from_file()

        # If ratchet is not initialized but we have direct coverage data, use it
        if (
            not status or status.get("status") == "not_initialized"
        ) and direct_coverage is not None:
            return self._handle_no_ratchet_status(direct_coverage)

        # If ratchet is not initialized and no direct coverage, return not initialized
        if not status or status.get("status") == "not_initialized":
            return self._handle_no_ratchet_status(None)

        # Use ratchet data, but prefer direct coverage if available and different
        ratchet_coverage = status.get("current_coverage", 0.0)
        final_coverage = self._get_final_coverage(ratchet_coverage, direct_coverage)

        return {
            "status": "active",
            "coverage_percent": final_coverage,
            "target_coverage": status.get("target_coverage", 100.0),
            "next_milestone": status.get("next_milestone"),
            "progress_percent": status.get("progress_percent", 0.0),
            "last_updated": status.get("last_updated"),
            "milestones_achieved": status.get("milestones_achieved", []),
            "source": "coverage.json" if direct_coverage is not None else "ratchet",
        }
    except Exception as e:
        # Coverage reporting must never break the workflow.
        return {
            "status": "error",
            "coverage_percent": 0.0,
            "error": str(e),
            "message": "Failed to get coverage information",
        }
|
|
273
|
+
|
|
274
|
+
def has_tests(self) -> bool:
    """Return True when any test files exist for this package.

    Looks for ``test_*.py`` / ``*_test.py`` files recursively inside a
    ``tests`` or ``test`` directory, then for the same patterns directly
    at the package root.
    """
    patterns = ("test_*.py", "*_test.py")

    for directory in ("tests", "test"):
        candidate = self.pkg_path / directory
        if candidate.exists() and candidate.is_dir():
            if any(
                any(candidate.glob(f"**/{pattern}")) for pattern in patterns
            ):
                return True

    # Fall back to test files sitting directly at the package root.
    return any(any(self.pkg_path.glob(pattern)) for pattern in patterns)
|
|
290
|
+
|
|
291
|
+
def _execute_test_workflow(
    self, options: OptionsProtocol
) -> subprocess.CompletedProcess[str]:
    """Build and execute the test command for *options*.

    Uses the AI-progress executor when a progress callback has been
    registered, otherwise the plain progress executor. Returns the
    completed-process result for the caller to interpret.
    """
    self._print_test_start_message(options)

    cmd = self.command_builder.build_command(options)

    if self._progress_callback:
        return self.executor.execute_with_ai_progress(
            cmd, self._progress_callback, self._get_timeout(options)
        )
    return self.executor.execute_with_progress(cmd, self._get_timeout(options))
|
|
303
|
+
|
|
304
|
+
def _print_test_start_message(self, options: OptionsProtocol) -> None:
    """Announce the test run together with its worker count and timeout."""
    worker_count = self.command_builder.get_optimal_workers(
        options, print_info=False
    )
    timeout_s = self.command_builder.get_test_timeout(options)

    message = (
        f"[cyan]🧪[/cyan] Running tests (workers: {worker_count}, timeout: {timeout_s}s)"
    )
    self.console.print(message)
|
|
311
|
+
|
|
312
|
+
def _handle_test_success(
    self,
    output: str,
    duration: float,
    options: OptionsProtocol,
    workers: int | str,
) -> bool:
    """Report a passing test run, render statistics, then run the ratchet.

    Args:
        output: Captured pytest stdout used for statistics parsing.
        duration: Wall-clock run time in seconds.
        options: Options the run was executed with (currently unused here,
            kept for signature parity with _handle_test_failure).
        workers: Worker count shown in the statistics panel.

    Returns:
        True, unless the coverage ratchet is enabled and reports a
        regression.
    """
    # Fixed: "{duration: .1f}" (space sign-flag) printed a stray space
    # before the number; ":.1f" matches _handle_test_failure's message.
    self.console.print(f"[green]✅[/green] Tests passed in {duration:.1f}s")

    # Parse and display test statistics panel
    stats = self._parse_test_statistics(output)
    if self._should_render_test_panel(stats):
        self._render_test_results_panel(stats, workers, success=True)

    if self.coverage_ratchet_enabled:
        return self._process_coverage_ratchet()

    return True
|
|
330
|
+
|
|
331
|
+
def _handle_test_failure(
    self,
    stderr: str,
    stdout: str,
    duration: float,
    options: OptionsProtocol,
    workers: int | str,
) -> bool:
    """Report a failing test run with progressively detailed output.

    Always shows key failure lines when output exists; in verbose or
    ai_debug mode additionally renders the full formatted output.
    Stores extracted failure lines on ``self._last_test_failures`` for
    later retrieval via get_test_failures. Always returns False.
    """
    self.console.print(f"[red]❌[/red] Tests failed in {duration:.1f}s")

    # Parse and display test statistics panel (use stdout for stats)
    combined_output = stdout + "\n" + stderr
    clean_output = self._strip_ansi_codes(combined_output)
    stats = self._parse_test_statistics(clean_output, already_clean=True)
    if self._should_render_test_panel(stats):
        self._render_test_results_panel(stats, workers, success=False)

    # Always show key failure information, not just in verbose mode
    if clean_output.strip():
        # Extract and show essential failure details even in non-verbose mode
        failure_lines = self._extract_failure_lines(clean_output)
        if failure_lines:
            self._last_test_failures = failure_lines
            self._render_banner("Key Test Failures", line_style="red")

            for failure in failure_lines:
                self.console.print(f"[red]• {failure}[/red]")
        else:
            self._last_test_failures = []

        # Enhanced error reporting in verbose mode
        if options.verbose or getattr(options, "ai_debug", False):
            self._render_banner(
                "Full Test Output (Enhanced)",
                line_style="red",
            )
            # Use Rich-formatted output instead of raw dump
            self._render_formatted_output(clean_output, options, already_clean=True)
    else:
        # Show some information even when there's no output
        border_line = "-" * getattr(options, "column_width", 70)
        self.console.print("\n🧪 TESTS Failed test execution")
        self.console.print(border_line)

        self.console.print(
            " [yellow]This may indicate a timeout or critical error[/yellow]"
        )
        self.console.print(
            f" [yellow]Duration: {duration:.1f}s, Workers: {workers}[/yellow]"
        )
        if duration > 290:  # Approaching 300s timeout
            self.console.print(
                " [yellow]⚠️ Execution time was very close to timeout, may have timed out[/yellow]"
            )
        self.console.print(
            " [red]Workflow failed: Test workflow execution failed[/red]"
        )
        self.console.print(border_line)
        self._last_test_failures = []

    return False
|
|
392
|
+
|
|
393
|
+
def _handle_test_error(self, start_time: float, error: Exception) -> bool:
|
|
394
|
+
duration = time.time() - start_time
|
|
395
|
+
self.console.print(
|
|
396
|
+
f"[red]💥[/red] Test execution error after {duration: .1f}s: {error}"
|
|
397
|
+
)
|
|
398
|
+
return False
|
|
399
|
+
|
|
400
|
+
def _parse_test_statistics(
    self, output: str, *, already_clean: bool = False
) -> dict[str, t.Any]:
    """Parse test statistics from pytest output.

    Extracts metrics like passed, failed, skipped, errors, and duration
    from pytest's summary line.

    Args:
        output: Raw pytest output text
        already_clean: When True, *output* has already had ANSI escape
            codes stripped and is used as-is.

    Returns:
        Dictionary containing test statistics
    """
    clean_output = output if already_clean else self._strip_ansi_codes(output)
    # All counters start at zero; coverage is None when absent.
    stats = {
        "total": 0,
        "passed": 0,
        "failed": 0,
        "skipped": 0,
        "errors": 0,
        "xfailed": 0,
        "xpassed": 0,
        "duration": 0.0,
        "coverage": None,
    }

    try:
        # Extract summary and duration
        summary_match = self._extract_pytest_summary(clean_output)
        if summary_match:
            summary_text, duration = self._parse_summary_match(
                summary_match, clean_output
            )
            stats["duration"] = duration

            # Extract metrics from summary
            self._extract_test_metrics(summary_text, stats)

        # Calculate totals and fallback if summary missing
        self._calculate_total_tests(stats, clean_output)

        # Extract coverage if present
        stats["coverage"] = self._extract_coverage_from_output(clean_output)

    except (ValueError, AttributeError) as e:
        # Parsing problems are non-fatal; the zeroed stats are returned.
        self.console.print(f"[dim]⚠️ Failed to parse test statistics: {e}[/dim]")

    return stats
|
|
449
|
+
|
|
450
|
+
def _extract_pytest_summary(self, output: str) -> re.Match[str] | None:
|
|
451
|
+
"""Extract pytest summary line match from output."""
|
|
452
|
+
summary_patterns = [
|
|
453
|
+
r"=+\s+(.+?)\s+in\s+([\d.]+)s?\s*=+", # "======= 5 passed in 1.23s ======="
|
|
454
|
+
r"(\d+\s+\w+)+\s+in\s+([\d.]+)s?", # "5 passed, 2 failed in 1.23s"
|
|
455
|
+
r"(\d+.*)in\s+([\d.]+)s?", # More flexible format
|
|
456
|
+
]
|
|
457
|
+
|
|
458
|
+
for pattern in summary_patterns:
|
|
459
|
+
match = re.search(pattern, output)
|
|
460
|
+
if match:
|
|
461
|
+
return match
|
|
462
|
+
return None
|
|
463
|
+
|
|
464
|
+
def _parse_summary_match(
    self, match: re.Match[str], output: str
) -> tuple[str, float]:
    """Parse summary text and duration from regex match.

    With two or more captured groups, group 1 is the summary text and
    group 2 the duration in seconds. With a single group the pattern
    only captured a duration, so the whole *output* is returned as the
    summary text instead.
    """
    if len(match.groups()) >= 2:
        summary_text = match.group(1)
        duration = float(match.group(2))
    else:
        # Pattern only captured duration
        duration = (
            float(match.group(1))
            if match.group(1).replace(".", "").isdigit()
            else 0.0
        )
        summary_text = output

    return summary_text, duration
|
|
481
|
+
|
|
482
|
+
def _extract_test_metrics(self, summary_text: str, stats: dict[str, t.Any]) -> None:
|
|
483
|
+
"""Extract individual test metrics from summary text."""
|
|
484
|
+
for metric in ("passed", "failed", "skipped", "error", "xfailed", "xpassed"):
|
|
485
|
+
metric_pattern = rf"(\d+)\s+{metric}"
|
|
486
|
+
metric_match = re.search(metric_pattern, summary_text, re.IGNORECASE)
|
|
487
|
+
if metric_match:
|
|
488
|
+
count = int(metric_match.group(1))
|
|
489
|
+
key = "errors" if metric == "error" else metric
|
|
490
|
+
stats[key] = count
|
|
491
|
+
|
|
492
|
+
def _calculate_total_tests(self, stats: dict[str, t.Any], output: str) -> None:
|
|
493
|
+
"""Calculate total tests and apply fallback counting if needed."""
|
|
494
|
+
stats["total"] = sum(
|
|
495
|
+
[
|
|
496
|
+
stats["passed"],
|
|
497
|
+
stats["failed"],
|
|
498
|
+
stats["skipped"],
|
|
499
|
+
stats["errors"],
|
|
500
|
+
stats["xfailed"],
|
|
501
|
+
stats["xpassed"],
|
|
502
|
+
]
|
|
503
|
+
)
|
|
504
|
+
|
|
505
|
+
# Fallback: manually count from output if total is still 0
|
|
506
|
+
if stats["total"] == 0:
|
|
507
|
+
self._fallback_count_tests(output, stats)
|
|
508
|
+
|
|
509
|
+
def _fallback_count_tests(self, output: str, stats: dict[str, t.Any]) -> None:
    """Manually count test results from output when parsing fails.

    First counts per-test result lines (those containing "::"), then
    falls back to legacy single-character status patterns when that
    still yields zero.
    """
    status_tokens = [
        ("passed", "PASSED"),
        ("failed", "FAILED"),
        ("skipped", "SKIPPED"),
        ("errors", "ERROR"),
        ("xfailed", "XFAIL"),
        ("xpassed", "XPASS"),
    ]

    for raw_line in output.splitlines():
        line = raw_line.strip()
        if "::" not in line:
            continue

        line_upper = line.upper()
        # Lines that *start* with a status word are summary echoes
        # ("FAILED path::test"), not primary result lines; skip them
        # so each test is counted once.
        if line_upper.startswith(
            ("FAILED", "ERROR", "XPASS", "XFAIL", "SKIPPED", "PASSED")
        ):
            continue

        for key, token in status_tokens:
            if token in line_upper:
                stats[key] += 1
                break

    stats["total"] = sum(
        [
            stats["passed"],
            stats["failed"],
            stats["skipped"],
            stats["errors"],
            stats.get("xfailed", 0),
            stats.get("xpassed", 0),
        ]
    )

    if stats["total"] == 0:
        # Last resort: count bare progress markers anywhere in the text.
        legacy_patterns = {
            "passed": r"(?:\.|✓)\s*(?:PASSED|pass)",
            "failed": r"(?:F|X|❌)\s*(?:FAILED|fail)",
            "skipped": r"(?:s|S|.SKIPPED|skip)",
            "errors": r"ERROR|E\s+",
        }
        for key, pattern in legacy_patterns.items():
            stats[key] = len(re.findall(pattern, output, re.IGNORECASE))

        stats["total"] = (
            stats["passed"] + stats["failed"] + stats["skipped"] + stats["errors"]
        )
|
|
560
|
+
|
|
561
|
+
def _extract_coverage_from_output(self, output: str) -> float | None:
|
|
562
|
+
"""Extract coverage percentage from pytest output."""
|
|
563
|
+
coverage_pattern = r"TOTAL\s+\d+\s+\d+\s+(\d+)%"
|
|
564
|
+
coverage_match = re.search(coverage_pattern, output)
|
|
565
|
+
if coverage_match:
|
|
566
|
+
return float(coverage_match.group(1))
|
|
567
|
+
return None
|
|
568
|
+
|
|
569
|
+
def _should_render_test_panel(self, stats: dict[str, t.Any]) -> bool:
|
|
570
|
+
"""Determine if the test results panel should be rendered."""
|
|
571
|
+
return any(
|
|
572
|
+
[
|
|
573
|
+
stats.get("total", 0) > 0,
|
|
574
|
+
stats.get("passed", 0) > 0,
|
|
575
|
+
stats.get("failed", 0) > 0,
|
|
576
|
+
stats.get("errors", 0) > 0,
|
|
577
|
+
stats.get("skipped", 0) > 0,
|
|
578
|
+
stats.get("xfailed", 0) > 0,
|
|
579
|
+
stats.get("xpassed", 0) > 0,
|
|
580
|
+
stats.get("duration", 0.0) > 0.0,
|
|
581
|
+
stats.get("coverage") is not None,
|
|
582
|
+
]
|
|
583
|
+
)
|
|
584
|
+
|
|
585
|
+
def _render_test_results_panel(
    self,
    stats: dict[str, t.Any],
    workers: int | str,
    success: bool,
) -> None:
    """Render test results panel with statistics similar to hook results.

    Args:
        stats: Dictionary of test statistics from _parse_test_statistics
        workers: Number of workers used (or "auto")
        success: Whether tests passed overall
    """
    table = Table(box=box.SIMPLE, header_style="bold bright_white")
    table.add_column("Metric", style="cyan", overflow="fold")
    table.add_column("Count", justify="right", style="bright_white")
    table.add_column("Percentage", justify="right", style="magenta")

    total = stats["total"]

    # Add rows for each metric
    metrics = [
        ("✅ Passed", stats["passed"], "green"),
        ("❌ Failed", stats["failed"], "red"),
        ("⏭ Skipped", stats["skipped"], "yellow"),
        ("💥 Errors", stats["errors"], "red"),
    ]

    # Only show xfailed/xpassed if they exist
    if stats.get("xfailed", 0) > 0:
        metrics.append(("⚠️ Expected Failures", stats["xfailed"], "yellow"))
    if stats.get("xpassed", 0) > 0:
        metrics.append(("✨ Unexpected Passes", stats["xpassed"], "green"))

    for label, count, _ in metrics:
        # Guard against division by zero when no tests were counted.
        percentage = f"{(count / total * 100):.1f}%" if total > 0 else "0.0%"
        table.add_row(label, str(count), percentage)

    # Add separator and summary rows
    table.add_row("─" * 20, "─" * 10, "─" * 15, style="dim")
    table.add_row("📊 Total Tests", str(total), "100.0%", style="bold")
    table.add_row(
        "⏱ Duration",
        f"{stats['duration']:.2f}s",
        "",
        style="bold magenta",
    )
    table.add_row(
        "👥 Workers",
        str(workers),
        "",
        style="bold cyan",
    )

    # Add coverage if available
    if stats.get("coverage") is not None:
        table.add_row(
            "📈 Coverage",
            f"{stats['coverage']:.1f}%",
            "",
            style="bold green",
        )

    # Create panel with appropriate styling
    border_style = "green" if success else "red"
    title_icon = "✅" if success else "❌"
    title_text = "Test Results" if success else "Test Results (Failed)"

    panel = Panel(
        table,
        title=f"[bold]{title_icon} {title_text}[/bold]",
        border_style=border_style,
        padding=(0, 1),
        width=get_console_width(),
    )

    self.console.print(panel)
|
|
662
|
+
|
|
663
|
+
def _render_banner(
    self,
    title: str,
    *,
    line_style: str = "red",
    title_style: str | None = None,
    char: str = "━",
    padding: bool = True,
) -> None:
    """Render a horizontal banner that respects configured console width.

    Args:
        title: Text shown between the two rule lines.
        line_style: Rich style for the rule lines.
        title_style: Optional explicit title style; defaults to the bold
            variant of *line_style*.
        char: Character the rule lines are drawn with.
        padding: When True, print a blank line before and after.
    """
    # Enforce a minimum width so the banner stays visible on tiny consoles.
    width = max(20, get_console_width())
    line_text = Text(char * width, style=line_style)
    resolved_title_style = title_style or ("bold " + line_style).strip()
    title_text = Text(title, style=resolved_title_style)

    if padding:
        self.console.print()

    self.console.print(line_text)
    self.console.print(title_text)
    self.console.print(line_text)

    if padding:
        self.console.print()
|
|
687
|
+
|
|
688
|
+
def _process_coverage_ratchet(self) -> bool:
    """Run the coverage ratchet check after a successful test run.

    Returns True when the ratchet is disabled or coverage held/improved;
    False on a coverage regression.
    """
    if not self.coverage_ratchet_enabled:
        return True

    ratchet_result = self.coverage_ratchet.check_and_update_coverage()

    # Update coverage badge if coverage information is available
    self._update_coverage_badge(ratchet_result)

    return self._handle_ratchet_result(ratchet_result)
|
|
698
|
+
|
|
699
|
+
def _attempt_coverage_extraction(self) -> float | None:
    """Attempt to extract coverage from the available sources.

    Currently the only source is coverage.json on disk; the result is
    passed through unchanged (None when nothing could be read).
    """
    return self._get_coverage_from_file()
|
|
707
|
+
|
|
708
|
+
def _handle_coverage_extraction_result(
|
|
709
|
+
self, current_coverage: float | None
|
|
710
|
+
) -> float | None:
|
|
711
|
+
"""Handle the result of coverage extraction attempts."""
|
|
712
|
+
if current_coverage is not None:
|
|
713
|
+
self.console.print(
|
|
714
|
+
f"[dim]📊 Coverage extracted from coverage.json: {current_coverage:.2f}%[/dim]"
|
|
715
|
+
)
|
|
716
|
+
return current_coverage
|
|
717
|
+
|
|
718
|
+
def _try_service_coverage(self) -> float | None:
|
|
719
|
+
"""Try coverage service fallback.
|
|
720
|
+
|
|
721
|
+
Returns:
|
|
722
|
+
Coverage value if available, None otherwise
|
|
723
|
+
"""
|
|
724
|
+
try:
|
|
725
|
+
current_coverage = self.coverage_ratchet.get_baseline_coverage()
|
|
726
|
+
if current_coverage is not None and current_coverage > 0:
|
|
727
|
+
self.console.print(
|
|
728
|
+
f"[dim]📊 Coverage from service fallback: {current_coverage:.2f}%[/dim]"
|
|
729
|
+
)
|
|
730
|
+
return current_coverage
|
|
731
|
+
return None
|
|
732
|
+
except (AttributeError, Exception):
|
|
733
|
+
# Service method doesn't exist or failed, skip
|
|
734
|
+
return None
|
|
735
|
+
|
|
736
|
+
def _handle_zero_coverage_fallback(self, current_coverage: float | None) -> None:
    """Handle 0.0% fallback case when coverage.json exists.

    Only warns: when coverage.json is present but no value could be
    extracted, recording 0.0% would be misleading, so nothing is stored.
    """
    coverage_json_path = self.pkg_path / "coverage.json"
    if current_coverage is None and coverage_json_path.exists():
        self.console.print(
            "[yellow]⚠️[/yellow] Skipping 0.0% fallback when coverage.json exists"
        )
|
|
743
|
+
|
|
744
|
+
def _get_fallback_coverage(
    self, ratchet_result: dict[str, t.Any], current_coverage: float | None
) -> float | None:
    """Get coverage from fallback sources.

    Args:
        ratchet_result: Result dict from the ratchet check; may contain
            a "current_coverage" entry.
        current_coverage: Value already extracted from coverage.json,
            or None when extraction failed.

    Returns:
        The best available coverage value, or None when no source had one.
    """
    # Secondary: Try ratchet result if coverage.json failed
    if current_coverage is None and ratchet_result:
        # Try to extract from ratchet result
        if "current_coverage" in ratchet_result:
            current_coverage = ratchet_result["current_coverage"]
            if current_coverage is not None and current_coverage > 0:
                self.console.print(
                    f"[dim]📊 Coverage from ratchet result: {current_coverage:.2f}%[/dim]"
                )

    # Tertiary: Try coverage service, but only accept non-zero values
    if current_coverage is None:
        current_coverage = self._try_service_coverage()
        if current_coverage is None:
            self._handle_zero_coverage_fallback(current_coverage)

    return current_coverage
|
|
765
|
+
|
|
766
|
+
def _update_coverage_badge(self, ratchet_result: dict[str, t.Any]) -> None:
    """Update coverage badge in README.md if coverage changed.

    Best-effort: any failure is reported as a warning and never aborts
    the surrounding test workflow.
    """
    try:
        # Check if coverage files exist and inform user
        coverage_json_path = self.pkg_path / "coverage.json"
        ratchet_path = self.pkg_path / ".coverage-ratchet.json"

        if not coverage_json_path.exists():
            self.console.print(
                "[yellow]ℹ️[/yellow] Coverage file doesn't exist yet, will be created after test run"
            )
        if not ratchet_path.exists():
            self.console.print(
                "[yellow]ℹ️[/yellow] Coverage ratchet file doesn't exist yet, initializing..."
            )

        # Get current coverage directly from coverage.json to ensure freshest data
        current_coverage = self._attempt_coverage_extraction()
        current_coverage = self._handle_coverage_extraction_result(current_coverage)

        # Get fallback coverage if needed
        current_coverage = self._get_fallback_coverage(
            ratchet_result, current_coverage
        )

        # Only update badge if we have valid coverage data
        if current_coverage is not None and current_coverage >= 0:
            if self._coverage_badge_service.should_update_badge(current_coverage):
                self._coverage_badge_service.update_readme_coverage_badge(
                    current_coverage
                )
                self.console.print(
                    f"[green]✅[/green] Badge updated to {current_coverage:.2f}%"
                )
            else:
                self.console.print(
                    f"[dim]📊 Badge unchanged (current: {current_coverage:.2f}%)[/dim]"
                )
        else:
            self.console.print(
                "[yellow]⚠️[/yellow] No valid coverage data found for badge update"
            )

    except Exception as e:
        # Don't fail the test process if badge update fails
        self.console.print(f"[yellow]⚠️[/yellow] Badge update failed: {e}")
|
|
812
|
+
|
|
813
|
+
def _handle_ratchet_result(self, ratchet_result: dict[str, t.Any]) -> bool:
|
|
814
|
+
if ratchet_result.get("success", False):
|
|
815
|
+
if ratchet_result.get("improved", False):
|
|
816
|
+
self._handle_coverage_improvement(ratchet_result)
|
|
817
|
+
return True
|
|
818
|
+
else:
|
|
819
|
+
if "message" in ratchet_result:
|
|
820
|
+
self.console.print(f"[red]📉[/red] {ratchet_result['message']}")
|
|
821
|
+
else:
|
|
822
|
+
current = ratchet_result.get("current_coverage", 0)
|
|
823
|
+
previous = ratchet_result.get("previous_coverage", 0)
|
|
824
|
+
self.console.print(
|
|
825
|
+
f"[red]📉[/red] Coverage regression: "
|
|
826
|
+
f"{current: .2f}% < {previous: .2f}%"
|
|
827
|
+
)
|
|
828
|
+
return False
|
|
829
|
+
|
|
830
|
+
def _handle_coverage_improvement(self, ratchet_result: dict[str, t.Any]) -> None:
|
|
831
|
+
improvement = ratchet_result.get("improvement", 0)
|
|
832
|
+
current = ratchet_result.get("current_coverage", 0)
|
|
833
|
+
|
|
834
|
+
self.console.print(
|
|
835
|
+
f"[green]📈[/green] Coverage improved by {improvement: .2f}% "
|
|
836
|
+
f"to {current: .2f}%"
|
|
837
|
+
)
|
|
838
|
+
|
|
839
|
+
def _extract_failure_lines(self, output: str) -> list[str]:
|
|
840
|
+
failures = []
|
|
841
|
+
lines = output.split("\n")
|
|
842
|
+
|
|
843
|
+
for line in lines:
|
|
844
|
+
if any(
|
|
845
|
+
keyword in line for keyword in ("FAILED", "ERROR", "AssertionError")
|
|
846
|
+
):
|
|
847
|
+
failures.append(line.strip())
|
|
848
|
+
|
|
849
|
+
return failures[:10]
|
|
850
|
+
|
|
851
|
+
@staticmethod
def _strip_ansi_codes(text: str) -> str:
    """Remove ANSI escape sequences from a string.

    Uses the module-level compiled pattern ANSI_ESCAPE_RE.
    """
    return ANSI_ESCAPE_RE.sub("", text)
|
|
855
|
+
|
|
856
|
+
def _split_output_sections(self, output: str) -> list[tuple[str, str]]:
    """Split pytest output into logical sections for rendering.

    Sections:
    - header: Session start, test collection
    - failure: Individual test failures with tracebacks
    - summary: Short test summary info
    - footer: Coverage, timing, final stats

    Returns:
        List of (section_type, section_content) tuples
    """
    sections = []
    lines = output.split("\n")

    current_section: list[str] = []
    current_type = "header"

    for line in lines:
        # Detect section boundaries
        if "short test summary" in line.lower():
            # Save previous section
            if current_section:
                sections.append((current_type, "\n".join(current_section)))
            current_section = [line]
            current_type = "summary"

        elif " FAILED " in line or " ERROR " in line:
            # Save previous section; consecutive failure lines stay
            # merged into a single "failure" section.
            if current_section and current_type != "failure":
                sections.append((current_type, "\n".join(current_section)))
                current_section = []
            current_type = "failure"
            current_section.append(line)

        elif line.startswith("=") and ("passed" in line or "failed" in line):
            # Footer section
            if current_section:
                sections.append((current_type, "\n".join(current_section)))
            current_section = [line]
            current_type = "footer"

        else:
            current_section.append(line)

    # Add final section
    if current_section:
        sections.append((current_type, "\n".join(current_section)))

    return sections
|
|
906
|
+
|
|
907
|
+
def _render_formatted_output(
    self,
    output: str,
    options: OptionsProtocol,
    *,
    already_clean: bool = False,
) -> None:
    """Render test output with Rich formatting and sections.

    Phase 2: Uses structured failure parser when available; falls back to
    Phase 1 section-based rendering when structured parsing raises.

    Args:
        output: Raw pytest output text
        options: Test options (for verbosity level)
        already_clean: When True, ``output`` is assumed to already be free
            of ANSI escape codes and is not stripped again.
    """
    from rich.panel import Panel

    # Strip ANSI colour codes unless the caller already did so.
    clean_output = output if already_clean else self._strip_ansi_codes(output)

    # Try structured parsing first (Phase 2)
    try:
        failures = self._extract_structured_failures(clean_output)
        if failures:
            self._render_banner(
                "Detailed Failure Analysis",
                line_style="red",
                char="═",
            )

            self._render_structured_failure_panels(failures)

            # Still show summary section
            sections = self._split_output_sections(clean_output)
            for section_type, section_content in sections:
                if section_type == "summary":
                    panel = Panel(
                        section_content.strip(),
                        title="[bold yellow]📋 Test Summary[/bold yellow]",
                        border_style="yellow",
                        width=get_console_width(),
                    )
                    self.console.print(panel)
                elif section_type == "footer":
                    self.console.print(
                        f"\n[cyan]{section_content.strip()}[/cyan]\n"
                    )

            # Structured rendering succeeded; skip the Phase 1 fallback below.
            return

    except Exception as e:
        # Fallback to Phase 1 rendering if parsing fails
        self.console.print(
            f"[dim yellow]⚠️ Structured parsing failed: {e}[/dim yellow]"
        )
        self.console.print(
            "[dim yellow]Falling back to standard formatting...[/dim yellow]\n"
        )

    # Fallback: Phase 1 section-based rendering
    sections = self._split_output_sections(clean_output)

    for section_type, section_content in sections:
        # Skip sections that are empty once whitespace is removed.
        if not section_content.strip():
            continue

        if section_type == "failure":
            self._render_failure_section(section_content)
        elif section_type == "summary":
            panel = Panel(
                section_content.strip(),
                title="[bold yellow]📋 Test Summary[/bold yellow]",
                border_style="yellow",
                width=get_console_width(),
            )
            self.console.print(panel)
        elif section_type == "footer":
            self.console.print(f"\n[cyan]{section_content.strip()}[/cyan]\n")
        else:
            # Header and other sections (dimmed); only shown in verbose /
            # ai_debug mode to keep the default output focused on failures.
            if options.verbose or getattr(options, "ai_debug", False):
                self.console.print(f"[dim]{section_content}[/dim]")
def _render_failure_section(self, section_content: str) -> None:
    """Print one failure block inside a red panel, highlighted as Python.

    Args:
        section_content: Failure output text
    """
    from rich.panel import Panel
    from rich.syntax import Syntax

    # Tracebacks are Python source, so reuse the Python lexer for colouring.
    highlighted = Syntax(
        section_content,
        "python",
        theme="monokai",
        line_numbers=False,
        word_wrap=True,
        background_color="default",
    )
    self.console.print(
        Panel(
            highlighted,
            title="[bold red]❌ Test Failure[/bold red]",
            border_style="red",
            width=get_console_width(),
        )
    )
def _parse_failure_header(
    self, line: str, current_failure: "TestFailure | None"
) -> tuple["TestFailure | None", bool]:
    """Parse failure header line.

    Returns the (possibly new) failure record plus a flag telling the caller
    whether this line actually started a new failure.
    """
    import re

    from crackerjack.models.test_models import TestFailure

    # Header shape: "<test path> FAILED|ERROR [optional params]".
    match = re.match(r"^(.+?)\s+(FAILED|ERROR)\s*(?:\[(.+?)\])?", line)
    if match is None:
        return current_failure, False

    test_path, status, params = match.groups()
    suffix = f"[{params}]" if params else ""
    return (
        TestFailure(
            test_name=test_path + suffix,
            status=status,
            location=test_path,
        ),
        True,
    )
def _parse_location_and_assertion(
|
|
1036
|
+
self, line: str, current_failure: "TestFailure", in_traceback: bool
|
|
1037
|
+
) -> bool:
|
|
1038
|
+
"""Parse location and assertion lines."""
|
|
1039
|
+
import re
|
|
1040
|
+
|
|
1041
|
+
# Detect location: "tests/test_foo.py:42: AssertionError"
|
|
1042
|
+
location_match = re.match(r"^(.+?\.py):(\d+):\s*(.*)$", line)
|
|
1043
|
+
if location_match and in_traceback:
|
|
1044
|
+
file_path, line_num, error_type = location_match.groups()
|
|
1045
|
+
current_failure.location = f"{file_path}:{line_num}"
|
|
1046
|
+
if error_type:
|
|
1047
|
+
current_failure.short_summary = error_type
|
|
1048
|
+
return True
|
|
1049
|
+
|
|
1050
|
+
# Detect assertion errors
|
|
1051
|
+
if "AssertionError:" in line or line.strip().startswith("E assert "):
|
|
1052
|
+
assertion_text = line.strip().lstrip("E").strip()
|
|
1053
|
+
if current_failure.assertion:
|
|
1054
|
+
current_failure.assertion += "\n" + assertion_text
|
|
1055
|
+
else:
|
|
1056
|
+
current_failure.assertion = assertion_text
|
|
1057
|
+
return True
|
|
1058
|
+
|
|
1059
|
+
return False
|
|
1060
|
+
|
|
1061
|
+
def _parse_captured_section_header(self, line: str) -> tuple[bool, str | None]:
|
|
1062
|
+
"""Parse captured output section headers."""
|
|
1063
|
+
if "captured stdout" in line.lower():
|
|
1064
|
+
return True, "stdout"
|
|
1065
|
+
elif "captured stderr" in line.lower():
|
|
1066
|
+
return True, "stderr"
|
|
1067
|
+
return False, None
|
|
1068
|
+
|
|
1069
|
+
def _parse_traceback_line(
|
|
1070
|
+
self, line: str, lines: list[str], i: int, current_failure: "TestFailure"
|
|
1071
|
+
) -> bool:
|
|
1072
|
+
"""Parse traceback lines."""
|
|
1073
|
+
if line.startswith(" ") or line.startswith("\t") or line.startswith("E "):
|
|
1074
|
+
current_failure.traceback.append(line)
|
|
1075
|
+
return True
|
|
1076
|
+
elif line.strip().startswith("=") or (
|
|
1077
|
+
i < len(lines) - 1 and "FAILED" in lines[i + 1]
|
|
1078
|
+
):
|
|
1079
|
+
return False
|
|
1080
|
+
return True
|
|
1081
|
+
|
|
1082
|
+
def _parse_captured_output(
|
|
1083
|
+
self, line: str, capture_type: str | None, current_failure: "TestFailure"
|
|
1084
|
+
) -> bool:
|
|
1085
|
+
"""Parse captured output lines."""
|
|
1086
|
+
if line.strip().startswith("=") or line.strip().startswith("_"):
|
|
1087
|
+
return False
|
|
1088
|
+
|
|
1089
|
+
if capture_type == "stdout":
|
|
1090
|
+
if current_failure.captured_stdout:
|
|
1091
|
+
current_failure.captured_stdout += "\n" + line
|
|
1092
|
+
else:
|
|
1093
|
+
current_failure.captured_stdout = line
|
|
1094
|
+
elif capture_type == "stderr":
|
|
1095
|
+
if current_failure.captured_stderr:
|
|
1096
|
+
current_failure.captured_stderr += "\n" + line
|
|
1097
|
+
else:
|
|
1098
|
+
current_failure.captured_stderr = line
|
|
1099
|
+
return True
|
|
1100
|
+
|
|
1101
|
+
def _extract_structured_failures(self, output: str) -> list["TestFailure"]:
    """Extract structured failure information from pytest output.

    This parser handles pytest's standard output format and extracts:
    - Test names and locations
    - Full tracebacks
    - Assertion errors
    - Captured output (stdout/stderr)
    - Duration (if available)

    Args:
        output: Raw pytest output text

    Returns:
        List of TestFailure objects
    """
    failures: list["TestFailure"] = []
    lines = output.split("\n")

    # Line-by-line state machine: which failure record is being filled in,
    # and whether the cursor is inside its traceback or a captured block.
    current_failure: "TestFailure | None" = None
    in_traceback = False
    in_captured = False
    capture_type: str | None = None

    for i, line in enumerate(lines):
        # Failure header ("<test> FAILED/ERROR [params]") starts a new
        # record and flushes the previous one, if any.
        new_failure, header_found = self._parse_failure_header(
            line, current_failure
        )
        if header_found:
            if current_failure:
                failures.append(current_failure)
            current_failure = new_failure
            in_traceback = True
            in_captured = False
            continue

        # Lines before the first failure header carry no failure data.
        if not current_failure:
            continue

        # Location ("file.py:NN: Error") and assertion ("E assert ...") lines.
        if self._parse_location_and_assertion(line, current_failure, in_traceback):
            continue

        # Captured-output section headers ("Captured stdout/stderr ...").
        is_captured, new_capture_type = self._parse_captured_section_header(line)
        if is_captured:
            in_captured = True
            capture_type = new_capture_type
            in_traceback = False
            continue

        # Traceback body; the helper reports when the traceback ends.
        if in_traceback:
            in_traceback = self._parse_traceback_line(
                line, lines, i, current_failure
            )

        # Captured stream body; the helper reports when the block ends.
        if in_captured and capture_type:
            in_captured = self._parse_captured_output(
                line, capture_type, current_failure
            )
            if not in_captured:
                capture_type = None

    # Save final failure
    if current_failure:
        failures.append(current_failure)

    return failures
def _render_structured_failure_panels(self, failures: list["TestFailure"]) -> None:
    """Render failures as Rich panels with tables and syntax highlighting.

    Each failure is rendered in a panel containing:
    - Summary table (test name, location, status)
    - Assertion details (if present)
    - Syntax-highlighted traceback
    - Captured output (if any)

    Args:
        failures: List of TestFailure objects
    """
    from rich import box
    from rich.console import Group
    from rich.panel import Panel
    from rich.syntax import Syntax
    from rich.table import Table

    if not failures:
        return

    # Group failures by file for better organization; setdefault creates the
    # per-file bucket on first sight of each path.
    failures_by_file: dict[str, list[TestFailure]] = {}
    for failure in failures:
        failures_by_file.setdefault(failure.get_file_path(), []).append(failure)

    # Render each file group: a header line, then one panel per failure.
    for file_path, file_failures in failures_by_file.items():
        self.console.print(
            f"\n[bold red]📁 {file_path}[/bold red] ({len(file_failures)} failure(s))\n"
        )

        for i, failure in enumerate(file_failures, 1):
            # Summary table: key/value rows describing the failure.
            table = Table(
                show_header=False,
                box=box.SIMPLE,
                padding=(0, 1),
                border_style="red",
            )
            table.add_column("Key", style="cyan bold", width=12)
            table.add_column("Value", overflow="fold")

            table.add_row("Test", f"[yellow]{failure.test_name}[/yellow]")
            table.add_row(
                "Location", f"[blue underline]{failure.location}[/blue underline]"
            )
            table.add_row("Status", f"[red bold]{failure.status}[/red bold]")

            if failure.duration:
                table.add_row("Duration", f"{failure.duration:.3f}s")

            # Add summary timing insight if available.
            duration_note = self._get_duration_note(failure)
            if duration_note:
                table.add_row("Timing", duration_note)

            # Components for panel (mixed list of renderables for Rich Group).
            components: list[t.Any] = [table]

            # Assertion details, syntax-highlighted as Python.
            if failure.assertion:
                components.append("")  # Spacer
                components.append("[bold red]Assertion Error:[/bold red]")
                components.append(
                    Syntax(
                        failure.assertion,
                        "python",
                        theme="monokai",
                        line_numbers=False,
                        background_color="default",
                    )
                )

            # Relevant traceback (last 15 lines), syntax-highlighted.
            relevant_traceback = failure.get_relevant_traceback(max_lines=15)
            if relevant_traceback:
                components.append("")  # Spacer
                components.append("[bold red]Traceback:[/bold red]")
                components.append(
                    Syntax(
                        "\n".join(relevant_traceback),
                        "python",
                        theme="monokai",
                        line_numbers=False,
                        word_wrap=True,
                        background_color="default",
                    )
                )

            # Captured output streams, dimmed.
            if failure.captured_stdout:
                components.append("")  # Spacer
                components.append("[bold yellow]Captured stdout:[/bold yellow]")
                components.append(f"[dim]{failure.captured_stdout}[/dim]")

            if failure.captured_stderr:
                components.append("")  # Spacer
                components.append("[bold yellow]Captured stderr:[/bold yellow]")
                components.append(f"[dim]{failure.captured_stderr}[/dim]")

            # Render the assembled components as one red panel.
            panel = Panel(
                Group(*components),
                title=f"[bold red]❌ Failure {i}/{len(file_failures)}[/bold red]",
                border_style="red",
                width=get_console_width(),
                padding=(1, 2),
            )
            self.console.print(panel)
def _get_duration_note(self, failure: "TestFailure") -> str | None:
|
|
1295
|
+
"""Return a duration note highlighting long-running failures."""
|
|
1296
|
+
if not failure.duration:
|
|
1297
|
+
return None
|
|
1298
|
+
|
|
1299
|
+
if failure.duration > 5:
|
|
1300
|
+
return (
|
|
1301
|
+
f"[bold red]{failure.duration:.2f}s – investigate slow test[/bold red]"
|
|
1302
|
+
)
|
|
1303
|
+
if failure.duration > 2:
|
|
1304
|
+
return f"[yellow]{failure.duration:.2f}s – moderately slow[/yellow]"
|
|
1305
|
+
return None
|
|
1306
|
+
|
|
1307
|
+
def _get_timeout(self, options: OptionsProtocol) -> int:
    """Delegate test-timeout resolution to the command builder."""
    builder = self.command_builder
    return builder.get_test_timeout(options)
async def run_pre_test_lsp_diagnostics(self) -> bool:
    """Run LSP diagnostics before tests to catch type errors early.

    Returns:
        True when tests may proceed (diagnostics disabled, server
        unavailable, diagnostics failed, or no type errors found);
        False when the LSP server reported type errors.
    """
    # Diagnostics are opt-in and require an injected client.
    if not self.use_lsp_diagnostics or self._lsp_client is None:
        return True

    try:
        # Use injected LSP client (already instantiated)
        lsp_client = self._lsp_client

        # No running LSP server: skip silently rather than block tests.
        if not lsp_client.is_server_running():
            return True

        # Run type diagnostics on the project. The textual summary is not
        # used here, only the per-file diagnostics mapping.
        diagnostics, _summary = lsp_client.check_project_with_feedback(
            self.pkg_path,
            show_progress=False,  # Keep quiet for test integration
        )

        # Any non-empty per-file diagnostic list counts as a type error.
        has_errors = any(diags for diags in diagnostics.values())

        if has_errors:
            self.console.print(
                "[yellow]⚠️ LSP detected type errors before running tests[/yellow]"
            )
            # Format and show a summary
            error_count = sum(len(diags) for diags in diagnostics.values())
            self.console.print(f"[yellow]Found {error_count} type issues[/yellow]")

        return not has_errors  # Return False if there are type errors

    except Exception as e:
        # Diagnostics are best-effort: failures must never block the test run.
        self.console.print(f"[dim]LSP diagnostics failed: {e}[/dim]")
        return True
def configure_lsp_diagnostics(self, enable: bool) -> None:
    """Toggle the LSP-diagnostics integration on or off."""
    self.use_lsp_diagnostics = enable
    if not enable:
        return

    # Announce activation so the user knows why extra checks will run.
    self.console.print(
        "[cyan]🔍 LSP diagnostics enabled for faster test feedback[/cyan]"
    )
# NOTE(review): alias — presumably kept so callers importing the older
# ``TestManagementImpl`` name keep working; confirm before removing.
TestManagementImpl = TestManager
|