crackerjack 0.37.9__py3-none-any.whl → 0.45.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- crackerjack/README.md +19 -0
- crackerjack/__init__.py +30 -1
- crackerjack/__main__.py +342 -1263
- crackerjack/adapters/README.md +18 -0
- crackerjack/adapters/__init__.py +27 -5
- crackerjack/adapters/_output_paths.py +167 -0
- crackerjack/adapters/_qa_adapter_base.py +309 -0
- crackerjack/adapters/_tool_adapter_base.py +706 -0
- crackerjack/adapters/ai/README.md +65 -0
- crackerjack/adapters/ai/__init__.py +5 -0
- crackerjack/adapters/ai/claude.py +853 -0
- crackerjack/adapters/complexity/README.md +53 -0
- crackerjack/adapters/complexity/__init__.py +10 -0
- crackerjack/adapters/complexity/complexipy.py +641 -0
- crackerjack/adapters/dependency/__init__.py +22 -0
- crackerjack/adapters/dependency/pip_audit.py +418 -0
- crackerjack/adapters/format/README.md +72 -0
- crackerjack/adapters/format/__init__.py +11 -0
- crackerjack/adapters/format/mdformat.py +313 -0
- crackerjack/adapters/format/ruff.py +516 -0
- crackerjack/adapters/lint/README.md +47 -0
- crackerjack/adapters/lint/__init__.py +11 -0
- crackerjack/adapters/lint/codespell.py +273 -0
- crackerjack/adapters/lsp/README.md +49 -0
- crackerjack/adapters/lsp/__init__.py +27 -0
- crackerjack/adapters/{rust_tool_manager.py → lsp/_manager.py} +3 -3
- crackerjack/adapters/{skylos_adapter.py → lsp/skylos.py} +59 -7
- crackerjack/adapters/{zuban_adapter.py → lsp/zuban.py} +3 -6
- crackerjack/adapters/refactor/README.md +59 -0
- crackerjack/adapters/refactor/__init__.py +12 -0
- crackerjack/adapters/refactor/creosote.py +318 -0
- crackerjack/adapters/refactor/refurb.py +406 -0
- crackerjack/adapters/refactor/skylos.py +494 -0
- crackerjack/adapters/sast/README.md +132 -0
- crackerjack/adapters/sast/__init__.py +32 -0
- crackerjack/adapters/sast/_base.py +201 -0
- crackerjack/adapters/sast/bandit.py +423 -0
- crackerjack/adapters/sast/pyscn.py +405 -0
- crackerjack/adapters/sast/semgrep.py +241 -0
- crackerjack/adapters/security/README.md +111 -0
- crackerjack/adapters/security/__init__.py +17 -0
- crackerjack/adapters/security/gitleaks.py +339 -0
- crackerjack/adapters/type/README.md +52 -0
- crackerjack/adapters/type/__init__.py +12 -0
- crackerjack/adapters/type/pyrefly.py +402 -0
- crackerjack/adapters/type/ty.py +402 -0
- crackerjack/adapters/type/zuban.py +522 -0
- crackerjack/adapters/utility/README.md +51 -0
- crackerjack/adapters/utility/__init__.py +10 -0
- crackerjack/adapters/utility/checks.py +884 -0
- crackerjack/agents/README.md +264 -0
- crackerjack/agents/__init__.py +40 -12
- crackerjack/agents/base.py +1 -0
- crackerjack/agents/claude_code_bridge.py +641 -0
- crackerjack/agents/coordinator.py +49 -53
- crackerjack/agents/dry_agent.py +187 -3
- crackerjack/agents/enhanced_coordinator.py +279 -0
- crackerjack/agents/enhanced_proactive_agent.py +185 -0
- crackerjack/agents/error_middleware.py +53 -0
- crackerjack/agents/formatting_agent.py +6 -8
- crackerjack/agents/helpers/__init__.py +9 -0
- crackerjack/agents/helpers/performance/__init__.py +22 -0
- crackerjack/agents/helpers/performance/performance_ast_analyzer.py +357 -0
- crackerjack/agents/helpers/performance/performance_pattern_detector.py +909 -0
- crackerjack/agents/helpers/performance/performance_recommender.py +572 -0
- crackerjack/agents/helpers/refactoring/__init__.py +22 -0
- crackerjack/agents/helpers/refactoring/code_transformer.py +536 -0
- crackerjack/agents/helpers/refactoring/complexity_analyzer.py +344 -0
- crackerjack/agents/helpers/refactoring/dead_code_detector.py +437 -0
- crackerjack/agents/helpers/test_creation/__init__.py +19 -0
- crackerjack/agents/helpers/test_creation/test_ast_analyzer.py +216 -0
- crackerjack/agents/helpers/test_creation/test_coverage_analyzer.py +643 -0
- crackerjack/agents/helpers/test_creation/test_template_generator.py +1031 -0
- crackerjack/agents/performance_agent.py +121 -1152
- crackerjack/agents/refactoring_agent.py +156 -655
- crackerjack/agents/semantic_agent.py +479 -0
- crackerjack/agents/semantic_helpers.py +356 -0
- crackerjack/agents/test_creation_agent.py +19 -1605
- crackerjack/api.py +5 -7
- crackerjack/cli/README.md +394 -0
- crackerjack/cli/__init__.py +1 -1
- crackerjack/cli/cache_handlers.py +23 -18
- crackerjack/cli/cache_handlers_enhanced.py +1 -4
- crackerjack/cli/facade.py +70 -8
- crackerjack/cli/formatting.py +13 -0
- crackerjack/cli/handlers/__init__.py +85 -0
- crackerjack/cli/handlers/advanced.py +103 -0
- crackerjack/cli/handlers/ai_features.py +62 -0
- crackerjack/cli/handlers/analytics.py +479 -0
- crackerjack/cli/handlers/changelog.py +271 -0
- crackerjack/cli/handlers/config_handlers.py +16 -0
- crackerjack/cli/handlers/coverage.py +84 -0
- crackerjack/cli/handlers/documentation.py +280 -0
- crackerjack/cli/handlers/main_handlers.py +497 -0
- crackerjack/cli/handlers/monitoring.py +371 -0
- crackerjack/cli/handlers.py +249 -49
- crackerjack/cli/interactive.py +8 -5
- crackerjack/cli/options.py +203 -110
- crackerjack/cli/semantic_handlers.py +292 -0
- crackerjack/cli/version.py +19 -0
- crackerjack/code_cleaner.py +60 -24
- crackerjack/config/README.md +472 -0
- crackerjack/config/__init__.py +256 -0
- crackerjack/config/global_lock_config.py +191 -54
- crackerjack/config/hooks.py +188 -16
- crackerjack/config/loader.py +239 -0
- crackerjack/config/settings.py +141 -0
- crackerjack/config/tool_commands.py +331 -0
- crackerjack/core/README.md +393 -0
- crackerjack/core/async_workflow_orchestrator.py +79 -53
- crackerjack/core/autofix_coordinator.py +22 -9
- crackerjack/core/container.py +10 -9
- crackerjack/core/enhanced_container.py +9 -9
- crackerjack/core/performance.py +1 -1
- crackerjack/core/performance_monitor.py +5 -3
- crackerjack/core/phase_coordinator.py +1018 -634
- crackerjack/core/proactive_workflow.py +3 -3
- crackerjack/core/retry.py +275 -0
- crackerjack/core/service_watchdog.py +167 -23
- crackerjack/core/session_coordinator.py +187 -382
- crackerjack/core/timeout_manager.py +161 -44
- crackerjack/core/workflow/__init__.py +21 -0
- crackerjack/core/workflow/workflow_ai_coordinator.py +863 -0
- crackerjack/core/workflow/workflow_event_orchestrator.py +1107 -0
- crackerjack/core/workflow/workflow_issue_parser.py +714 -0
- crackerjack/core/workflow/workflow_phase_executor.py +1158 -0
- crackerjack/core/workflow/workflow_security_gates.py +400 -0
- crackerjack/core/workflow_orchestrator.py +1247 -953
- crackerjack/data/README.md +11 -0
- crackerjack/data/__init__.py +8 -0
- crackerjack/data/models.py +79 -0
- crackerjack/data/repository.py +210 -0
- crackerjack/decorators/README.md +180 -0
- crackerjack/decorators/__init__.py +35 -0
- crackerjack/decorators/error_handling.py +649 -0
- crackerjack/decorators/error_handling_decorators.py +334 -0
- crackerjack/decorators/helpers.py +58 -0
- crackerjack/decorators/patterns.py +281 -0
- crackerjack/decorators/utils.py +58 -0
- crackerjack/docs/README.md +11 -0
- crackerjack/docs/generated/api/CLI_REFERENCE.md +1 -1
- crackerjack/documentation/README.md +11 -0
- crackerjack/documentation/ai_templates.py +1 -1
- crackerjack/documentation/dual_output_generator.py +11 -9
- crackerjack/documentation/reference_generator.py +104 -59
- crackerjack/dynamic_config.py +52 -61
- crackerjack/errors.py +1 -1
- crackerjack/events/README.md +11 -0
- crackerjack/events/__init__.py +16 -0
- crackerjack/events/telemetry.py +175 -0
- crackerjack/events/workflow_bus.py +346 -0
- crackerjack/exceptions/README.md +301 -0
- crackerjack/exceptions/__init__.py +5 -0
- crackerjack/exceptions/config.py +4 -0
- crackerjack/exceptions/tool_execution_error.py +245 -0
- crackerjack/executors/README.md +591 -0
- crackerjack/executors/__init__.py +2 -0
- crackerjack/executors/async_hook_executor.py +539 -77
- crackerjack/executors/cached_hook_executor.py +3 -3
- crackerjack/executors/hook_executor.py +967 -102
- crackerjack/executors/hook_lock_manager.py +31 -22
- crackerjack/executors/individual_hook_executor.py +66 -32
- crackerjack/executors/lsp_aware_hook_executor.py +136 -57
- crackerjack/executors/progress_hook_executor.py +282 -0
- crackerjack/executors/tool_proxy.py +23 -7
- crackerjack/hooks/README.md +485 -0
- crackerjack/hooks/lsp_hook.py +8 -9
- crackerjack/intelligence/README.md +557 -0
- crackerjack/interactive.py +37 -10
- crackerjack/managers/README.md +369 -0
- crackerjack/managers/async_hook_manager.py +41 -57
- crackerjack/managers/hook_manager.py +449 -79
- crackerjack/managers/publish_manager.py +81 -36
- crackerjack/managers/test_command_builder.py +290 -12
- crackerjack/managers/test_executor.py +93 -8
- crackerjack/managers/test_manager.py +1082 -75
- crackerjack/managers/test_progress.py +118 -26
- crackerjack/mcp/README.md +374 -0
- crackerjack/mcp/cache.py +25 -2
- crackerjack/mcp/client_runner.py +35 -18
- crackerjack/mcp/context.py +9 -9
- crackerjack/mcp/dashboard.py +24 -8
- crackerjack/mcp/enhanced_progress_monitor.py +34 -23
- crackerjack/mcp/file_monitor.py +27 -6
- crackerjack/mcp/progress_components.py +45 -34
- crackerjack/mcp/progress_monitor.py +6 -9
- crackerjack/mcp/rate_limiter.py +11 -7
- crackerjack/mcp/server.py +2 -0
- crackerjack/mcp/server_core.py +187 -55
- crackerjack/mcp/service_watchdog.py +12 -9
- crackerjack/mcp/task_manager.py +2 -2
- crackerjack/mcp/tools/README.md +27 -0
- crackerjack/mcp/tools/__init__.py +2 -0
- crackerjack/mcp/tools/core_tools.py +75 -52
- crackerjack/mcp/tools/execution_tools.py +87 -31
- crackerjack/mcp/tools/intelligence_tools.py +2 -2
- crackerjack/mcp/tools/proactive_tools.py +1 -1
- crackerjack/mcp/tools/semantic_tools.py +584 -0
- crackerjack/mcp/tools/utility_tools.py +180 -132
- crackerjack/mcp/tools/workflow_executor.py +87 -46
- crackerjack/mcp/websocket/README.md +31 -0
- crackerjack/mcp/websocket/app.py +11 -1
- crackerjack/mcp/websocket/event_bridge.py +188 -0
- crackerjack/mcp/websocket/jobs.py +27 -4
- crackerjack/mcp/websocket/monitoring/__init__.py +25 -0
- crackerjack/mcp/websocket/monitoring/api/__init__.py +19 -0
- crackerjack/mcp/websocket/monitoring/api/dependencies.py +141 -0
- crackerjack/mcp/websocket/monitoring/api/heatmap.py +154 -0
- crackerjack/mcp/websocket/monitoring/api/intelligence.py +199 -0
- crackerjack/mcp/websocket/monitoring/api/metrics.py +203 -0
- crackerjack/mcp/websocket/monitoring/api/telemetry.py +101 -0
- crackerjack/mcp/websocket/monitoring/dashboard.py +18 -0
- crackerjack/mcp/websocket/monitoring/factory.py +109 -0
- crackerjack/mcp/websocket/monitoring/filters.py +10 -0
- crackerjack/mcp/websocket/monitoring/metrics.py +64 -0
- crackerjack/mcp/websocket/monitoring/models.py +90 -0
- crackerjack/mcp/websocket/monitoring/utils.py +171 -0
- crackerjack/mcp/websocket/monitoring/websocket_manager.py +78 -0
- crackerjack/mcp/websocket/monitoring/websockets/__init__.py +17 -0
- crackerjack/mcp/websocket/monitoring/websockets/dependencies.py +126 -0
- crackerjack/mcp/websocket/monitoring/websockets/heatmap.py +176 -0
- crackerjack/mcp/websocket/monitoring/websockets/intelligence.py +291 -0
- crackerjack/mcp/websocket/monitoring/websockets/metrics.py +291 -0
- crackerjack/mcp/websocket/monitoring_endpoints.py +16 -2930
- crackerjack/mcp/websocket/server.py +1 -3
- crackerjack/mcp/websocket/websocket_handler.py +107 -6
- crackerjack/models/README.md +308 -0
- crackerjack/models/__init__.py +10 -1
- crackerjack/models/config.py +639 -22
- crackerjack/models/config_adapter.py +6 -6
- crackerjack/models/protocols.py +1167 -23
- crackerjack/models/pydantic_models.py +320 -0
- crackerjack/models/qa_config.py +145 -0
- crackerjack/models/qa_results.py +134 -0
- crackerjack/models/results.py +35 -0
- crackerjack/models/semantic_models.py +258 -0
- crackerjack/models/task.py +19 -3
- crackerjack/models/test_models.py +60 -0
- crackerjack/monitoring/README.md +11 -0
- crackerjack/monitoring/ai_agent_watchdog.py +5 -4
- crackerjack/monitoring/metrics_collector.py +4 -3
- crackerjack/monitoring/regression_prevention.py +4 -3
- crackerjack/monitoring/websocket_server.py +4 -241
- crackerjack/orchestration/README.md +340 -0
- crackerjack/orchestration/__init__.py +43 -0
- crackerjack/orchestration/advanced_orchestrator.py +20 -67
- crackerjack/orchestration/cache/README.md +312 -0
- crackerjack/orchestration/cache/__init__.py +37 -0
- crackerjack/orchestration/cache/memory_cache.py +338 -0
- crackerjack/orchestration/cache/tool_proxy_cache.py +340 -0
- crackerjack/orchestration/config.py +297 -0
- crackerjack/orchestration/coverage_improvement.py +13 -6
- crackerjack/orchestration/execution_strategies.py +6 -6
- crackerjack/orchestration/hook_orchestrator.py +1398 -0
- crackerjack/orchestration/strategies/README.md +401 -0
- crackerjack/orchestration/strategies/__init__.py +39 -0
- crackerjack/orchestration/strategies/adaptive_strategy.py +630 -0
- crackerjack/orchestration/strategies/parallel_strategy.py +237 -0
- crackerjack/orchestration/strategies/sequential_strategy.py +299 -0
- crackerjack/orchestration/test_progress_streamer.py +1 -1
- crackerjack/plugins/README.md +11 -0
- crackerjack/plugins/hooks.py +3 -2
- crackerjack/plugins/loader.py +3 -3
- crackerjack/plugins/managers.py +1 -1
- crackerjack/py313.py +191 -0
- crackerjack/security/README.md +11 -0
- crackerjack/services/README.md +374 -0
- crackerjack/services/__init__.py +8 -21
- crackerjack/services/ai/README.md +295 -0
- crackerjack/services/ai/__init__.py +7 -0
- crackerjack/services/ai/advanced_optimizer.py +878 -0
- crackerjack/services/{contextual_ai_assistant.py → ai/contextual_ai_assistant.py} +5 -3
- crackerjack/services/ai/embeddings.py +444 -0
- crackerjack/services/ai/intelligent_commit.py +328 -0
- crackerjack/services/ai/predictive_analytics.py +510 -0
- crackerjack/services/api_extractor.py +5 -3
- crackerjack/services/bounded_status_operations.py +45 -5
- crackerjack/services/cache.py +249 -318
- crackerjack/services/changelog_automation.py +7 -3
- crackerjack/services/command_execution_service.py +305 -0
- crackerjack/services/config_integrity.py +83 -39
- crackerjack/services/config_merge.py +9 -6
- crackerjack/services/config_service.py +198 -0
- crackerjack/services/config_template.py +13 -26
- crackerjack/services/coverage_badge_service.py +6 -4
- crackerjack/services/coverage_ratchet.py +53 -27
- crackerjack/services/debug.py +18 -7
- crackerjack/services/dependency_analyzer.py +4 -4
- crackerjack/services/dependency_monitor.py +13 -13
- crackerjack/services/documentation_generator.py +4 -2
- crackerjack/services/documentation_service.py +62 -33
- crackerjack/services/enhanced_filesystem.py +81 -27
- crackerjack/services/enterprise_optimizer.py +1 -1
- crackerjack/services/error_pattern_analyzer.py +10 -10
- crackerjack/services/file_filter.py +221 -0
- crackerjack/services/file_hasher.py +5 -7
- crackerjack/services/file_io_service.py +361 -0
- crackerjack/services/file_modifier.py +615 -0
- crackerjack/services/filesystem.py +80 -109
- crackerjack/services/git.py +99 -5
- crackerjack/services/health_metrics.py +4 -6
- crackerjack/services/heatmap_generator.py +12 -3
- crackerjack/services/incremental_executor.py +380 -0
- crackerjack/services/initialization.py +101 -49
- crackerjack/services/log_manager.py +2 -2
- crackerjack/services/logging.py +120 -68
- crackerjack/services/lsp_client.py +12 -12
- crackerjack/services/memory_optimizer.py +27 -22
- crackerjack/services/monitoring/README.md +30 -0
- crackerjack/services/monitoring/__init__.py +9 -0
- crackerjack/services/monitoring/dependency_monitor.py +678 -0
- crackerjack/services/monitoring/error_pattern_analyzer.py +676 -0
- crackerjack/services/monitoring/health_metrics.py +716 -0
- crackerjack/services/monitoring/metrics.py +587 -0
- crackerjack/services/{performance_benchmarks.py → monitoring/performance_benchmarks.py} +100 -14
- crackerjack/services/{performance_cache.py → monitoring/performance_cache.py} +21 -15
- crackerjack/services/{performance_monitor.py → monitoring/performance_monitor.py} +10 -6
- crackerjack/services/parallel_executor.py +166 -55
- crackerjack/services/patterns/__init__.py +142 -0
- crackerjack/services/patterns/agents.py +107 -0
- crackerjack/services/patterns/code/__init__.py +15 -0
- crackerjack/services/patterns/code/detection.py +118 -0
- crackerjack/services/patterns/code/imports.py +107 -0
- crackerjack/services/patterns/code/paths.py +159 -0
- crackerjack/services/patterns/code/performance.py +119 -0
- crackerjack/services/patterns/code/replacement.py +36 -0
- crackerjack/services/patterns/core.py +212 -0
- crackerjack/services/patterns/documentation/__init__.py +14 -0
- crackerjack/services/patterns/documentation/badges_markdown.py +96 -0
- crackerjack/services/patterns/documentation/comments_blocks.py +83 -0
- crackerjack/services/patterns/documentation/docstrings.py +89 -0
- crackerjack/services/patterns/formatting.py +226 -0
- crackerjack/services/patterns/operations.py +339 -0
- crackerjack/services/patterns/security/__init__.py +23 -0
- crackerjack/services/patterns/security/code_injection.py +122 -0
- crackerjack/services/patterns/security/credentials.py +190 -0
- crackerjack/services/patterns/security/path_traversal.py +221 -0
- crackerjack/services/patterns/security/unsafe_operations.py +216 -0
- crackerjack/services/patterns/templates.py +62 -0
- crackerjack/services/patterns/testing/__init__.py +18 -0
- crackerjack/services/patterns/testing/error_patterns.py +107 -0
- crackerjack/services/patterns/testing/pytest_output.py +126 -0
- crackerjack/services/patterns/tool_output/__init__.py +16 -0
- crackerjack/services/patterns/tool_output/bandit.py +72 -0
- crackerjack/services/patterns/tool_output/other.py +97 -0
- crackerjack/services/patterns/tool_output/pyright.py +67 -0
- crackerjack/services/patterns/tool_output/ruff.py +44 -0
- crackerjack/services/patterns/url_sanitization.py +114 -0
- crackerjack/services/patterns/utilities.py +42 -0
- crackerjack/services/patterns/utils.py +339 -0
- crackerjack/services/patterns/validation.py +46 -0
- crackerjack/services/patterns/versioning.py +62 -0
- crackerjack/services/predictive_analytics.py +21 -8
- crackerjack/services/profiler.py +280 -0
- crackerjack/services/quality/README.md +415 -0
- crackerjack/services/quality/__init__.py +11 -0
- crackerjack/services/quality/anomaly_detector.py +392 -0
- crackerjack/services/quality/pattern_cache.py +333 -0
- crackerjack/services/quality/pattern_detector.py +479 -0
- crackerjack/services/quality/qa_orchestrator.py +491 -0
- crackerjack/services/{quality_baseline.py → quality/quality_baseline.py} +163 -2
- crackerjack/services/{quality_baseline_enhanced.py → quality/quality_baseline_enhanced.py} +4 -1
- crackerjack/services/{quality_intelligence.py → quality/quality_intelligence.py} +180 -16
- crackerjack/services/regex_patterns.py +58 -2987
- crackerjack/services/regex_utils.py +55 -29
- crackerjack/services/secure_status_formatter.py +42 -15
- crackerjack/services/secure_subprocess.py +35 -2
- crackerjack/services/security.py +16 -8
- crackerjack/services/server_manager.py +40 -51
- crackerjack/services/smart_scheduling.py +46 -6
- crackerjack/services/status_authentication.py +3 -3
- crackerjack/services/thread_safe_status_collector.py +1 -0
- crackerjack/services/tool_filter.py +368 -0
- crackerjack/services/tool_version_service.py +9 -5
- crackerjack/services/unified_config.py +43 -351
- crackerjack/services/vector_store.py +689 -0
- crackerjack/services/version_analyzer.py +6 -4
- crackerjack/services/version_checker.py +14 -8
- crackerjack/services/zuban_lsp_service.py +5 -4
- crackerjack/slash_commands/README.md +11 -0
- crackerjack/slash_commands/init.md +2 -12
- crackerjack/slash_commands/run.md +84 -50
- crackerjack/tools/README.md +11 -0
- crackerjack/tools/__init__.py +30 -0
- crackerjack/tools/_git_utils.py +105 -0
- crackerjack/tools/check_added_large_files.py +139 -0
- crackerjack/tools/check_ast.py +105 -0
- crackerjack/tools/check_json.py +103 -0
- crackerjack/tools/check_jsonschema.py +297 -0
- crackerjack/tools/check_toml.py +103 -0
- crackerjack/tools/check_yaml.py +110 -0
- crackerjack/tools/codespell_wrapper.py +72 -0
- crackerjack/tools/end_of_file_fixer.py +202 -0
- crackerjack/tools/format_json.py +128 -0
- crackerjack/tools/mdformat_wrapper.py +114 -0
- crackerjack/tools/trailing_whitespace.py +198 -0
- crackerjack/tools/validate_regex_patterns.py +7 -3
- crackerjack/ui/README.md +11 -0
- crackerjack/ui/dashboard_renderer.py +28 -0
- crackerjack/ui/templates/README.md +11 -0
- crackerjack/utils/console_utils.py +13 -0
- crackerjack/utils/dependency_guard.py +230 -0
- crackerjack/utils/retry_utils.py +275 -0
- crackerjack/workflows/README.md +590 -0
- crackerjack/workflows/__init__.py +46 -0
- crackerjack/workflows/actions.py +811 -0
- crackerjack/workflows/auto_fix.py +444 -0
- crackerjack/workflows/container_builder.py +499 -0
- crackerjack/workflows/definitions.py +443 -0
- crackerjack/workflows/engine.py +177 -0
- crackerjack/workflows/event_bridge.py +242 -0
- {crackerjack-0.37.9.dist-info → crackerjack-0.45.2.dist-info}/METADATA +678 -98
- crackerjack-0.45.2.dist-info/RECORD +478 -0
- {crackerjack-0.37.9.dist-info → crackerjack-0.45.2.dist-info}/WHEEL +1 -1
- crackerjack/managers/test_manager_backup.py +0 -1075
- crackerjack/mcp/tools/execution_tools_backup.py +0 -1011
- crackerjack/mixins/__init__.py +0 -3
- crackerjack/mixins/error_handling.py +0 -145
- crackerjack/services/config.py +0 -358
- crackerjack/ui/server_panels.py +0 -125
- crackerjack-0.37.9.dist-info/RECORD +0 -231
- /crackerjack/adapters/{rust_tool_adapter.py → lsp/_base.py} +0 -0
- /crackerjack/adapters/{lsp_client.py → lsp/_client.py} +0 -0
- {crackerjack-0.37.9.dist-info → crackerjack-0.45.2.dist-info}/entry_points.txt +0 -0
- {crackerjack-0.37.9.dist-info → crackerjack-0.45.2.dist-info}/licenses/LICENSE +0 -0
|
@@ -1,49 +1,65 @@
|
|
|
1
|
+
import re
|
|
1
2
|
import subprocess
|
|
2
3
|
import time
|
|
3
4
|
import typing as t
|
|
4
5
|
from pathlib import Path
|
|
5
6
|
|
|
6
|
-
from
|
|
7
|
-
|
|
8
|
-
from
|
|
7
|
+
from acb.config import root_path
|
|
8
|
+
from acb.console import Console
|
|
9
|
+
from acb.depends import Inject, depends
|
|
10
|
+
from rich import box
|
|
11
|
+
from rich.panel import Panel
|
|
12
|
+
from rich.table import Table
|
|
13
|
+
from rich.text import Text
|
|
14
|
+
|
|
15
|
+
from crackerjack.config import get_console_width
|
|
16
|
+
from crackerjack.models.protocols import (
|
|
17
|
+
CoverageBadgeServiceProtocol,
|
|
18
|
+
CoverageRatchetProtocol,
|
|
19
|
+
OptionsProtocol,
|
|
20
|
+
)
|
|
21
|
+
from crackerjack.models.test_models import TestFailure
|
|
22
|
+
from crackerjack.services.lsp_client import LSPClient
|
|
9
23
|
|
|
10
24
|
from .test_command_builder import TestCommandBuilder
|
|
11
25
|
from .test_executor import TestExecutor
|
|
12
26
|
|
|
27
|
+
ANSI_ESCAPE_RE = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
|
|
28
|
+
|
|
13
29
|
|
|
14
30
|
class TestManager:
|
|
31
|
+
@depends.inject
|
|
15
32
|
def __init__(
|
|
16
33
|
self,
|
|
17
|
-
console: Console,
|
|
18
|
-
|
|
19
|
-
|
|
34
|
+
console: Inject[Console],
|
|
35
|
+
coverage_ratchet: Inject[CoverageRatchetProtocol],
|
|
36
|
+
coverage_badge: Inject[CoverageBadgeServiceProtocol],
|
|
37
|
+
command_builder: Inject[TestCommandBuilder],
|
|
38
|
+
lsp_client: Inject[LSPClient] | None = None,
|
|
20
39
|
) -> None:
|
|
21
40
|
self.console = console
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
41
|
+
# Ensure a concrete pathlib.Path instance to avoid async Path behaviors
|
|
42
|
+
# and to guarantee sync filesystem operations in this manager.
|
|
43
|
+
try:
|
|
44
|
+
self.pkg_path = Path(str(root_path))
|
|
45
|
+
except Exception:
|
|
46
|
+
# Fallback in the unlikely event root_path lacks __str__
|
|
47
|
+
self.pkg_path = Path(root_path)
|
|
26
48
|
|
|
27
|
-
|
|
28
|
-
|
|
49
|
+
# Ensure downstream components receive a concrete pathlib.Path
|
|
50
|
+
self.executor = TestExecutor(console, self.pkg_path)
|
|
51
|
+
self.command_builder = command_builder
|
|
29
52
|
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
else:
|
|
35
|
-
self.coverage_ratchet = coverage_ratchet
|
|
53
|
+
# Services injected via ACB DI
|
|
54
|
+
self.coverage_ratchet = coverage_ratchet
|
|
55
|
+
self._coverage_badge_service = coverage_badge
|
|
56
|
+
self._lsp_client = lsp_client
|
|
36
57
|
|
|
37
58
|
self._last_test_failures: list[str] = []
|
|
38
59
|
self._progress_callback: t.Callable[[dict[str, t.Any]], None] | None = None
|
|
39
60
|
self.coverage_ratchet_enabled = True
|
|
40
61
|
self.use_lsp_diagnostics = True
|
|
41
62
|
|
|
42
|
-
# Initialize coverage badge service
|
|
43
|
-
from crackerjack.services.coverage_badge_service import CoverageBadgeService
|
|
44
|
-
|
|
45
|
-
self._coverage_badge_service = CoverageBadgeService(console, pkg_path)
|
|
46
|
-
|
|
47
63
|
def set_progress_callback(
|
|
48
64
|
self,
|
|
49
65
|
callback: t.Callable[[dict[str, t.Any]], None] | None,
|
|
@@ -54,56 +70,79 @@ class TestManager:
|
|
|
54
70
|
self.coverage_ratchet_enabled = enabled
|
|
55
71
|
if enabled:
|
|
56
72
|
self.console.print(
|
|
57
|
-
"[cyan]📊[/
|
|
73
|
+
"[cyan]📊[/cyan] Coverage ratchet enabled-targeting 100 % coverage"
|
|
58
74
|
)
|
|
59
75
|
else:
|
|
60
|
-
self.console.print("[yellow]⚠️[/
|
|
76
|
+
self.console.print("[yellow]⚠️[/yellow] Coverage ratchet disabled")
|
|
61
77
|
|
|
62
78
|
def run_tests(self, options: OptionsProtocol) -> bool:
|
|
79
|
+
# Early return if tests are disabled
|
|
80
|
+
if hasattr(options, "test") and not options.test:
|
|
81
|
+
return True
|
|
82
|
+
|
|
63
83
|
start_time = time.time()
|
|
64
84
|
|
|
65
85
|
try:
|
|
66
86
|
result = self._execute_test_workflow(options)
|
|
67
87
|
duration = time.time() - start_time
|
|
68
88
|
|
|
69
|
-
|
|
70
|
-
|
|
89
|
+
# Get worker count for statistics panel (don't print info messages)
|
|
90
|
+
workers = self.command_builder.get_optimal_workers(
|
|
91
|
+
options, print_info=False
|
|
92
|
+
)
|
|
93
|
+
|
|
94
|
+
if result.returncode == 0:
|
|
95
|
+
return self._handle_test_success(
|
|
96
|
+
result.stdout, duration, options, workers
|
|
97
|
+
)
|
|
71
98
|
else:
|
|
72
99
|
return self._handle_test_failure(
|
|
73
|
-
result.stderr if result else "",
|
|
100
|
+
result.stderr if result else "",
|
|
101
|
+
result.stdout if result else "",
|
|
102
|
+
duration,
|
|
103
|
+
options,
|
|
104
|
+
workers,
|
|
74
105
|
)
|
|
75
106
|
|
|
76
107
|
except Exception as e:
|
|
77
108
|
return self._handle_test_error(start_time, e)
|
|
78
109
|
|
|
79
110
|
def run_specific_tests(self, test_pattern: str) -> bool:
|
|
80
|
-
self.console.print(f"[cyan]🧪[/
|
|
111
|
+
self.console.print(f"[cyan]🧪[/cyan] Running tests matching: {test_pattern}")
|
|
81
112
|
|
|
82
113
|
cmd = self.command_builder.build_specific_test_command(test_pattern)
|
|
83
114
|
result = self.executor.execute_with_progress(cmd)
|
|
84
115
|
|
|
85
116
|
success = result.returncode == 0
|
|
86
117
|
if success:
|
|
87
|
-
self.console.print("[green]✅[/
|
|
118
|
+
self.console.print("[green]✅[/green] Specific tests passed")
|
|
88
119
|
else:
|
|
89
|
-
self.console.print("[red]❌[/
|
|
120
|
+
self.console.print("[red]❌[/red] Some specific tests failed")
|
|
90
121
|
|
|
91
122
|
return success
|
|
92
123
|
|
|
93
124
|
def validate_test_environment(self) -> bool:
|
|
94
125
|
if not self.has_tests():
|
|
95
|
-
self.console.print("[yellow]⚠️[/
|
|
126
|
+
self.console.print("[yellow]⚠️[/yellow] No tests found")
|
|
96
127
|
return False
|
|
97
128
|
|
|
129
|
+
from rich.live import Live
|
|
130
|
+
from rich.spinner import Spinner
|
|
131
|
+
|
|
98
132
|
cmd = self.command_builder.build_validation_command()
|
|
99
|
-
|
|
133
|
+
|
|
134
|
+
spinner = Spinner("dots", text="[cyan]Validating test environment...[/cyan]")
|
|
135
|
+
with Live(spinner, console=self.console, transient=True):
|
|
136
|
+
result = subprocess.run(
|
|
137
|
+
cmd, cwd=self.pkg_path, capture_output=True, text=True
|
|
138
|
+
)
|
|
100
139
|
|
|
101
140
|
if result.returncode != 0:
|
|
102
|
-
self.console.print("[red]❌[/
|
|
141
|
+
self.console.print("[red]❌[/red] Test environment validation failed")
|
|
103
142
|
self.console.print(result.stderr)
|
|
104
143
|
return False
|
|
105
144
|
|
|
106
|
-
self.console.print("[green]✅[/
|
|
145
|
+
self.console.print("[green]✅[/green] Test environment validated")
|
|
107
146
|
return True
|
|
108
147
|
|
|
109
148
|
def get_coverage_ratchet_status(self) -> dict[str, t.Any]:
|
|
@@ -128,25 +167,101 @@ class TestManager:
|
|
|
128
167
|
except Exception:
|
|
129
168
|
return None
|
|
130
169
|
|
|
170
|
+
def _get_coverage_from_file(self) -> float | None:
|
|
171
|
+
"""Extract coverage from coverage.json file."""
|
|
172
|
+
import json
|
|
173
|
+
|
|
174
|
+
coverage_json_path = self.pkg_path / "coverage.json"
|
|
175
|
+
if not coverage_json_path.exists():
|
|
176
|
+
return None
|
|
177
|
+
|
|
178
|
+
try:
|
|
179
|
+
with coverage_json_path.open() as f:
|
|
180
|
+
coverage_data = json.load(f)
|
|
181
|
+
|
|
182
|
+
# Extract coverage percentage from totals
|
|
183
|
+
totals = coverage_data.get("totals", {})
|
|
184
|
+
percent_covered = totals.get("percent_covered", None)
|
|
185
|
+
|
|
186
|
+
if percent_covered is not None:
|
|
187
|
+
return float(percent_covered)
|
|
188
|
+
|
|
189
|
+
# Alternative extraction methods for different coverage formats
|
|
190
|
+
if "percent_covered" in coverage_data:
|
|
191
|
+
return float(coverage_data["percent_covered"])
|
|
192
|
+
|
|
193
|
+
# Check for coverage in files section
|
|
194
|
+
files = coverage_data.get("files", {})
|
|
195
|
+
if files:
|
|
196
|
+
total_lines = 0
|
|
197
|
+
covered_lines = 0
|
|
198
|
+
for file_data in files.values():
|
|
199
|
+
summary = file_data.get("summary", {})
|
|
200
|
+
total_lines += summary.get("num_statements", 0)
|
|
201
|
+
covered_lines += summary.get("covered_lines", 0)
|
|
202
|
+
|
|
203
|
+
if total_lines > 0:
|
|
204
|
+
return (covered_lines / total_lines) * 100
|
|
205
|
+
|
|
206
|
+
return None
|
|
207
|
+
|
|
208
|
+
except (json.JSONDecodeError, ValueError, KeyError, TypeError):
|
|
209
|
+
return None
|
|
210
|
+
|
|
211
|
+
def _handle_no_ratchet_status(
|
|
212
|
+
self, direct_coverage: float | None
|
|
213
|
+
) -> dict[str, t.Any]:
|
|
214
|
+
"""Handle case when ratchet is not initialized."""
|
|
215
|
+
if direct_coverage is not None:
|
|
216
|
+
return {
|
|
217
|
+
"status": "coverage_available",
|
|
218
|
+
"coverage_percent": direct_coverage,
|
|
219
|
+
"message": "Coverage data available from coverage.json",
|
|
220
|
+
"source": "coverage.json",
|
|
221
|
+
}
|
|
222
|
+
|
|
223
|
+
return {
|
|
224
|
+
"status": "not_initialized",
|
|
225
|
+
"coverage_percent": 0.0,
|
|
226
|
+
"message": "Coverage ratchet not initialized",
|
|
227
|
+
}
|
|
228
|
+
|
|
229
|
+
def _get_final_coverage(
|
|
230
|
+
self, ratchet_coverage: float, direct_coverage: float | None
|
|
231
|
+
) -> float:
|
|
232
|
+
"""Determine final coverage value."""
|
|
233
|
+
return direct_coverage if direct_coverage is not None else ratchet_coverage
|
|
234
|
+
|
|
131
235
|
def get_coverage(self) -> dict[str, t.Any]:
|
|
132
236
|
try:
|
|
133
237
|
status = self.coverage_ratchet.get_status_report()
|
|
134
238
|
|
|
135
|
-
if
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
|
|
239
|
+
# Check if we have actual coverage data from coverage.json even if ratchet is not initialized
|
|
240
|
+
direct_coverage = self._get_coverage_from_file()
|
|
241
|
+
|
|
242
|
+
# If ratchet is not initialized but we have direct coverage data, use it
|
|
243
|
+
if (
|
|
244
|
+
not status or status.get("status") == "not_initialized"
|
|
245
|
+
) and direct_coverage is not None:
|
|
246
|
+
return self._handle_no_ratchet_status(direct_coverage)
|
|
247
|
+
|
|
248
|
+
# If ratchet is not initialized and no direct coverage, return not initialized
|
|
249
|
+
if not status or status.get("status") == "not_initialized":
|
|
250
|
+
return self._handle_no_ratchet_status(None)
|
|
251
|
+
|
|
252
|
+
# Use ratchet data, but prefer direct coverage if available and different
|
|
253
|
+
ratchet_coverage = status.get("current_coverage", 0.0)
|
|
254
|
+
final_coverage = self._get_final_coverage(ratchet_coverage, direct_coverage)
|
|
141
255
|
|
|
142
256
|
return {
|
|
143
257
|
"status": "active",
|
|
144
|
-
"coverage_percent":
|
|
258
|
+
"coverage_percent": final_coverage,
|
|
145
259
|
"target_coverage": status.get("target_coverage", 100.0),
|
|
146
260
|
"next_milestone": status.get("next_milestone"),
|
|
147
261
|
"progress_percent": status.get("progress_percent", 0.0),
|
|
148
262
|
"last_updated": status.get("last_updated"),
|
|
149
263
|
"milestones_achieved": status.get("milestones_achieved", []),
|
|
264
|
+
"source": "coverage.json" if direct_coverage is not None else "ratchet",
|
|
150
265
|
}
|
|
151
266
|
except Exception as e:
|
|
152
267
|
return {
|
|
@@ -164,11 +279,11 @@ class TestManager:
|
|
|
164
279
|
test_path = self.pkg_path / test_dir
|
|
165
280
|
if test_path.exists() and test_path.is_dir():
|
|
166
281
|
for test_file_pattern in test_files:
|
|
167
|
-
if list
|
|
282
|
+
if list(test_path.glob(f"**/{test_file_pattern}")):
|
|
168
283
|
return True
|
|
169
284
|
|
|
170
285
|
for test_file_pattern in test_files:
|
|
171
|
-
if list
|
|
286
|
+
if list(self.pkg_path.glob(test_file_pattern)):
|
|
172
287
|
return True
|
|
173
288
|
|
|
174
289
|
return False
|
|
@@ -187,34 +302,389 @@ class TestManager:
|
|
|
187
302
|
return self.executor.execute_with_progress(cmd, self._get_timeout(options))
|
|
188
303
|
|
|
189
304
|
def _print_test_start_message(self, options: OptionsProtocol) -> None:
|
|
190
|
-
workers = self.command_builder.get_optimal_workers(options)
|
|
305
|
+
workers = self.command_builder.get_optimal_workers(options, print_info=False)
|
|
191
306
|
timeout = self.command_builder.get_test_timeout(options)
|
|
192
307
|
|
|
193
308
|
self.console.print(
|
|
194
|
-
f"[cyan]🧪[/
|
|
309
|
+
f"[cyan]🧪[/cyan] Running tests (workers: {workers}, timeout: {timeout}s)"
|
|
195
310
|
)
|
|
196
311
|
|
|
197
|
-
def _handle_test_success(
|
|
198
|
-
self
|
|
312
|
+
def _handle_test_success(
|
|
313
|
+
self,
|
|
314
|
+
output: str,
|
|
315
|
+
duration: float,
|
|
316
|
+
options: OptionsProtocol,
|
|
317
|
+
workers: int | str,
|
|
318
|
+
) -> bool:
|
|
319
|
+
self.console.print(f"[green]✅[/green] Tests passed in {duration: .1f}s")
|
|
320
|
+
|
|
321
|
+
# Parse and display test statistics panel
|
|
322
|
+
stats = self._parse_test_statistics(output)
|
|
323
|
+
if self._should_render_test_panel(stats):
|
|
324
|
+
self._render_test_results_panel(stats, workers, success=True)
|
|
199
325
|
|
|
200
326
|
if self.coverage_ratchet_enabled:
|
|
201
327
|
return self._process_coverage_ratchet()
|
|
202
328
|
|
|
203
329
|
return True
|
|
204
330
|
|
|
205
|
-
def _handle_test_failure(
|
|
206
|
-
self
|
|
331
|
+
def _handle_test_failure(
|
|
332
|
+
self,
|
|
333
|
+
stderr: str,
|
|
334
|
+
stdout: str,
|
|
335
|
+
duration: float,
|
|
336
|
+
options: OptionsProtocol,
|
|
337
|
+
workers: int | str,
|
|
338
|
+
) -> bool:
|
|
339
|
+
self.console.print(f"[red]❌[/red] Tests failed in {duration:.1f}s")
|
|
340
|
+
|
|
341
|
+
# Parse and display test statistics panel (use stdout for stats)
|
|
342
|
+
combined_output = stdout + "\n" + stderr
|
|
343
|
+
clean_output = self._strip_ansi_codes(combined_output)
|
|
344
|
+
stats = self._parse_test_statistics(clean_output, already_clean=True)
|
|
345
|
+
if self._should_render_test_panel(stats):
|
|
346
|
+
self._render_test_results_panel(stats, workers, success=False)
|
|
347
|
+
|
|
348
|
+
# Always show key failure information, not just in verbose mode
|
|
349
|
+
if clean_output.strip():
|
|
350
|
+
# Extract and show essential failure details even in non-verbose mode
|
|
351
|
+
failure_lines = self._extract_failure_lines(clean_output)
|
|
352
|
+
if failure_lines:
|
|
353
|
+
self._last_test_failures = failure_lines
|
|
354
|
+
self._render_banner("Key Test Failures", line_style="red")
|
|
355
|
+
|
|
356
|
+
for failure in failure_lines:
|
|
357
|
+
self.console.print(f"[red]• {failure}[/red]")
|
|
358
|
+
else:
|
|
359
|
+
self._last_test_failures = []
|
|
360
|
+
|
|
361
|
+
# Enhanced error reporting in verbose mode
|
|
362
|
+
if options.verbose or getattr(options, "ai_debug", False):
|
|
363
|
+
self._render_banner(
|
|
364
|
+
"Full Test Output (Enhanced)",
|
|
365
|
+
line_style="red",
|
|
366
|
+
)
|
|
367
|
+
# Use Rich-formatted output instead of raw dump
|
|
368
|
+
self._render_formatted_output(clean_output, options, already_clean=True)
|
|
369
|
+
else:
|
|
370
|
+
# Show some information even when there's no output
|
|
371
|
+
border_line = "-" * getattr(options, "column_width", 70)
|
|
372
|
+
self.console.print("\n🧪 TESTS Failed test execution")
|
|
373
|
+
self.console.print(border_line)
|
|
374
|
+
|
|
375
|
+
self.console.print(
|
|
376
|
+
" [yellow]This may indicate a timeout or critical error[/yellow]"
|
|
377
|
+
)
|
|
378
|
+
self.console.print(
|
|
379
|
+
f" [yellow]Duration: {duration:.1f}s, Workers: {workers}[/yellow]"
|
|
380
|
+
)
|
|
381
|
+
if duration > 290: # Approaching 300s timeout
|
|
382
|
+
self.console.print(
|
|
383
|
+
" [yellow]⚠️ Execution time was very close to timeout, may have timed out[/yellow]"
|
|
384
|
+
)
|
|
385
|
+
self.console.print(
|
|
386
|
+
" [red]Workflow failed: Test workflow execution failed[/red]"
|
|
387
|
+
)
|
|
388
|
+
self.console.print(border_line)
|
|
389
|
+
self._last_test_failures = []
|
|
207
390
|
|
|
208
|
-
self._last_test_failures = self._extract_failure_lines(output)
|
|
209
391
|
return False
|
|
210
392
|
|
|
211
393
|
def _handle_test_error(self, start_time: float, error: Exception) -> bool:
|
|
212
394
|
duration = time.time() - start_time
|
|
213
395
|
self.console.print(
|
|
214
|
-
f"[red]💥[/
|
|
396
|
+
f"[red]💥[/red] Test execution error after {duration: .1f}s: {error}"
|
|
215
397
|
)
|
|
216
398
|
return False
|
|
217
399
|
|
|
400
|
+
def _parse_test_statistics(
|
|
401
|
+
self, output: str, *, already_clean: bool = False
|
|
402
|
+
) -> dict[str, t.Any]:
|
|
403
|
+
"""Parse test statistics from pytest output.
|
|
404
|
+
|
|
405
|
+
Extracts metrics like passed, failed, skipped, errors, and duration
|
|
406
|
+
from pytest's summary line.
|
|
407
|
+
|
|
408
|
+
Args:
|
|
409
|
+
output: Raw pytest output text
|
|
410
|
+
|
|
411
|
+
Returns:
|
|
412
|
+
Dictionary containing test statistics
|
|
413
|
+
"""
|
|
414
|
+
clean_output = output if already_clean else self._strip_ansi_codes(output)
|
|
415
|
+
stats = {
|
|
416
|
+
"total": 0,
|
|
417
|
+
"passed": 0,
|
|
418
|
+
"failed": 0,
|
|
419
|
+
"skipped": 0,
|
|
420
|
+
"errors": 0,
|
|
421
|
+
"xfailed": 0,
|
|
422
|
+
"xpassed": 0,
|
|
423
|
+
"duration": 0.0,
|
|
424
|
+
"coverage": None,
|
|
425
|
+
}
|
|
426
|
+
|
|
427
|
+
try:
|
|
428
|
+
# Extract summary and duration
|
|
429
|
+
summary_match = self._extract_pytest_summary(clean_output)
|
|
430
|
+
if summary_match:
|
|
431
|
+
summary_text, duration = self._parse_summary_match(
|
|
432
|
+
summary_match, clean_output
|
|
433
|
+
)
|
|
434
|
+
stats["duration"] = duration
|
|
435
|
+
|
|
436
|
+
# Extract metrics from summary
|
|
437
|
+
self._extract_test_metrics(summary_text, stats)
|
|
438
|
+
|
|
439
|
+
# Calculate totals and fallback if summary missing
|
|
440
|
+
self._calculate_total_tests(stats, clean_output)
|
|
441
|
+
|
|
442
|
+
# Extract coverage if present
|
|
443
|
+
stats["coverage"] = self._extract_coverage_from_output(clean_output)
|
|
444
|
+
|
|
445
|
+
except (ValueError, AttributeError) as e:
|
|
446
|
+
self.console.print(f"[dim]⚠️ Failed to parse test statistics: {e}[/dim]")
|
|
447
|
+
|
|
448
|
+
return stats
|
|
449
|
+
|
|
450
|
+
def _extract_pytest_summary(self, output: str) -> re.Match[str] | None:
|
|
451
|
+
"""Extract pytest summary line match from output."""
|
|
452
|
+
summary_patterns = [
|
|
453
|
+
r"=+\s+(.+?)\s+in\s+([\d.]+)s?\s*=+", # "======= 5 passed in 1.23s ======="
|
|
454
|
+
r"(\d+\s+\w+)+\s+in\s+([\d.]+)s?", # "5 passed, 2 failed in 1.23s"
|
|
455
|
+
r"(\d+.*)in\s+([\d.]+)s?", # More flexible format
|
|
456
|
+
]
|
|
457
|
+
|
|
458
|
+
for pattern in summary_patterns:
|
|
459
|
+
match = re.search(pattern, output)
|
|
460
|
+
if match:
|
|
461
|
+
return match
|
|
462
|
+
return None
|
|
463
|
+
|
|
464
|
+
def _parse_summary_match(
|
|
465
|
+
self, match: re.Match[str], output: str
|
|
466
|
+
) -> tuple[str, float]:
|
|
467
|
+
"""Parse summary text and duration from regex match."""
|
|
468
|
+
if len(match.groups()) >= 2:
|
|
469
|
+
summary_text = match.group(1)
|
|
470
|
+
duration = float(match.group(2))
|
|
471
|
+
else:
|
|
472
|
+
# Pattern only captured duration
|
|
473
|
+
duration = (
|
|
474
|
+
float(match.group(1))
|
|
475
|
+
if match.group(1).replace(".", "").isdigit()
|
|
476
|
+
else 0.0
|
|
477
|
+
)
|
|
478
|
+
summary_text = output
|
|
479
|
+
|
|
480
|
+
return summary_text, duration
|
|
481
|
+
|
|
482
|
+
def _extract_test_metrics(self, summary_text: str, stats: dict[str, t.Any]) -> None:
|
|
483
|
+
"""Extract individual test metrics from summary text."""
|
|
484
|
+
for metric in ("passed", "failed", "skipped", "error", "xfailed", "xpassed"):
|
|
485
|
+
metric_pattern = rf"(\d+)\s+{metric}"
|
|
486
|
+
metric_match = re.search(metric_pattern, summary_text, re.IGNORECASE)
|
|
487
|
+
if metric_match:
|
|
488
|
+
count = int(metric_match.group(1))
|
|
489
|
+
key = "errors" if metric == "error" else metric
|
|
490
|
+
stats[key] = count
|
|
491
|
+
|
|
492
|
+
def _calculate_total_tests(self, stats: dict[str, t.Any], output: str) -> None:
|
|
493
|
+
"""Calculate total tests and apply fallback counting if needed."""
|
|
494
|
+
stats["total"] = sum(
|
|
495
|
+
[
|
|
496
|
+
stats["passed"],
|
|
497
|
+
stats["failed"],
|
|
498
|
+
stats["skipped"],
|
|
499
|
+
stats["errors"],
|
|
500
|
+
stats["xfailed"],
|
|
501
|
+
stats["xpassed"],
|
|
502
|
+
]
|
|
503
|
+
)
|
|
504
|
+
|
|
505
|
+
# Fallback: manually count from output if total is still 0
|
|
506
|
+
if stats["total"] == 0:
|
|
507
|
+
self._fallback_count_tests(output, stats)
|
|
508
|
+
|
|
509
|
+
def _fallback_count_tests(self, output: str, stats: dict[str, t.Any]) -> None:
|
|
510
|
+
"""Manually count test results from output when parsing fails."""
|
|
511
|
+
status_tokens = [
|
|
512
|
+
("passed", "PASSED"),
|
|
513
|
+
("failed", "FAILED"),
|
|
514
|
+
("skipped", "SKIPPED"),
|
|
515
|
+
("errors", "ERROR"),
|
|
516
|
+
("xfailed", "XFAIL"),
|
|
517
|
+
("xpassed", "XPASS"),
|
|
518
|
+
]
|
|
519
|
+
|
|
520
|
+
for raw_line in output.splitlines():
|
|
521
|
+
line = raw_line.strip()
|
|
522
|
+
if "::" not in line:
|
|
523
|
+
continue
|
|
524
|
+
|
|
525
|
+
line_upper = line.upper()
|
|
526
|
+
if line_upper.startswith(
|
|
527
|
+
("FAILED", "ERROR", "XPASS", "XFAIL", "SKIPPED", "PASSED")
|
|
528
|
+
):
|
|
529
|
+
continue
|
|
530
|
+
|
|
531
|
+
for key, token in status_tokens:
|
|
532
|
+
if token in line_upper:
|
|
533
|
+
stats[key] += 1
|
|
534
|
+
break
|
|
535
|
+
|
|
536
|
+
stats["total"] = sum(
|
|
537
|
+
[
|
|
538
|
+
stats["passed"],
|
|
539
|
+
stats["failed"],
|
|
540
|
+
stats["skipped"],
|
|
541
|
+
stats["errors"],
|
|
542
|
+
stats.get("xfailed", 0),
|
|
543
|
+
stats.get("xpassed", 0),
|
|
544
|
+
]
|
|
545
|
+
)
|
|
546
|
+
|
|
547
|
+
if stats["total"] == 0:
|
|
548
|
+
legacy_patterns = {
|
|
549
|
+
"passed": r"(?:\.|✓)\s*(?:PASSED|pass)",
|
|
550
|
+
"failed": r"(?:F|X|❌)\s*(?:FAILED|fail)",
|
|
551
|
+
"skipped": r"(?:s|S|.SKIPPED|skip)",
|
|
552
|
+
"errors": r"ERROR|E\s+",
|
|
553
|
+
}
|
|
554
|
+
for key, pattern in legacy_patterns.items():
|
|
555
|
+
stats[key] = len(re.findall(pattern, output, re.IGNORECASE))
|
|
556
|
+
|
|
557
|
+
stats["total"] = (
|
|
558
|
+
stats["passed"] + stats["failed"] + stats["skipped"] + stats["errors"]
|
|
559
|
+
)
|
|
560
|
+
|
|
561
|
+
def _extract_coverage_from_output(self, output: str) -> float | None:
|
|
562
|
+
"""Extract coverage percentage from pytest output."""
|
|
563
|
+
coverage_pattern = r"TOTAL\s+\d+\s+\d+\s+(\d+)%"
|
|
564
|
+
coverage_match = re.search(coverage_pattern, output)
|
|
565
|
+
if coverage_match:
|
|
566
|
+
return float(coverage_match.group(1))
|
|
567
|
+
return None
|
|
568
|
+
|
|
569
|
+
def _should_render_test_panel(self, stats: dict[str, t.Any]) -> bool:
|
|
570
|
+
"""Determine if the test results panel should be rendered."""
|
|
571
|
+
return any(
|
|
572
|
+
[
|
|
573
|
+
stats.get("total", 0) > 0,
|
|
574
|
+
stats.get("passed", 0) > 0,
|
|
575
|
+
stats.get("failed", 0) > 0,
|
|
576
|
+
stats.get("errors", 0) > 0,
|
|
577
|
+
stats.get("skipped", 0) > 0,
|
|
578
|
+
stats.get("xfailed", 0) > 0,
|
|
579
|
+
stats.get("xpassed", 0) > 0,
|
|
580
|
+
stats.get("duration", 0.0) > 0.0,
|
|
581
|
+
stats.get("coverage") is not None,
|
|
582
|
+
]
|
|
583
|
+
)
|
|
584
|
+
|
|
585
|
+
def _render_test_results_panel(
|
|
586
|
+
self,
|
|
587
|
+
stats: dict[str, t.Any],
|
|
588
|
+
workers: int | str,
|
|
589
|
+
success: bool,
|
|
590
|
+
) -> None:
|
|
591
|
+
"""Render test results panel with statistics similar to hook results.
|
|
592
|
+
|
|
593
|
+
Args:
|
|
594
|
+
stats: Dictionary of test statistics from _parse_test_statistics
|
|
595
|
+
workers: Number of workers used (or "auto")
|
|
596
|
+
success: Whether tests passed overall
|
|
597
|
+
"""
|
|
598
|
+
table = Table(box=box.SIMPLE, header_style="bold bright_white")
|
|
599
|
+
table.add_column("Metric", style="cyan", overflow="fold")
|
|
600
|
+
table.add_column("Count", justify="right", style="bright_white")
|
|
601
|
+
table.add_column("Percentage", justify="right", style="magenta")
|
|
602
|
+
|
|
603
|
+
total = stats["total"]
|
|
604
|
+
|
|
605
|
+
# Add rows for each metric
|
|
606
|
+
metrics = [
|
|
607
|
+
("✅ Passed", stats["passed"], "green"),
|
|
608
|
+
("❌ Failed", stats["failed"], "red"),
|
|
609
|
+
("⏭ Skipped", stats["skipped"], "yellow"),
|
|
610
|
+
("💥 Errors", stats["errors"], "red"),
|
|
611
|
+
]
|
|
612
|
+
|
|
613
|
+
# Only show xfailed/xpassed if they exist
|
|
614
|
+
if stats.get("xfailed", 0) > 0:
|
|
615
|
+
metrics.append(("⚠️ Expected Failures", stats["xfailed"], "yellow"))
|
|
616
|
+
if stats.get("xpassed", 0) > 0:
|
|
617
|
+
metrics.append(("✨ Unexpected Passes", stats["xpassed"], "green"))
|
|
618
|
+
|
|
619
|
+
for label, count, _ in metrics:
|
|
620
|
+
percentage = f"{(count / total * 100):.1f}%" if total > 0 else "0.0%"
|
|
621
|
+
table.add_row(label, str(count), percentage)
|
|
622
|
+
|
|
623
|
+
# Add separator and summary rows
|
|
624
|
+
table.add_row("─" * 20, "─" * 10, "─" * 15, style="dim")
|
|
625
|
+
table.add_row("📊 Total Tests", str(total), "100.0%", style="bold")
|
|
626
|
+
table.add_row(
|
|
627
|
+
"⏱ Duration",
|
|
628
|
+
f"{stats['duration']:.2f}s",
|
|
629
|
+
"",
|
|
630
|
+
style="bold magenta",
|
|
631
|
+
)
|
|
632
|
+
table.add_row(
|
|
633
|
+
"👥 Workers",
|
|
634
|
+
str(workers),
|
|
635
|
+
"",
|
|
636
|
+
style="bold cyan",
|
|
637
|
+
)
|
|
638
|
+
|
|
639
|
+
# Add coverage if available
|
|
640
|
+
if stats.get("coverage") is not None:
|
|
641
|
+
table.add_row(
|
|
642
|
+
"📈 Coverage",
|
|
643
|
+
f"{stats['coverage']:.1f}%",
|
|
644
|
+
"",
|
|
645
|
+
style="bold green",
|
|
646
|
+
)
|
|
647
|
+
|
|
648
|
+
# Create panel with appropriate styling
|
|
649
|
+
border_style = "green" if success else "red"
|
|
650
|
+
title_icon = "✅" if success else "❌"
|
|
651
|
+
title_text = "Test Results" if success else "Test Results (Failed)"
|
|
652
|
+
|
|
653
|
+
panel = Panel(
|
|
654
|
+
table,
|
|
655
|
+
title=f"[bold]{title_icon} {title_text}[/bold]",
|
|
656
|
+
border_style=border_style,
|
|
657
|
+
padding=(0, 1),
|
|
658
|
+
width=get_console_width(),
|
|
659
|
+
)
|
|
660
|
+
|
|
661
|
+
self.console.print(panel)
|
|
662
|
+
|
|
663
|
+
def _render_banner(
|
|
664
|
+
self,
|
|
665
|
+
title: str,
|
|
666
|
+
*,
|
|
667
|
+
line_style: str = "red",
|
|
668
|
+
title_style: str | None = None,
|
|
669
|
+
char: str = "━",
|
|
670
|
+
padding: bool = True,
|
|
671
|
+
) -> None:
|
|
672
|
+
"""Render a horizontal banner that respects configured console width."""
|
|
673
|
+
width = max(20, get_console_width())
|
|
674
|
+
line_text = Text(char * width, style=line_style)
|
|
675
|
+
resolved_title_style = title_style or ("bold " + line_style).strip()
|
|
676
|
+
title_text = Text(title, style=resolved_title_style)
|
|
677
|
+
|
|
678
|
+
if padding:
|
|
679
|
+
self.console.print()
|
|
680
|
+
|
|
681
|
+
self.console.print(line_text)
|
|
682
|
+
self.console.print(title_text)
|
|
683
|
+
self.console.print(line_text)
|
|
684
|
+
|
|
685
|
+
if padding:
|
|
686
|
+
self.console.print()
|
|
687
|
+
|
|
218
688
|
def _process_coverage_ratchet(self) -> bool:
|
|
219
689
|
if not self.coverage_ratchet_enabled:
|
|
220
690
|
return True
|
|
@@ -226,34 +696,116 @@ class TestManager:
|
|
|
226
696
|
|
|
227
697
|
return self._handle_ratchet_result(ratchet_result)
|
|
228
698
|
|
|
699
|
+
def _attempt_coverage_extraction(self) -> float | None:
|
|
700
|
+
"""Attempt to extract coverage from various sources."""
|
|
701
|
+
# Primary: Try to extract from coverage.json
|
|
702
|
+
current_coverage = self._get_coverage_from_file()
|
|
703
|
+
if current_coverage is not None:
|
|
704
|
+
return current_coverage
|
|
705
|
+
|
|
706
|
+
return None
|
|
707
|
+
|
|
708
|
+
def _handle_coverage_extraction_result(
|
|
709
|
+
self, current_coverage: float | None
|
|
710
|
+
) -> float | None:
|
|
711
|
+
"""Handle the result of coverage extraction attempts."""
|
|
712
|
+
if current_coverage is not None:
|
|
713
|
+
self.console.print(
|
|
714
|
+
f"[dim]📊 Coverage extracted from coverage.json: {current_coverage:.2f}%[/dim]"
|
|
715
|
+
)
|
|
716
|
+
return current_coverage
|
|
717
|
+
|
|
718
|
+
def _try_service_coverage(self) -> float | None:
|
|
719
|
+
"""Try coverage service fallback.
|
|
720
|
+
|
|
721
|
+
Returns:
|
|
722
|
+
Coverage value if available, None otherwise
|
|
723
|
+
"""
|
|
724
|
+
try:
|
|
725
|
+
current_coverage = self.coverage_ratchet.get_baseline_coverage()
|
|
726
|
+
if current_coverage is not None and current_coverage > 0:
|
|
727
|
+
self.console.print(
|
|
728
|
+
f"[dim]📊 Coverage from service fallback: {current_coverage:.2f}%[/dim]"
|
|
729
|
+
)
|
|
730
|
+
return current_coverage
|
|
731
|
+
return None
|
|
732
|
+
except (AttributeError, Exception):
|
|
733
|
+
# Service method doesn't exist or failed, skip
|
|
734
|
+
return None
|
|
735
|
+
|
|
736
|
+
def _handle_zero_coverage_fallback(self, current_coverage: float | None) -> None:
|
|
737
|
+
"""Handle 0.0% fallback case when coverage.json exists."""
|
|
738
|
+
coverage_json_path = self.pkg_path / "coverage.json"
|
|
739
|
+
if current_coverage is None and coverage_json_path.exists():
|
|
740
|
+
self.console.print(
|
|
741
|
+
"[yellow]⚠️[/yellow] Skipping 0.0% fallback when coverage.json exists"
|
|
742
|
+
)
|
|
743
|
+
|
|
744
|
+
def _get_fallback_coverage(
|
|
745
|
+
self, ratchet_result: dict[str, t.Any], current_coverage: float | None
|
|
746
|
+
) -> float | None:
|
|
747
|
+
"""Get coverage from fallback sources."""
|
|
748
|
+
# Secondary: Try ratchet result if coverage.json failed
|
|
749
|
+
if current_coverage is None and ratchet_result:
|
|
750
|
+
# Try to extract from ratchet result
|
|
751
|
+
if "current_coverage" in ratchet_result:
|
|
752
|
+
current_coverage = ratchet_result["current_coverage"]
|
|
753
|
+
if current_coverage is not None and current_coverage > 0:
|
|
754
|
+
self.console.print(
|
|
755
|
+
f"[dim]📊 Coverage from ratchet result: {current_coverage:.2f}%[/dim]"
|
|
756
|
+
)
|
|
757
|
+
|
|
758
|
+
# Tertiary: Try coverage service, but only accept non-zero values
|
|
759
|
+
if current_coverage is None:
|
|
760
|
+
current_coverage = self._try_service_coverage()
|
|
761
|
+
if current_coverage is None:
|
|
762
|
+
self._handle_zero_coverage_fallback(current_coverage)
|
|
763
|
+
|
|
764
|
+
return current_coverage
|
|
765
|
+
|
|
229
766
|
def _update_coverage_badge(self, ratchet_result: dict[str, t.Any]) -> None:
|
|
230
767
|
"""Update coverage badge in README.md if coverage changed."""
|
|
231
768
|
try:
|
|
232
|
-
#
|
|
233
|
-
import json
|
|
234
|
-
|
|
235
|
-
current_coverage = None
|
|
769
|
+
# Check if coverage files exist and inform user
|
|
236
770
|
coverage_json_path = self.pkg_path / "coverage.json"
|
|
771
|
+
ratchet_path = self.pkg_path / ".coverage-ratchet.json"
|
|
237
772
|
|
|
238
|
-
if coverage_json_path.exists():
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
|
|
773
|
+
if not coverage_json_path.exists():
|
|
774
|
+
self.console.print(
|
|
775
|
+
"[yellow]ℹ️[/yellow] Coverage file doesn't exist yet, will be created after test run"
|
|
776
|
+
)
|
|
777
|
+
if not ratchet_path.exists():
|
|
778
|
+
self.console.print(
|
|
779
|
+
"[yellow]ℹ️[/yellow] Coverage ratchet file doesn't exist yet, initializing..."
|
|
780
|
+
)
|
|
242
781
|
|
|
243
|
-
#
|
|
244
|
-
|
|
245
|
-
|
|
782
|
+
# Get current coverage directly from coverage.json to ensure freshest data
|
|
783
|
+
current_coverage = self._attempt_coverage_extraction()
|
|
784
|
+
current_coverage = self._handle_coverage_extraction_result(current_coverage)
|
|
246
785
|
|
|
247
|
-
#
|
|
248
|
-
|
|
249
|
-
|
|
250
|
-
|
|
786
|
+
# Get fallback coverage if needed
|
|
787
|
+
current_coverage = self._get_fallback_coverage(
|
|
788
|
+
ratchet_result, current_coverage
|
|
789
|
+
)
|
|
251
790
|
|
|
252
|
-
if
|
|
791
|
+
# Only update badge if we have valid coverage data
|
|
792
|
+
if current_coverage is not None and current_coverage >= 0:
|
|
253
793
|
if self._coverage_badge_service.should_update_badge(current_coverage):
|
|
254
794
|
self._coverage_badge_service.update_readme_coverage_badge(
|
|
255
795
|
current_coverage
|
|
256
796
|
)
|
|
797
|
+
self.console.print(
|
|
798
|
+
f"[green]✅[/green] Badge updated to {current_coverage:.2f}%"
|
|
799
|
+
)
|
|
800
|
+
else:
|
|
801
|
+
self.console.print(
|
|
802
|
+
f"[dim]📊 Badge unchanged (current: {current_coverage:.2f}%)[/dim]"
|
|
803
|
+
)
|
|
804
|
+
else:
|
|
805
|
+
self.console.print(
|
|
806
|
+
"[yellow]⚠️[/yellow] No valid coverage data found for badge update"
|
|
807
|
+
)
|
|
808
|
+
|
|
257
809
|
except Exception as e:
|
|
258
810
|
# Don't fail the test process if badge update fails
|
|
259
811
|
self.console.print(f"[yellow]⚠️[/yellow] Badge update failed: {e}")
|
|
@@ -265,12 +817,12 @@ class TestManager:
|
|
|
265
817
|
return True
|
|
266
818
|
else:
|
|
267
819
|
if "message" in ratchet_result:
|
|
268
|
-
self.console.print(f"[red]📉[/
|
|
820
|
+
self.console.print(f"[red]📉[/red] {ratchet_result['message']}")
|
|
269
821
|
else:
|
|
270
822
|
current = ratchet_result.get("current_coverage", 0)
|
|
271
823
|
previous = ratchet_result.get("previous_coverage", 0)
|
|
272
824
|
self.console.print(
|
|
273
|
-
f"[red]📉[/
|
|
825
|
+
f"[red]📉[/red] Coverage regression: "
|
|
274
826
|
f"{current: .2f}% < {previous: .2f}%"
|
|
275
827
|
)
|
|
276
828
|
return False
|
|
@@ -280,7 +832,7 @@ class TestManager:
|
|
|
280
832
|
current = ratchet_result.get("current_coverage", 0)
|
|
281
833
|
|
|
282
834
|
self.console.print(
|
|
283
|
-
f"[green]📈[/
|
|
835
|
+
f"[green]📈[/green] Coverage improved by {improvement: .2f}% "
|
|
284
836
|
f"to {current: .2f}%"
|
|
285
837
|
)
|
|
286
838
|
|
|
@@ -296,18 +848,473 @@ class TestManager:
|
|
|
296
848
|
|
|
297
849
|
return failures[:10]
|
|
298
850
|
|
|
851
|
+
@staticmethod
|
|
852
|
+
def _strip_ansi_codes(text: str) -> str:
|
|
853
|
+
"""Remove ANSI escape sequences from a string."""
|
|
854
|
+
return ANSI_ESCAPE_RE.sub("", text)
|
|
855
|
+
|
|
856
|
+
def _split_output_sections(self, output: str) -> list[tuple[str, str]]:
|
|
857
|
+
"""Split pytest output into logical sections for rendering.
|
|
858
|
+
|
|
859
|
+
Sections:
|
|
860
|
+
- header: Session start, test collection
|
|
861
|
+
- failure: Individual test failures with tracebacks
|
|
862
|
+
- summary: Short test summary info
|
|
863
|
+
- footer: Coverage, timing, final stats
|
|
864
|
+
|
|
865
|
+
Returns:
|
|
866
|
+
List of (section_type, section_content) tuples
|
|
867
|
+
"""
|
|
868
|
+
sections = []
|
|
869
|
+
lines = output.split("\n")
|
|
870
|
+
|
|
871
|
+
current_section: list[str] = []
|
|
872
|
+
current_type = "header"
|
|
873
|
+
|
|
874
|
+
for line in lines:
|
|
875
|
+
# Detect section boundaries
|
|
876
|
+
if "short test summary" in line.lower():
|
|
877
|
+
# Save previous section
|
|
878
|
+
if current_section:
|
|
879
|
+
sections.append((current_type, "\n".join(current_section)))
|
|
880
|
+
current_section = [line]
|
|
881
|
+
current_type = "summary"
|
|
882
|
+
|
|
883
|
+
elif " FAILED " in line or " ERROR " in line:
|
|
884
|
+
# Save previous section
|
|
885
|
+
if current_section and current_type != "failure":
|
|
886
|
+
sections.append((current_type, "\n".join(current_section)))
|
|
887
|
+
current_section = []
|
|
888
|
+
current_type = "failure"
|
|
889
|
+
current_section.append(line)
|
|
890
|
+
|
|
891
|
+
elif line.startswith("=") and ("passed" in line or "failed" in line):
|
|
892
|
+
# Footer section
|
|
893
|
+
if current_section:
|
|
894
|
+
sections.append((current_type, "\n".join(current_section)))
|
|
895
|
+
current_section = [line]
|
|
896
|
+
current_type = "footer"
|
|
897
|
+
|
|
898
|
+
else:
|
|
899
|
+
current_section.append(line)
|
|
900
|
+
|
|
901
|
+
# Add final section
|
|
902
|
+
if current_section:
|
|
903
|
+
sections.append((current_type, "\n".join(current_section)))
|
|
904
|
+
|
|
905
|
+
return sections
|
|
906
|
+
|
|
907
|
+
def _render_formatted_output(
|
|
908
|
+
self,
|
|
909
|
+
output: str,
|
|
910
|
+
options: OptionsProtocol,
|
|
911
|
+
*,
|
|
912
|
+
already_clean: bool = False,
|
|
913
|
+
) -> None:
|
|
914
|
+
"""Render test output with Rich formatting and sections.
|
|
915
|
+
|
|
916
|
+
Phase 2: Uses structured failure parser when available.
|
|
917
|
+
|
|
918
|
+
Args:
|
|
919
|
+
output: Raw pytest output text
|
|
920
|
+
options: Test options (for verbosity level)
|
|
921
|
+
"""
|
|
922
|
+
from rich.panel import Panel
|
|
923
|
+
|
|
924
|
+
clean_output = output if already_clean else self._strip_ansi_codes(output)
|
|
925
|
+
|
|
926
|
+
# Try structured parsing first (Phase 2)
|
|
927
|
+
try:
|
|
928
|
+
failures = self._extract_structured_failures(clean_output)
|
|
929
|
+
if failures:
|
|
930
|
+
self._render_banner(
|
|
931
|
+
"Detailed Failure Analysis",
|
|
932
|
+
line_style="red",
|
|
933
|
+
char="═",
|
|
934
|
+
)
|
|
935
|
+
|
|
936
|
+
self._render_structured_failure_panels(failures)
|
|
937
|
+
|
|
938
|
+
# Still show summary section
|
|
939
|
+
sections = self._split_output_sections(clean_output)
|
|
940
|
+
for section_type, section_content in sections:
|
|
941
|
+
if section_type == "summary":
|
|
942
|
+
panel = Panel(
|
|
943
|
+
section_content.strip(),
|
|
944
|
+
title="[bold yellow]📋 Test Summary[/bold yellow]",
|
|
945
|
+
border_style="yellow",
|
|
946
|
+
width=get_console_width(),
|
|
947
|
+
)
|
|
948
|
+
self.console.print(panel)
|
|
949
|
+
elif section_type == "footer":
|
|
950
|
+
self.console.print(
|
|
951
|
+
f"\n[cyan]{section_content.strip()}[/cyan]\n"
|
|
952
|
+
)
|
|
953
|
+
|
|
954
|
+
return
|
|
955
|
+
|
|
956
|
+
except Exception as e:
|
|
957
|
+
# Fallback to Phase 1 rendering if parsing fails
|
|
958
|
+
self.console.print(
|
|
959
|
+
f"[dim yellow]⚠️ Structured parsing failed: {e}[/dim yellow]"
|
|
960
|
+
)
|
|
961
|
+
self.console.print(
|
|
962
|
+
"[dim yellow]Falling back to standard formatting...[/dim yellow]\n"
|
|
963
|
+
)
|
|
964
|
+
|
|
965
|
+
# Fallback: Phase 1 section-based rendering
|
|
966
|
+
sections = self._split_output_sections(clean_output)
|
|
967
|
+
|
|
968
|
+
for section_type, section_content in sections:
|
|
969
|
+
if not section_content.strip():
|
|
970
|
+
continue
|
|
971
|
+
|
|
972
|
+
if section_type == "failure":
|
|
973
|
+
self._render_failure_section(section_content)
|
|
974
|
+
elif section_type == "summary":
|
|
975
|
+
panel = Panel(
|
|
976
|
+
section_content.strip(),
|
|
977
|
+
title="[bold yellow]📋 Test Summary[/bold yellow]",
|
|
978
|
+
border_style="yellow",
|
|
979
|
+
width=get_console_width(),
|
|
980
|
+
)
|
|
981
|
+
self.console.print(panel)
|
|
982
|
+
elif section_type == "footer":
|
|
983
|
+
self.console.print(f"\n[cyan]{section_content.strip()}[/cyan]\n")
|
|
984
|
+
else:
|
|
985
|
+
# Header and other sections (dimmed)
|
|
986
|
+
if options.verbose or getattr(options, "ai_debug", False):
|
|
987
|
+
self.console.print(f"[dim]{section_content}[/dim]")
|
|
988
|
+
|
|
989
|
+
def _render_failure_section(self, section_content: str) -> None:
|
|
990
|
+
"""Render a failure section with syntax highlighting.
|
|
991
|
+
|
|
992
|
+
Args:
|
|
993
|
+
section_content: Failure output text
|
|
994
|
+
"""
|
|
995
|
+
from rich.panel import Panel
|
|
996
|
+
from rich.syntax import Syntax
|
|
997
|
+
|
|
998
|
+
# Apply Python syntax highlighting to tracebacks
|
|
999
|
+
syntax = Syntax(
|
|
1000
|
+
section_content,
|
|
1001
|
+
"python",
|
|
1002
|
+
theme="monokai",
|
|
1003
|
+
line_numbers=False,
|
|
1004
|
+
word_wrap=True,
|
|
1005
|
+
background_color="default",
|
|
1006
|
+
)
|
|
1007
|
+
|
|
1008
|
+
panel = Panel(
|
|
1009
|
+
syntax,
|
|
1010
|
+
title="[bold red]❌ Test Failure[/bold red]",
|
|
1011
|
+
border_style="red",
|
|
1012
|
+
width=get_console_width(),
|
|
1013
|
+
)
|
|
1014
|
+
self.console.print(panel)
|
|
1015
|
+
|
|
1016
|
+
def _parse_failure_header(
|
|
1017
|
+
self, line: str, current_failure: "TestFailure | None"
|
|
1018
|
+
) -> tuple["TestFailure | None", bool]:
|
|
1019
|
+
"""Parse failure header line."""
|
|
1020
|
+
import re
|
|
1021
|
+
|
|
1022
|
+
from crackerjack.models.test_models import TestFailure
|
|
1023
|
+
|
|
1024
|
+
failure_match = re.match(r"^(.+?)\s+(FAILED|ERROR)\s*(?:\[(.+?)\])?", line)
|
|
1025
|
+
if failure_match:
|
|
1026
|
+
test_path, status, params = failure_match.groups()
|
|
1027
|
+
new_failure = TestFailure(
|
|
1028
|
+
test_name=test_path + (f"[{params}]" if params else ""),
|
|
1029
|
+
status=status,
|
|
1030
|
+
location=test_path,
|
|
1031
|
+
)
|
|
1032
|
+
return new_failure, True
|
|
1033
|
+
return current_failure, False
|
|
1034
|
+
|
|
1035
|
+
def _parse_location_and_assertion(
|
|
1036
|
+
self, line: str, current_failure: "TestFailure", in_traceback: bool
|
|
1037
|
+
) -> bool:
|
|
1038
|
+
"""Parse location and assertion lines."""
|
|
1039
|
+
import re
|
|
1040
|
+
|
|
1041
|
+
# Detect location: "tests/test_foo.py:42: AssertionError"
|
|
1042
|
+
location_match = re.match(r"^(.+?\.py):(\d+):\s*(.*)$", line)
|
|
1043
|
+
if location_match and in_traceback:
|
|
1044
|
+
file_path, line_num, error_type = location_match.groups()
|
|
1045
|
+
current_failure.location = f"{file_path}:{line_num}"
|
|
1046
|
+
if error_type:
|
|
1047
|
+
current_failure.short_summary = error_type
|
|
1048
|
+
return True
|
|
1049
|
+
|
|
1050
|
+
# Detect assertion errors
|
|
1051
|
+
if "AssertionError:" in line or line.strip().startswith("E assert "):
|
|
1052
|
+
assertion_text = line.strip().lstrip("E").strip()
|
|
1053
|
+
if current_failure.assertion:
|
|
1054
|
+
current_failure.assertion += "\n" + assertion_text
|
|
1055
|
+
else:
|
|
1056
|
+
current_failure.assertion = assertion_text
|
|
1057
|
+
return True
|
|
1058
|
+
|
|
1059
|
+
return False
|
|
1060
|
+
|
|
1061
|
+
def _parse_captured_section_header(self, line: str) -> tuple[bool, str | None]:
|
|
1062
|
+
"""Parse captured output section headers."""
|
|
1063
|
+
if "captured stdout" in line.lower():
|
|
1064
|
+
return True, "stdout"
|
|
1065
|
+
elif "captured stderr" in line.lower():
|
|
1066
|
+
return True, "stderr"
|
|
1067
|
+
return False, None
|
|
1068
|
+
|
|
1069
|
+
def _parse_traceback_line(
|
|
1070
|
+
self, line: str, lines: list[str], i: int, current_failure: "TestFailure"
|
|
1071
|
+
) -> bool:
|
|
1072
|
+
"""Parse traceback lines."""
|
|
1073
|
+
if line.startswith(" ") or line.startswith("\t") or line.startswith("E "):
|
|
1074
|
+
current_failure.traceback.append(line)
|
|
1075
|
+
return True
|
|
1076
|
+
elif line.strip().startswith("=") or (
|
|
1077
|
+
i < len(lines) - 1 and "FAILED" in lines[i + 1]
|
|
1078
|
+
):
|
|
1079
|
+
return False
|
|
1080
|
+
return True
|
|
1081
|
+
|
|
1082
|
+
def _parse_captured_output(
|
|
1083
|
+
self, line: str, capture_type: str | None, current_failure: "TestFailure"
|
|
1084
|
+
) -> bool:
|
|
1085
|
+
"""Parse captured output lines."""
|
|
1086
|
+
if line.strip().startswith("=") or line.strip().startswith("_"):
|
|
1087
|
+
return False
|
|
1088
|
+
|
|
1089
|
+
if capture_type == "stdout":
|
|
1090
|
+
if current_failure.captured_stdout:
|
|
1091
|
+
current_failure.captured_stdout += "\n" + line
|
|
1092
|
+
else:
|
|
1093
|
+
current_failure.captured_stdout = line
|
|
1094
|
+
elif capture_type == "stderr":
|
|
1095
|
+
if current_failure.captured_stderr:
|
|
1096
|
+
current_failure.captured_stderr += "\n" + line
|
|
1097
|
+
else:
|
|
1098
|
+
current_failure.captured_stderr = line
|
|
1099
|
+
return True
|
|
1100
|
+
|
|
1101
|
+
def _extract_structured_failures(self, output: str) -> list["TestFailure"]:
    """Build ``TestFailure`` records from raw pytest output.

    A small state machine walks the output line by line: a failure
    header opens a new record, after which location/assertion lines,
    traceback lines and captured stdout/stderr sections are folded
    into it.

    Args:
        output: Raw pytest output text.

    Returns:
        List of TestFailure objects, one per detected failure.
    """
    all_lines = output.split("\n")
    collected: list["TestFailure"] = []

    active = None  # failure record currently being filled in
    tracking_traceback = False
    tracking_capture = False
    stream_name = None  # "stdout" / "stderr" while a capture section is open

    for idx, raw in enumerate(all_lines):
        candidate, is_header = self._parse_failure_header(raw, active)
        if is_header:
            # A new header closes out the previous failure, if any.
            if active:
                collected.append(active)
            active = candidate
            tracking_traceback = True
            tracking_capture = False
            continue

        if not active:
            # Nothing to attach lines to before the first header.
            continue

        if self._parse_location_and_assertion(raw, active, tracking_traceback):
            continue

        found_banner, banner_stream = self._parse_captured_section_header(raw)
        if found_banner:
            tracking_capture = True
            stream_name = banner_stream
            tracking_traceback = False
            continue

        if tracking_traceback:
            tracking_traceback = self._parse_traceback_line(
                raw, all_lines, idx, active
            )

        if tracking_capture and stream_name:
            tracking_capture = self._parse_captured_output(
                raw, stream_name, active
            )
            if not tracking_capture:
                stream_name = None

    # Flush the final failure still being accumulated.
    if active:
        collected.append(active)

    return collected
|
|
1173
|
+
def _render_structured_failure_panels(self, failures: list["TestFailure"]) -> None:
    """Render failures as Rich panels with tables and syntax highlighting.

    Failures are grouped by source file; each one is shown in a red
    panel containing a summary table, assertion details, a
    syntax-highlighted traceback and any captured stdout/stderr.

    Args:
        failures: List of TestFailure objects
    """
    from rich.console import Group
    from rich.panel import Panel

    if not failures:
        return

    # Group failures by file for better organization.
    failures_by_file: dict[str, list[TestFailure]] = {}
    for failure in failures:
        failures_by_file.setdefault(failure.get_file_path(), []).append(failure)

    # Render each file group with a header followed by one panel per failure.
    for file_path, file_failures in failures_by_file.items():
        self.console.print(
            f"\n[bold red]📁 {file_path}[/bold red] ({len(file_failures)} failure(s))\n"
        )

        for i, failure in enumerate(file_failures, 1):
            components = self._build_failure_body_components(failure)
            panel = Panel(
                Group(*components),
                title=f"[bold red]❌ Failure {i}/{len(file_failures)}[/bold red]",
                border_style="red",
                width=get_console_width(),
                padding=(1, 2),
            )
            self.console.print(panel)

def _build_failure_detail_table(self, failure: "TestFailure") -> "t.Any":
    """Build the key/value summary table (test, location, status, timing) for one failure."""
    from rich import box
    from rich.table import Table

    table = Table(
        show_header=False,
        box=box.SIMPLE,
        padding=(0, 1),
        border_style="red",
    )
    table.add_column("Key", style="cyan bold", width=12)
    table.add_column("Value", overflow="fold")

    table.add_row("Test", f"[yellow]{failure.test_name}[/yellow]")
    table.add_row(
        "Location", f"[blue underline]{failure.location}[/blue underline]"
    )
    table.add_row("Status", f"[red bold]{failure.status}[/red bold]")

    if failure.duration:
        table.add_row("Duration", f"{failure.duration:.3f}s")

    # Summary timing insight for slow tests, when available.
    duration_note = self._get_duration_note(failure)
    if duration_note:
        table.add_row("Timing", duration_note)

    return table

def _build_failure_body_components(self, failure: "TestFailure") -> "list[t.Any]":
    """Assemble the panel renderables: summary table, assertion, traceback, captured output."""
    from rich.syntax import Syntax

    components: list[t.Any] = [self._build_failure_detail_table(failure)]

    if failure.assertion:
        components.append("")  # Spacer
        components.append("[bold red]Assertion Error:[/bold red]")
        # Syntax highlight the assertion
        components.append(
            Syntax(
                failure.assertion,
                "python",
                theme="monokai",
                line_numbers=False,
                background_color="default",
            )
        )

    # Add relevant traceback (last 15 lines)
    relevant_traceback = failure.get_relevant_traceback(max_lines=15)
    if relevant_traceback:
        components.append("")  # Spacer
        components.append("[bold red]Traceback:[/bold red]")
        components.append(
            Syntax(
                "\n".join(relevant_traceback),
                "python",
                theme="monokai",
                line_numbers=False,
                word_wrap=True,
                background_color="default",
            )
        )

    # Add captured output if present (stdout first, then stderr).
    for label, captured in (
        ("stdout", failure.captured_stdout),
        ("stderr", failure.captured_stderr),
    ):
        if captured:
            components.append("")  # Spacer
            components.append(f"[bold yellow]Captured {label}:[/bold yellow]")
            components.append(f"[dim]{captured}[/dim]")

    return components
|
|
1294
|
+
def _get_duration_note(self, failure: "TestFailure") -> str | None:
|
|
1295
|
+
"""Return a duration note highlighting long-running failures."""
|
|
1296
|
+
if not failure.duration:
|
|
1297
|
+
return None
|
|
1298
|
+
|
|
1299
|
+
if failure.duration > 5:
|
|
1300
|
+
return (
|
|
1301
|
+
f"[bold red]{failure.duration:.2f}s – investigate slow test[/bold red]"
|
|
1302
|
+
)
|
|
1303
|
+
if failure.duration > 2:
|
|
1304
|
+
return f"[yellow]{failure.duration:.2f}s – moderately slow[/yellow]"
|
|
1305
|
+
return None
|
|
1306
|
+
|
|
299
1307
|
def _get_timeout(self, options: OptionsProtocol) -> int:
|
|
300
1308
|
return self.command_builder.get_test_timeout(options)
|
|
301
1309
|
|
|
302
1310
|
async def run_pre_test_lsp_diagnostics(self) -> bool:
|
|
303
1311
|
"""Run LSP diagnostics before tests to catch type errors early."""
|
|
304
|
-
if not self.use_lsp_diagnostics:
|
|
1312
|
+
if not self.use_lsp_diagnostics or self._lsp_client is None:
|
|
305
1313
|
return True
|
|
306
1314
|
|
|
307
1315
|
try:
|
|
308
|
-
|
|
309
|
-
|
|
310
|
-
lsp_client = LSPClient(self.console)
|
|
1316
|
+
# Use injected LSP client (already instantiated)
|
|
1317
|
+
lsp_client = self._lsp_client
|
|
311
1318
|
|
|
312
1319
|
# Check if LSP server is available
|
|
313
1320
|
if not lsp_client.is_server_running():
|