crackerjack 0.37.9__py3-none-any.whl → 0.45.2__py3-none-any.whl
This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
- crackerjack/README.md +19 -0
- crackerjack/__init__.py +30 -1
- crackerjack/__main__.py +342 -1263
- crackerjack/adapters/README.md +18 -0
- crackerjack/adapters/__init__.py +27 -5
- crackerjack/adapters/_output_paths.py +167 -0
- crackerjack/adapters/_qa_adapter_base.py +309 -0
- crackerjack/adapters/_tool_adapter_base.py +706 -0
- crackerjack/adapters/ai/README.md +65 -0
- crackerjack/adapters/ai/__init__.py +5 -0
- crackerjack/adapters/ai/claude.py +853 -0
- crackerjack/adapters/complexity/README.md +53 -0
- crackerjack/adapters/complexity/__init__.py +10 -0
- crackerjack/adapters/complexity/complexipy.py +641 -0
- crackerjack/adapters/dependency/__init__.py +22 -0
- crackerjack/adapters/dependency/pip_audit.py +418 -0
- crackerjack/adapters/format/README.md +72 -0
- crackerjack/adapters/format/__init__.py +11 -0
- crackerjack/adapters/format/mdformat.py +313 -0
- crackerjack/adapters/format/ruff.py +516 -0
- crackerjack/adapters/lint/README.md +47 -0
- crackerjack/adapters/lint/__init__.py +11 -0
- crackerjack/adapters/lint/codespell.py +273 -0
- crackerjack/adapters/lsp/README.md +49 -0
- crackerjack/adapters/lsp/__init__.py +27 -0
- crackerjack/adapters/{rust_tool_manager.py → lsp/_manager.py} +3 -3
- crackerjack/adapters/{skylos_adapter.py → lsp/skylos.py} +59 -7
- crackerjack/adapters/{zuban_adapter.py → lsp/zuban.py} +3 -6
- crackerjack/adapters/refactor/README.md +59 -0
- crackerjack/adapters/refactor/__init__.py +12 -0
- crackerjack/adapters/refactor/creosote.py +318 -0
- crackerjack/adapters/refactor/refurb.py +406 -0
- crackerjack/adapters/refactor/skylos.py +494 -0
- crackerjack/adapters/sast/README.md +132 -0
- crackerjack/adapters/sast/__init__.py +32 -0
- crackerjack/adapters/sast/_base.py +201 -0
- crackerjack/adapters/sast/bandit.py +423 -0
- crackerjack/adapters/sast/pyscn.py +405 -0
- crackerjack/adapters/sast/semgrep.py +241 -0
- crackerjack/adapters/security/README.md +111 -0
- crackerjack/adapters/security/__init__.py +17 -0
- crackerjack/adapters/security/gitleaks.py +339 -0
- crackerjack/adapters/type/README.md +52 -0
- crackerjack/adapters/type/__init__.py +12 -0
- crackerjack/adapters/type/pyrefly.py +402 -0
- crackerjack/adapters/type/ty.py +402 -0
- crackerjack/adapters/type/zuban.py +522 -0
- crackerjack/adapters/utility/README.md +51 -0
- crackerjack/adapters/utility/__init__.py +10 -0
- crackerjack/adapters/utility/checks.py +884 -0
- crackerjack/agents/README.md +264 -0
- crackerjack/agents/__init__.py +40 -12
- crackerjack/agents/base.py +1 -0
- crackerjack/agents/claude_code_bridge.py +641 -0
- crackerjack/agents/coordinator.py +49 -53
- crackerjack/agents/dry_agent.py +187 -3
- crackerjack/agents/enhanced_coordinator.py +279 -0
- crackerjack/agents/enhanced_proactive_agent.py +185 -0
- crackerjack/agents/error_middleware.py +53 -0
- crackerjack/agents/formatting_agent.py +6 -8
- crackerjack/agents/helpers/__init__.py +9 -0
- crackerjack/agents/helpers/performance/__init__.py +22 -0
- crackerjack/agents/helpers/performance/performance_ast_analyzer.py +357 -0
- crackerjack/agents/helpers/performance/performance_pattern_detector.py +909 -0
- crackerjack/agents/helpers/performance/performance_recommender.py +572 -0
- crackerjack/agents/helpers/refactoring/__init__.py +22 -0
- crackerjack/agents/helpers/refactoring/code_transformer.py +536 -0
- crackerjack/agents/helpers/refactoring/complexity_analyzer.py +344 -0
- crackerjack/agents/helpers/refactoring/dead_code_detector.py +437 -0
- crackerjack/agents/helpers/test_creation/__init__.py +19 -0
- crackerjack/agents/helpers/test_creation/test_ast_analyzer.py +216 -0
- crackerjack/agents/helpers/test_creation/test_coverage_analyzer.py +643 -0
- crackerjack/agents/helpers/test_creation/test_template_generator.py +1031 -0
- crackerjack/agents/performance_agent.py +121 -1152
- crackerjack/agents/refactoring_agent.py +156 -655
- crackerjack/agents/semantic_agent.py +479 -0
- crackerjack/agents/semantic_helpers.py +356 -0
- crackerjack/agents/test_creation_agent.py +19 -1605
- crackerjack/api.py +5 -7
- crackerjack/cli/README.md +394 -0
- crackerjack/cli/__init__.py +1 -1
- crackerjack/cli/cache_handlers.py +23 -18
- crackerjack/cli/cache_handlers_enhanced.py +1 -4
- crackerjack/cli/facade.py +70 -8
- crackerjack/cli/formatting.py +13 -0
- crackerjack/cli/handlers/__init__.py +85 -0
- crackerjack/cli/handlers/advanced.py +103 -0
- crackerjack/cli/handlers/ai_features.py +62 -0
- crackerjack/cli/handlers/analytics.py +479 -0
- crackerjack/cli/handlers/changelog.py +271 -0
- crackerjack/cli/handlers/config_handlers.py +16 -0
- crackerjack/cli/handlers/coverage.py +84 -0
- crackerjack/cli/handlers/documentation.py +280 -0
- crackerjack/cli/handlers/main_handlers.py +497 -0
- crackerjack/cli/handlers/monitoring.py +371 -0
- crackerjack/cli/handlers.py +249 -49
- crackerjack/cli/interactive.py +8 -5
- crackerjack/cli/options.py +203 -110
- crackerjack/cli/semantic_handlers.py +292 -0
- crackerjack/cli/version.py +19 -0
- crackerjack/code_cleaner.py +60 -24
- crackerjack/config/README.md +472 -0
- crackerjack/config/__init__.py +256 -0
- crackerjack/config/global_lock_config.py +191 -54
- crackerjack/config/hooks.py +188 -16
- crackerjack/config/loader.py +239 -0
- crackerjack/config/settings.py +141 -0
- crackerjack/config/tool_commands.py +331 -0
- crackerjack/core/README.md +393 -0
- crackerjack/core/async_workflow_orchestrator.py +79 -53
- crackerjack/core/autofix_coordinator.py +22 -9
- crackerjack/core/container.py +10 -9
- crackerjack/core/enhanced_container.py +9 -9
- crackerjack/core/performance.py +1 -1
- crackerjack/core/performance_monitor.py +5 -3
- crackerjack/core/phase_coordinator.py +1018 -634
- crackerjack/core/proactive_workflow.py +3 -3
- crackerjack/core/retry.py +275 -0
- crackerjack/core/service_watchdog.py +167 -23
- crackerjack/core/session_coordinator.py +187 -382
- crackerjack/core/timeout_manager.py +161 -44
- crackerjack/core/workflow/__init__.py +21 -0
- crackerjack/core/workflow/workflow_ai_coordinator.py +863 -0
- crackerjack/core/workflow/workflow_event_orchestrator.py +1107 -0
- crackerjack/core/workflow/workflow_issue_parser.py +714 -0
- crackerjack/core/workflow/workflow_phase_executor.py +1158 -0
- crackerjack/core/workflow/workflow_security_gates.py +400 -0
- crackerjack/core/workflow_orchestrator.py +1247 -953
- crackerjack/data/README.md +11 -0
- crackerjack/data/__init__.py +8 -0
- crackerjack/data/models.py +79 -0
- crackerjack/data/repository.py +210 -0
- crackerjack/decorators/README.md +180 -0
- crackerjack/decorators/__init__.py +35 -0
- crackerjack/decorators/error_handling.py +649 -0
- crackerjack/decorators/error_handling_decorators.py +334 -0
- crackerjack/decorators/helpers.py +58 -0
- crackerjack/decorators/patterns.py +281 -0
- crackerjack/decorators/utils.py +58 -0
- crackerjack/docs/README.md +11 -0
- crackerjack/docs/generated/api/CLI_REFERENCE.md +1 -1
- crackerjack/documentation/README.md +11 -0
- crackerjack/documentation/ai_templates.py +1 -1
- crackerjack/documentation/dual_output_generator.py +11 -9
- crackerjack/documentation/reference_generator.py +104 -59
- crackerjack/dynamic_config.py +52 -61
- crackerjack/errors.py +1 -1
- crackerjack/events/README.md +11 -0
- crackerjack/events/__init__.py +16 -0
- crackerjack/events/telemetry.py +175 -0
- crackerjack/events/workflow_bus.py +346 -0
- crackerjack/exceptions/README.md +301 -0
- crackerjack/exceptions/__init__.py +5 -0
- crackerjack/exceptions/config.py +4 -0
- crackerjack/exceptions/tool_execution_error.py +245 -0
- crackerjack/executors/README.md +591 -0
- crackerjack/executors/__init__.py +2 -0
- crackerjack/executors/async_hook_executor.py +539 -77
- crackerjack/executors/cached_hook_executor.py +3 -3
- crackerjack/executors/hook_executor.py +967 -102
- crackerjack/executors/hook_lock_manager.py +31 -22
- crackerjack/executors/individual_hook_executor.py +66 -32
- crackerjack/executors/lsp_aware_hook_executor.py +136 -57
- crackerjack/executors/progress_hook_executor.py +282 -0
- crackerjack/executors/tool_proxy.py +23 -7
- crackerjack/hooks/README.md +485 -0
- crackerjack/hooks/lsp_hook.py +8 -9
- crackerjack/intelligence/README.md +557 -0
- crackerjack/interactive.py +37 -10
- crackerjack/managers/README.md +369 -0
- crackerjack/managers/async_hook_manager.py +41 -57
- crackerjack/managers/hook_manager.py +449 -79
- crackerjack/managers/publish_manager.py +81 -36
- crackerjack/managers/test_command_builder.py +290 -12
- crackerjack/managers/test_executor.py +93 -8
- crackerjack/managers/test_manager.py +1082 -75
- crackerjack/managers/test_progress.py +118 -26
- crackerjack/mcp/README.md +374 -0
- crackerjack/mcp/cache.py +25 -2
- crackerjack/mcp/client_runner.py +35 -18
- crackerjack/mcp/context.py +9 -9
- crackerjack/mcp/dashboard.py +24 -8
- crackerjack/mcp/enhanced_progress_monitor.py +34 -23
- crackerjack/mcp/file_monitor.py +27 -6
- crackerjack/mcp/progress_components.py +45 -34
- crackerjack/mcp/progress_monitor.py +6 -9
- crackerjack/mcp/rate_limiter.py +11 -7
- crackerjack/mcp/server.py +2 -0
- crackerjack/mcp/server_core.py +187 -55
- crackerjack/mcp/service_watchdog.py +12 -9
- crackerjack/mcp/task_manager.py +2 -2
- crackerjack/mcp/tools/README.md +27 -0
- crackerjack/mcp/tools/__init__.py +2 -0
- crackerjack/mcp/tools/core_tools.py +75 -52
- crackerjack/mcp/tools/execution_tools.py +87 -31
- crackerjack/mcp/tools/intelligence_tools.py +2 -2
- crackerjack/mcp/tools/proactive_tools.py +1 -1
- crackerjack/mcp/tools/semantic_tools.py +584 -0
- crackerjack/mcp/tools/utility_tools.py +180 -132
- crackerjack/mcp/tools/workflow_executor.py +87 -46
- crackerjack/mcp/websocket/README.md +31 -0
- crackerjack/mcp/websocket/app.py +11 -1
- crackerjack/mcp/websocket/event_bridge.py +188 -0
- crackerjack/mcp/websocket/jobs.py +27 -4
- crackerjack/mcp/websocket/monitoring/__init__.py +25 -0
- crackerjack/mcp/websocket/monitoring/api/__init__.py +19 -0
- crackerjack/mcp/websocket/monitoring/api/dependencies.py +141 -0
- crackerjack/mcp/websocket/monitoring/api/heatmap.py +154 -0
- crackerjack/mcp/websocket/monitoring/api/intelligence.py +199 -0
- crackerjack/mcp/websocket/monitoring/api/metrics.py +203 -0
- crackerjack/mcp/websocket/monitoring/api/telemetry.py +101 -0
- crackerjack/mcp/websocket/monitoring/dashboard.py +18 -0
- crackerjack/mcp/websocket/monitoring/factory.py +109 -0
- crackerjack/mcp/websocket/monitoring/filters.py +10 -0
- crackerjack/mcp/websocket/monitoring/metrics.py +64 -0
- crackerjack/mcp/websocket/monitoring/models.py +90 -0
- crackerjack/mcp/websocket/monitoring/utils.py +171 -0
- crackerjack/mcp/websocket/monitoring/websocket_manager.py +78 -0
- crackerjack/mcp/websocket/monitoring/websockets/__init__.py +17 -0
- crackerjack/mcp/websocket/monitoring/websockets/dependencies.py +126 -0
- crackerjack/mcp/websocket/monitoring/websockets/heatmap.py +176 -0
- crackerjack/mcp/websocket/monitoring/websockets/intelligence.py +291 -0
- crackerjack/mcp/websocket/monitoring/websockets/metrics.py +291 -0
- crackerjack/mcp/websocket/monitoring_endpoints.py +16 -2930
- crackerjack/mcp/websocket/server.py +1 -3
- crackerjack/mcp/websocket/websocket_handler.py +107 -6
- crackerjack/models/README.md +308 -0
- crackerjack/models/__init__.py +10 -1
- crackerjack/models/config.py +639 -22
- crackerjack/models/config_adapter.py +6 -6
- crackerjack/models/protocols.py +1167 -23
- crackerjack/models/pydantic_models.py +320 -0
- crackerjack/models/qa_config.py +145 -0
- crackerjack/models/qa_results.py +134 -0
- crackerjack/models/results.py +35 -0
- crackerjack/models/semantic_models.py +258 -0
- crackerjack/models/task.py +19 -3
- crackerjack/models/test_models.py +60 -0
- crackerjack/monitoring/README.md +11 -0
- crackerjack/monitoring/ai_agent_watchdog.py +5 -4
- crackerjack/monitoring/metrics_collector.py +4 -3
- crackerjack/monitoring/regression_prevention.py +4 -3
- crackerjack/monitoring/websocket_server.py +4 -241
- crackerjack/orchestration/README.md +340 -0
- crackerjack/orchestration/__init__.py +43 -0
- crackerjack/orchestration/advanced_orchestrator.py +20 -67
- crackerjack/orchestration/cache/README.md +312 -0
- crackerjack/orchestration/cache/__init__.py +37 -0
- crackerjack/orchestration/cache/memory_cache.py +338 -0
- crackerjack/orchestration/cache/tool_proxy_cache.py +340 -0
- crackerjack/orchestration/config.py +297 -0
- crackerjack/orchestration/coverage_improvement.py +13 -6
- crackerjack/orchestration/execution_strategies.py +6 -6
- crackerjack/orchestration/hook_orchestrator.py +1398 -0
- crackerjack/orchestration/strategies/README.md +401 -0
- crackerjack/orchestration/strategies/__init__.py +39 -0
- crackerjack/orchestration/strategies/adaptive_strategy.py +630 -0
- crackerjack/orchestration/strategies/parallel_strategy.py +237 -0
- crackerjack/orchestration/strategies/sequential_strategy.py +299 -0
- crackerjack/orchestration/test_progress_streamer.py +1 -1
- crackerjack/plugins/README.md +11 -0
- crackerjack/plugins/hooks.py +3 -2
- crackerjack/plugins/loader.py +3 -3
- crackerjack/plugins/managers.py +1 -1
- crackerjack/py313.py +191 -0
- crackerjack/security/README.md +11 -0
- crackerjack/services/README.md +374 -0
- crackerjack/services/__init__.py +8 -21
- crackerjack/services/ai/README.md +295 -0
- crackerjack/services/ai/__init__.py +7 -0
- crackerjack/services/ai/advanced_optimizer.py +878 -0
- crackerjack/services/{contextual_ai_assistant.py → ai/contextual_ai_assistant.py} +5 -3
- crackerjack/services/ai/embeddings.py +444 -0
- crackerjack/services/ai/intelligent_commit.py +328 -0
- crackerjack/services/ai/predictive_analytics.py +510 -0
- crackerjack/services/api_extractor.py +5 -3
- crackerjack/services/bounded_status_operations.py +45 -5
- crackerjack/services/cache.py +249 -318
- crackerjack/services/changelog_automation.py +7 -3
- crackerjack/services/command_execution_service.py +305 -0
- crackerjack/services/config_integrity.py +83 -39
- crackerjack/services/config_merge.py +9 -6
- crackerjack/services/config_service.py +198 -0
- crackerjack/services/config_template.py +13 -26
- crackerjack/services/coverage_badge_service.py +6 -4
- crackerjack/services/coverage_ratchet.py +53 -27
- crackerjack/services/debug.py +18 -7
- crackerjack/services/dependency_analyzer.py +4 -4
- crackerjack/services/dependency_monitor.py +13 -13
- crackerjack/services/documentation_generator.py +4 -2
- crackerjack/services/documentation_service.py +62 -33
- crackerjack/services/enhanced_filesystem.py +81 -27
- crackerjack/services/enterprise_optimizer.py +1 -1
- crackerjack/services/error_pattern_analyzer.py +10 -10
- crackerjack/services/file_filter.py +221 -0
- crackerjack/services/file_hasher.py +5 -7
- crackerjack/services/file_io_service.py +361 -0
- crackerjack/services/file_modifier.py +615 -0
- crackerjack/services/filesystem.py +80 -109
- crackerjack/services/git.py +99 -5
- crackerjack/services/health_metrics.py +4 -6
- crackerjack/services/heatmap_generator.py +12 -3
- crackerjack/services/incremental_executor.py +380 -0
- crackerjack/services/initialization.py +101 -49
- crackerjack/services/log_manager.py +2 -2
- crackerjack/services/logging.py +120 -68
- crackerjack/services/lsp_client.py +12 -12
- crackerjack/services/memory_optimizer.py +27 -22
- crackerjack/services/monitoring/README.md +30 -0
- crackerjack/services/monitoring/__init__.py +9 -0
- crackerjack/services/monitoring/dependency_monitor.py +678 -0
- crackerjack/services/monitoring/error_pattern_analyzer.py +676 -0
- crackerjack/services/monitoring/health_metrics.py +716 -0
- crackerjack/services/monitoring/metrics.py +587 -0
- crackerjack/services/{performance_benchmarks.py → monitoring/performance_benchmarks.py} +100 -14
- crackerjack/services/{performance_cache.py → monitoring/performance_cache.py} +21 -15
- crackerjack/services/{performance_monitor.py → monitoring/performance_monitor.py} +10 -6
- crackerjack/services/parallel_executor.py +166 -55
- crackerjack/services/patterns/__init__.py +142 -0
- crackerjack/services/patterns/agents.py +107 -0
- crackerjack/services/patterns/code/__init__.py +15 -0
- crackerjack/services/patterns/code/detection.py +118 -0
- crackerjack/services/patterns/code/imports.py +107 -0
- crackerjack/services/patterns/code/paths.py +159 -0
- crackerjack/services/patterns/code/performance.py +119 -0
- crackerjack/services/patterns/code/replacement.py +36 -0
- crackerjack/services/patterns/core.py +212 -0
- crackerjack/services/patterns/documentation/__init__.py +14 -0
- crackerjack/services/patterns/documentation/badges_markdown.py +96 -0
- crackerjack/services/patterns/documentation/comments_blocks.py +83 -0
- crackerjack/services/patterns/documentation/docstrings.py +89 -0
- crackerjack/services/patterns/formatting.py +226 -0
- crackerjack/services/patterns/operations.py +339 -0
- crackerjack/services/patterns/security/__init__.py +23 -0
- crackerjack/services/patterns/security/code_injection.py +122 -0
- crackerjack/services/patterns/security/credentials.py +190 -0
- crackerjack/services/patterns/security/path_traversal.py +221 -0
- crackerjack/services/patterns/security/unsafe_operations.py +216 -0
- crackerjack/services/patterns/templates.py +62 -0
- crackerjack/services/patterns/testing/__init__.py +18 -0
- crackerjack/services/patterns/testing/error_patterns.py +107 -0
- crackerjack/services/patterns/testing/pytest_output.py +126 -0
- crackerjack/services/patterns/tool_output/__init__.py +16 -0
- crackerjack/services/patterns/tool_output/bandit.py +72 -0
- crackerjack/services/patterns/tool_output/other.py +97 -0
- crackerjack/services/patterns/tool_output/pyright.py +67 -0
- crackerjack/services/patterns/tool_output/ruff.py +44 -0
- crackerjack/services/patterns/url_sanitization.py +114 -0
- crackerjack/services/patterns/utilities.py +42 -0
- crackerjack/services/patterns/utils.py +339 -0
- crackerjack/services/patterns/validation.py +46 -0
- crackerjack/services/patterns/versioning.py +62 -0
- crackerjack/services/predictive_analytics.py +21 -8
- crackerjack/services/profiler.py +280 -0
- crackerjack/services/quality/README.md +415 -0
- crackerjack/services/quality/__init__.py +11 -0
- crackerjack/services/quality/anomaly_detector.py +392 -0
- crackerjack/services/quality/pattern_cache.py +333 -0
- crackerjack/services/quality/pattern_detector.py +479 -0
- crackerjack/services/quality/qa_orchestrator.py +491 -0
- crackerjack/services/{quality_baseline.py → quality/quality_baseline.py} +163 -2
- crackerjack/services/{quality_baseline_enhanced.py → quality/quality_baseline_enhanced.py} +4 -1
- crackerjack/services/{quality_intelligence.py → quality/quality_intelligence.py} +180 -16
- crackerjack/services/regex_patterns.py +58 -2987
- crackerjack/services/regex_utils.py +55 -29
- crackerjack/services/secure_status_formatter.py +42 -15
- crackerjack/services/secure_subprocess.py +35 -2
- crackerjack/services/security.py +16 -8
- crackerjack/services/server_manager.py +40 -51
- crackerjack/services/smart_scheduling.py +46 -6
- crackerjack/services/status_authentication.py +3 -3
- crackerjack/services/thread_safe_status_collector.py +1 -0
- crackerjack/services/tool_filter.py +368 -0
- crackerjack/services/tool_version_service.py +9 -5
- crackerjack/services/unified_config.py +43 -351
- crackerjack/services/vector_store.py +689 -0
- crackerjack/services/version_analyzer.py +6 -4
- crackerjack/services/version_checker.py +14 -8
- crackerjack/services/zuban_lsp_service.py +5 -4
- crackerjack/slash_commands/README.md +11 -0
- crackerjack/slash_commands/init.md +2 -12
- crackerjack/slash_commands/run.md +84 -50
- crackerjack/tools/README.md +11 -0
- crackerjack/tools/__init__.py +30 -0
- crackerjack/tools/_git_utils.py +105 -0
- crackerjack/tools/check_added_large_files.py +139 -0
- crackerjack/tools/check_ast.py +105 -0
- crackerjack/tools/check_json.py +103 -0
- crackerjack/tools/check_jsonschema.py +297 -0
- crackerjack/tools/check_toml.py +103 -0
- crackerjack/tools/check_yaml.py +110 -0
- crackerjack/tools/codespell_wrapper.py +72 -0
- crackerjack/tools/end_of_file_fixer.py +202 -0
- crackerjack/tools/format_json.py +128 -0
- crackerjack/tools/mdformat_wrapper.py +114 -0
- crackerjack/tools/trailing_whitespace.py +198 -0
- crackerjack/tools/validate_regex_patterns.py +7 -3
- crackerjack/ui/README.md +11 -0
- crackerjack/ui/dashboard_renderer.py +28 -0
- crackerjack/ui/templates/README.md +11 -0
- crackerjack/utils/console_utils.py +13 -0
- crackerjack/utils/dependency_guard.py +230 -0
- crackerjack/utils/retry_utils.py +275 -0
- crackerjack/workflows/README.md +590 -0
- crackerjack/workflows/__init__.py +46 -0
- crackerjack/workflows/actions.py +811 -0
- crackerjack/workflows/auto_fix.py +444 -0
- crackerjack/workflows/container_builder.py +499 -0
- crackerjack/workflows/definitions.py +443 -0
- crackerjack/workflows/engine.py +177 -0
- crackerjack/workflows/event_bridge.py +242 -0
- {crackerjack-0.37.9.dist-info → crackerjack-0.45.2.dist-info}/METADATA +678 -98
- crackerjack-0.45.2.dist-info/RECORD +478 -0
- {crackerjack-0.37.9.dist-info → crackerjack-0.45.2.dist-info}/WHEEL +1 -1
- crackerjack/managers/test_manager_backup.py +0 -1075
- crackerjack/mcp/tools/execution_tools_backup.py +0 -1011
- crackerjack/mixins/__init__.py +0 -3
- crackerjack/mixins/error_handling.py +0 -145
- crackerjack/services/config.py +0 -358
- crackerjack/ui/server_panels.py +0 -125
- crackerjack-0.37.9.dist-info/RECORD +0 -231
- /crackerjack/adapters/{rust_tool_adapter.py → lsp/_base.py} +0 -0
- /crackerjack/adapters/{lsp_client.py → lsp/_client.py} +0 -0
- {crackerjack-0.37.9.dist-info → crackerjack-0.45.2.dist-info}/entry_points.txt +0 -0
- {crackerjack-0.37.9.dist-info → crackerjack-0.45.2.dist-info}/licenses/LICENSE +0 -0
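The largest single change in this release is the rewrite of `crackerjack/__main__.py`, shown in the diff below: stdlib logging and structlog are now configured *before* any ACB-dependent import, so that ACB's import-time "Application started" messages stay out of normal CLI output. As a rough illustration of that pattern only (a minimal sketch, not crackerjack's actual code; the simplified flag check and the `acb` logger names are assumptions taken from the diff), silencing a chatty library before importing anything that triggers it looks like this:

```python
# Minimal sketch of the early-silencing pattern used in the diff below.
# The flag handling and logger names are illustrative assumptions.
import logging
import sys

_DEBUG = any(arg in ("--debug", "-d") for arg in sys.argv[1:])

if not _DEBUG:
    for name in ("acb", "acb.adapters", "acb.workflows"):
        noisy = logging.getLogger(name)
        noisy.setLevel(logging.CRITICAL)  # drop everything below CRITICAL
        noisy.propagate = False  # keep records away from the root handler

# Only after logging is configured do we import modules whose import-time
# side effects would otherwise emit startup log records.
import typer  # noqa: E402
```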
crackerjack/__main__.py
CHANGED
@@ -1,16 +1,145 @@
+"""Crackerjack - Opinionated Python project management tool.
+
+Early initialization: Configure logging before ACB imports to suppress startup messages.
+"""
+
+import logging
+import sys
 import typing as t
-
+import warnings
+from contextlib import suppress
+
+# CRITICAL: Suppress ACB logger startup messages BEFORE any ACB imports
+# ACB's logger initializes at import time and emits "Application started" messages.
+# Configure Python's logging module early to intercept these messages.
+_EARLY_DEBUG_MODE = any(
+    arg in ("--debug", "-d", "--ai-debug") or arg.startswith("--debug=")
+    for arg in sys.argv[1:]
+)
+_EARLY_VERBOSE_MODE = any(
+    arg in ("--verbose", "-v") or arg.startswith("--verbose=") for arg in sys.argv[1:]
+)
+# Check if help is requested to suppress ACB startup messages completely
+_EARLY_HELP_MODE = any(
+    arg in ("--help", "-h") or arg.startswith("--help=") for arg in sys.argv[1:]
+)
+
+
+def _configure_structlog_for_level(log_level: int) -> None:
+    """Configure structlog with appropriate filtering for the given log level."""
+    with suppress(ImportError):
+        import structlog
+
+        if log_level == logging.DEBUG:
+            # In debug mode, show all messages
+            structlog.configure(
+                processors=[
+                    structlog.stdlib.add_log_level,
+                    structlog.stdlib.PositionalArgumentsFormatter(),
+                    structlog.processors.TimeStamper(fmt="iso"),
+                    structlog.processors.StackInfoRenderer(),
+                    structlog.processors.format_exc_info,
+                    structlog.processors.UnicodeDecoder(),
+                    structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
+                ],
+                logger_factory=structlog.stdlib.LoggerFactory(),
+                wrapper_class=structlog.stdlib.BoundLogger,
+                cache_logger_on_first_use=True,
+            )
+        elif log_level == logging.ERROR:
+            # In verbose mode, show only ERROR and above
+            structlog.configure(
+                processors=[
+                    structlog.stdlib.add_log_level,
+                    structlog.processors.TimeStamper(fmt="iso"),
+                    structlog.processors.UnicodeDecoder(),
+                    structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
+                ],
+                logger_factory=structlog.stdlib.LoggerFactory(),
+                wrapper_class=structlog.make_filtering_bound_logger(logging.ERROR),
+                cache_logger_on_first_use=True,
+            )
+        else:  # CRITICAL level or other suppressive levels
+            # In normal mode, suppress all output
+            def dummy_processor(logger, method_name, event_dict):
+                """Dummy processor that returns the event dict unchanged without additional processing."""
+                return event_dict
+
+            structlog.configure(
+                processors=[dummy_processor],
+                logger_factory=structlog.testing.CapturingLoggerFactory(),
+                wrapper_class=structlog.make_filtering_bound_logger(logging.CRITICAL),
+                cache_logger_on_first_use=True,
+            )
+
+
+if not _EARLY_DEBUG_MODE:
+    if _EARLY_HELP_MODE:
+        # In help mode, completely suppress ACB startup logging for clean UX
+        acb_logger = logging.getLogger("acb")
+        acb_logger.setLevel(logging.CRITICAL)
+        acb_logger.propagate = False
+
+        # Also suppress subloggers like acb.adapters.logger, acb.workflows, etc.
+        logging.getLogger("acb.adapters").setLevel(logging.CRITICAL)
+        logging.getLogger("acb.workflows").setLevel(logging.CRITICAL)
+        logging.getLogger("acb.console").setLevel(logging.CRITICAL)
+        logging.getLogger("crackerjack.core").setLevel(logging.CRITICAL)
+        # Specifically target the loggers that were appearing in the output
+        logging.getLogger("acb.adapters.logger").setLevel(logging.CRITICAL)
+        logging.getLogger("acb.workflows.engine").setLevel(logging.CRITICAL)
+
+        # Configure structlog to suppress output in help mode
+        _configure_structlog_for_level(logging.CRITICAL)
+    elif not _EARLY_VERBOSE_MODE:
+        # In non-debug and non-verbose mode, suppress ACB startup logging for clean default UX
+        acb_logger = logging.getLogger("acb")
+        acb_logger.setLevel(logging.CRITICAL)
+        acb_logger.propagate = False
+
+        # Also suppress subloggers like acb.adapters.logger, acb.workflows, etc.
+        logging.getLogger("acb.adapters").setLevel(logging.CRITICAL)
+        logging.getLogger("acb.workflows").setLevel(logging.CRITICAL)
+        logging.getLogger("acb.console").setLevel(logging.CRITICAL)
+        logging.getLogger("crackerjack.core").setLevel(logging.CRITICAL)
+        # Specifically target the loggers that were appearing in the output
+        logging.getLogger("acb.adapters.logger").setLevel(logging.CRITICAL)
+        logging.getLogger("acb.workflows.engine").setLevel(logging.CRITICAL)
+
+        # Configure structlog to suppress output in normal mode
+        _configure_structlog_for_level(logging.CRITICAL)
+    else:
+        # In verbose mode but not debug, set to ERROR level to reduce noise but still show important errors
+        logging.getLogger("acb").setLevel(logging.ERROR)
+        logging.getLogger("crackerjack.core").setLevel(logging.ERROR)
+        # Specifically target the loggers that were appearing in the output
+        logging.getLogger("acb.adapters.logger").setLevel(logging.ERROR)
+        logging.getLogger("acb.workflows.engine").setLevel(logging.ERROR)
+
+        # Configure structlog to show only ERROR and above in verbose mode
+        _configure_structlog_for_level(logging.ERROR)
 
+# NOW safe to import ACB-dependent modules
 import typer
-from
+from acb.console import Console
+from acb.depends import Inject, depends
+
+# Suppress asyncio subprocess cleanup warnings when event loop closes
+# This is a known Python issue - the subprocesses are properly cleaned up,
+# but the warning appears when the event loop closes with pending subprocess handlers
+warnings.filterwarnings(
+    "ignore",
+    message=".*loop.*closed.*",
+    category=RuntimeWarning,
+)
 
 if t.TYPE_CHECKING:
-
+    pass
 
-from crackerjack.services.git import GitService
 
 from .cli import (
     CLI_OPTIONS,
+    BumpOption,
     create_options,
     handle_interactive_mode,
     handle_standard_mode,
@@ -19,1198 +148,42 @@ from .cli import (
 from .cli.cache_handlers import _handle_cache_commands
 from .cli.handlers import (
     handle_config_updates,
-
-
-
-
-
-
-
-
-
-
-
-
-
+)
+from .cli.handlers.advanced import (
+    handle_advanced_optimizer,
+)
+from .cli.handlers.ai_features import handle_contextual_ai
+from .cli.handlers.analytics import (
+    handle_anomaly_detection,
+    handle_heatmap_generation,
+    handle_predictive_analytics,
+)
+from .cli.handlers.changelog import (
+    handle_changelog_commands,
+    handle_version_analysis,
+    setup_debug_and_verbose_flags,
+)
+from .cli.handlers.coverage import (
+    handle_coverage_status,
+)
+from .cli.handlers.documentation import (
+    handle_documentation_commands,
+    handle_mkdocs_integration,
+)
+from .cli.handlers.monitoring import handle_server_commands
+from .cli.semantic_handlers import (
+    handle_remove_from_semantic_index,
+    handle_semantic_index,
+    handle_semantic_search,
+    handle_semantic_stats,
 )
 
-console = Console(
+console = Console()
 app = typer.Typer(
     help="Crackerjack: Your Python project setup and style enforcement tool.",
 )
 
 
-def _handle_monitoring_commands(
-    monitor: bool,
-    enhanced_monitor: bool,
-    dashboard: bool,
-    unified_dashboard: bool,
-    unified_dashboard_port: int | None,
-    watchdog: bool,
-    dev: bool,
-) -> bool:
-    if monitor:
-        handle_monitor_mode(dev_mode=dev)
-        return True
-    if enhanced_monitor:
-        handle_enhanced_monitor_mode(dev_mode=dev)
-        return True
-    if dashboard:
-        handle_dashboard_mode(dev_mode=dev)
-        return True
-    if unified_dashboard:
-        from .cli.handlers import handle_unified_dashboard_mode
-
-        port = unified_dashboard_port or 8675
-        handle_unified_dashboard_mode(port=port, dev_mode=dev)
-        return True
-    if watchdog:
-        handle_watchdog_mode()
-        return True
-    return False
-
-
-def _handle_websocket_commands(
-    start_websocket_server: bool,
-    stop_websocket_server: bool,
-    restart_websocket_server: bool,
-    websocket_port: int | None,
-) -> bool:
-    if start_websocket_server:
-        port = websocket_port or 8675
-        handle_start_websocket_server(port)
-        return True
-    if stop_websocket_server:
-        handle_stop_websocket_server()
-        return True
-    if restart_websocket_server:
-        port = websocket_port or 8675
-        handle_restart_websocket_server(port)
-        return True
-    return False
-
-
-def _handle_mcp_commands(
-    start_mcp_server: bool,
-    stop_mcp_server: bool,
-    restart_mcp_server: bool,
-    websocket_port: int | None,
-) -> bool:
-    if start_mcp_server:
-        handle_mcp_server(websocket_port)
-        return True
-    if stop_mcp_server:
-        handle_stop_mcp_server()
-        return True
-    if restart_mcp_server:
-        handle_restart_mcp_server(websocket_port)
-        return True
-    return False
-
-
-def _handle_zuban_lsp_commands(
-    start_zuban_lsp: bool,
-    stop_zuban_lsp: bool,
-    restart_zuban_lsp: bool,
-    zuban_lsp_port: int,
-    zuban_lsp_mode: str,
-) -> bool:
-    if start_zuban_lsp:
-        handle_start_zuban_lsp(port=zuban_lsp_port, mode=zuban_lsp_mode)
-        return True
-    if stop_zuban_lsp:
-        handle_stop_zuban_lsp()
-        return True
-    if restart_zuban_lsp:
-        handle_restart_zuban_lsp(port=zuban_lsp_port, mode=zuban_lsp_mode)
-        return True
-    return False
-
-
-def _handle_server_commands(
-    monitor: bool,
-    enhanced_monitor: bool,
-    dashboard: bool,
-    unified_dashboard: bool,
-    unified_dashboard_port: int | None,
-    watchdog: bool,
-    start_websocket_server: bool,
-    stop_websocket_server: bool,
-    restart_websocket_server: bool,
-    start_mcp_server: bool,
-    stop_mcp_server: bool,
-    restart_mcp_server: bool,
-    websocket_port: int | None,
-    start_zuban_lsp: bool,
-    stop_zuban_lsp: bool,
-    restart_zuban_lsp: bool,
-    zuban_lsp_port: int,
-    zuban_lsp_mode: str,
-    dev: bool,
-) -> bool:
-    return (
-        _handle_monitoring_commands(
-            monitor,
-            enhanced_monitor,
-            dashboard,
-            unified_dashboard,
-            unified_dashboard_port,
-            watchdog,
-            dev,
-        )
-        or _handle_websocket_commands(
-            start_websocket_server,
-            stop_websocket_server,
-            restart_websocket_server,
-            websocket_port,
-        )
-        or _handle_mcp_commands(
-            start_mcp_server,
-            stop_mcp_server,
-            restart_mcp_server,
-            websocket_port,
-        )
-        or _handle_zuban_lsp_commands(
-            start_zuban_lsp,
-            stop_zuban_lsp,
-            restart_zuban_lsp,
-            zuban_lsp_port,
-            zuban_lsp_mode,
-        )
-    )
-
-
-def _generate_documentation(doc_service: t.Any, console: t.Any) -> bool:
-    """Generate API documentation.
-
-    Returns True if successful, False if failed.
-    """
-    console.print("📖 [bold blue]Generating API documentation...[/bold blue]")
-    success = doc_service.generate_full_api_documentation()
-    if success:
-        console.print(
-            "✅ [bold green]Documentation generated successfully![/bold green]"
-        )
-        return True
-    else:
-        console.print("❌ [bold red]Documentation generation failed![/bold red]")
-        return False
-
-
-def _validate_documentation_files(doc_service: t.Any, console: t.Any) -> None:
-    """Validate existing documentation files."""
-    from pathlib import Path
-
-    console.print("🔍 [bold blue]Validating documentation...[/bold blue]")
-    doc_paths = [Path("docs"), Path("README.md"), Path("CHANGELOG.md")]
-    existing_docs = [p for p in doc_paths if p.exists()]
-
-    if existing_docs:
-        issues = doc_service.validate_documentation(existing_docs)
-        if issues:
-            console.print(f"⚠️ Found {len(issues)} documentation issues:")
-            for issue in issues:
-                file_path = issue.get("path", issue.get("file", "unknown"))
-                console.print(f" - {file_path}: {issue['message']}")
-        else:
-            console.print(
-                "✅ [bold green]Documentation validation passed![/bold green]"
-            )
-    else:
-        console.print("⚠️ No documentation files found to validate.")
-
-
-def _handle_documentation_commands(
-    generate_docs: bool, validate_docs: bool, console: Console, options: t.Any
-) -> bool:
-    """Handle documentation generation and validation commands.
-
-    Returns True if documentation commands were handled and execution should continue,
-    False if execution should return early.
-    """
-    if not (generate_docs or validate_docs):
-        return True
-
-    from pathlib import Path
-
-    from crackerjack.services.documentation_service import DocumentationServiceImpl
-
-    pkg_path = Path("crackerjack")
-    doc_service = DocumentationServiceImpl(pkg_path=pkg_path, console=console)
-
-    if generate_docs:
-        if not _generate_documentation(doc_service, console):
-            return False
-
-    if validate_docs:
-        _validate_documentation_files(doc_service, console)
-
-    # Check if we should continue with other operations
-    return any(
-        [
-            options.run_tests,
-            options.strip_code,
-            options.all,
-            options.publish,
-            options.comp,
-        ]
-    )
-
-
-def _handle_changelog_commands(
-    generate_changelog: bool,
-    changelog_dry_run: bool,
-    changelog_version: str | None,
-    changelog_since: str | None,
-    console: Console,
-    options: t.Any,
-) -> bool:
-    """Handle changelog generation commands.
-
-    Returns True if changelog commands were handled and execution should continue,
-    False if execution should return early.
-    """
-    if not (generate_changelog or changelog_dry_run):
-        return True
-
-    services = _setup_changelog_services(console)
-    changelog_path = services["pkg_path"] / "CHANGELOG.md"
-
-    if changelog_dry_run:
-        return _handle_changelog_dry_run(
-            services["generator"], changelog_since, console, options
-        )
-
-    if generate_changelog:
-        return _handle_changelog_generation(
-            services,
-            changelog_path,
-            changelog_version,
-            changelog_since,
-            console,
-            options,
-        )
-
-    return _should_continue_after_changelog(options)
-
-
-def _setup_changelog_services(console: Console) -> dict[str, t.Any]:
-    """Setup changelog services and dependencies."""
-    from pathlib import Path
-
-    from crackerjack.services.changelog_automation import ChangelogGenerator
-    from crackerjack.services.git import GitService
-
-    pkg_path = Path()
-    git_service = GitService(console, pkg_path)
-    changelog_generator = ChangelogGenerator(console, git_service)
-
-    return {
-        "pkg_path": pkg_path,
-        "git_service": git_service,
-        "generator": changelog_generator,
-    }
-
-
-def _handle_changelog_dry_run(
-    generator: "ChangelogGenerator",
-    changelog_since: str | None,
-    console: Console,
-    options: t.Any,
-) -> bool:
-    """Handle changelog dry run preview."""
-    console.print("🔍 [bold blue]Previewing changelog generation...[/bold blue]")
-    entries = generator.generate_changelog_entries(changelog_since)
-    if entries:
-        generator._display_changelog_preview(entries)
-        console.print("✅ [bold green]Changelog preview completed![/bold green]")
-    else:
-        console.print("⚠️ No new changelog entries to generate.")
-
-    return _should_continue_after_changelog(options)
-
-
-def _handle_changelog_generation(
-    services: dict[str, t.Any],
-    changelog_path: "Path",
-    changelog_version: str | None,
-    changelog_since: str | None,
-    console: Console,
-    options: t.Any,
-) -> bool:
-    """Handle actual changelog generation."""
-    console.print("📝 [bold blue]Generating changelog...[/bold blue]")
-
-    version = _determine_changelog_version(
-        services["git_service"], changelog_version, changelog_since, console, options
-    )
-
-    success = services["generator"].generate_changelog_from_commits(
-        changelog_path=changelog_path,
-        version=version,
-        since_version=changelog_since,
-    )
-
-    if success:
-        console.print(
-            f"✅ [bold green]Changelog updated for version {version}![/bold green]"
-        )
-        return _should_continue_after_changelog(options)
-    else:
-        console.print("❌ [bold red]Changelog generation failed![/bold red]")
-        return False
-
-
-def _determine_changelog_version(
-    git_service: GitService,
-    changelog_version: str | None,
-    changelog_since: str | None,
-    console: Console,
-    options: t.Any,
-) -> str:
-    """Determine the version to use for changelog generation."""
-    if getattr(options, "auto_version", False) and not changelog_version:
-        try:
-            import asyncio
-
-            from crackerjack.services.version_analyzer import VersionAnalyzer
-
-            version_analyzer = VersionAnalyzer(console, git_service)
-            console.print(
-                "[cyan]🔍[/cyan] Analyzing version changes for intelligent changelog..."
-            )
-
-            recommendation = asyncio.run(
-                version_analyzer.recommend_version_bump(changelog_since)
-            )
-            version = recommendation.recommended_version
-            console.print(f"[green]✨[/green] Using AI-recommended version: {version}")
-            return version
-        except Exception as e:
-            console.print(f"[yellow]⚠️[/yellow] Version analysis failed: {e}")
-            return changelog_version or "Unreleased"
-
-    return changelog_version or "Unreleased"
-
-
-def _should_continue_after_changelog(options: t.Any) -> bool:
-    """Check if execution should continue after changelog operations."""
-    return any(
-        [
-            options.run_tests,
-            options.strip_code,
-            options.all,
-            options.publish,
-            options.comp,
-        ]
-    )
-
-
-def _handle_version_analysis(
-    auto_version: bool,
-    version_since: str | None,
-    accept_version: bool,
-    console: Console,
-    options: t.Any,
-) -> bool:
-    """Handle automatic version analysis and recommendations.
-
-    Returns True if version analysis was handled and execution should continue,
-    False if execution should return early.
-    """
-    if not auto_version:
-        return True
-
-    from pathlib import Path
-
-    from rich.prompt import Confirm
-
-    from crackerjack.services.git import GitService
-    from crackerjack.services.version_analyzer import VersionAnalyzer
-
-    pkg_path = Path()
-    git_service = GitService(console, pkg_path)
-    version_analyzer = VersionAnalyzer(console, git_service)
-
-    try:
-        import asyncio
-
-        recommendation = asyncio.run(
-            version_analyzer.recommend_version_bump(version_since)
-        )
-        version_analyzer.display_recommendation(recommendation)
-
-        if accept_version or Confirm.ask(
-            f"\nAccept recommendation ({recommendation.bump_type.value})",
-            default=True,
-        ):
-            console.print(
-                f"[green]✅ Version bump accepted: {recommendation.current_version} → {recommendation.recommended_version}[/green]"
-            )
-            # Note: Actual version bumping would integrate with existing publish/bump logic
-        else:
-            console.print("[yellow]❌ Version bump declined[/yellow]")
-
-    except Exception as e:
-        console.print(f"[red]❌ Version analysis failed: {e}[/red]")
-
-    # Check if we should continue with other operations
-    return any(
-        [
-            options.run_tests,
-            options.strip_code,
-            options.all,
-            options.publish,
-            options.comp,
-        ]
-    )
-
-
-def _setup_debug_and_verbose_flags(
-    ai_debug: bool, debug: bool, verbose: bool, options: t.Any
-) -> tuple[bool, bool]:
-    """Configure debug and verbose flags and update options.
-
-    Returns tuple of (ai_fix, verbose) flags.
-    """
-    ai_fix = False
-
-    if ai_debug:
-        ai_fix = True
-        verbose = True
-        options.verbose = True
-
-    if debug:
-        verbose = True
-        options.verbose = True
-
-    return ai_fix, verbose
-
-
-def _handle_heatmap_generation(
-    heatmap: bool,
-    heatmap_type: str,
-    heatmap_output: str | None,
-    console: Console,
-) -> bool:
-    """Handle heat map generation and visualization.
-
-    Returns True if execution should continue, False if should return early.
-    """
-    if not heatmap:
-        return True
-
-    from pathlib import Path
-
-    from crackerjack.services.heatmap_generator import HeatMapGenerator
-
-    console.print("[cyan]🔥[/cyan] Generating heat map visualization...")
-
-    try:
-        generator = HeatMapGenerator()
-        project_root = Path.cwd()
-
-        # Generate the requested heat map type
-        if heatmap_type == "error_frequency":
-            heatmap_data = generator.generate_error_frequency_heatmap()
-        elif heatmap_type == "complexity":
-            heatmap_data = generator.generate_code_complexity_heatmap(project_root)
-        elif heatmap_type == "quality_metrics":
-            heatmap_data = generator.generate_quality_metrics_heatmap()
-        elif heatmap_type == "test_failures":
-            heatmap_data = generator.generate_test_failure_heatmap()
-        else:
-            console.print(f"[red]❌[/red] Unknown heat map type: {heatmap_type}")
-            return False
-
-        # Determine output format and save
-        if heatmap_output:
-            output_path = Path(heatmap_output)
-            if output_path.suffix.lower() == ".html":
-                # Generate HTML visualization
-                html_content = generator.generate_html_visualization(heatmap_data)
-                output_path.write_text(html_content, encoding="utf-8")
-                console.print(
-                    f"[green]✅[/green] Heat map HTML saved to: {output_path}"
-                )
-            elif output_path.suffix.lower() in (".json", ".csv"):
-                # Export data in requested format
-                format_type = output_path.suffix[1:]  # Remove the dot
-                generator.export_heatmap_data(heatmap_data, output_path, format_type)
-                console.print(
-                    f"[green]✅[/green] Heat map data saved to: {output_path}"
-                )
-            else:
-                console.print(
-                    f"[red]❌[/red] Unsupported output format: {output_path.suffix}"
-                )
-                return False
-        else:
-            # Default: save as HTML in current directory
-            default_filename = f"heatmap_{heatmap_type}.html"
-            html_content = generator.generate_html_visualization(heatmap_data)
-            Path(default_filename).write_text(html_content, encoding="utf-8")
-            console.print(
-                f"[green]✅[/green] Heat map HTML saved to: {default_filename}"
-            )
-
-        # Display summary
-        console.print(
-            f"[cyan]📊[/cyan] Heat map '{heatmap_data.title}' generated successfully"
-        )
-        console.print(f"[dim] • Cells: {len(heatmap_data.cells)}")
-        console.print(f"[dim] • X Labels: {len(heatmap_data.x_labels)}")
-        console.print(f"[dim] • Y Labels: {len(heatmap_data.y_labels)}")
-
-        return False  # Exit after generating heat map
-
-    except Exception as e:
-        console.print(f"[red]❌[/red] Heat map generation failed: {e}")
-        return False
-
-
-def _generate_anomaly_sample_data(detector: t.Any, console: Console) -> None:
-    """Generate sample anomaly detection data for demonstration."""
-    from datetime import datetime, timedelta
-
-    base_time = datetime.now() - timedelta(hours=24)
-    metric_types = [
-        "test_pass_rate",
-        "coverage_percentage",
-        "complexity_score",
-        "execution_time",
-        "error_count",
-    ]
-
-    console.print("[dim] • Collecting quality metrics from recent runs...")
-
-    # Add historical data points to establish baselines
-    for i in range(50):
-        timestamp = base_time + timedelta(minutes=i * 30)
-        for metric_type in metric_types:
-            value = _get_sample_metric_value(metric_type)
-            detector.add_metric(metric_type, value, timestamp)
-
-
-def _get_sample_metric_value(metric_type: str) -> float:
-    """Generate sample metric value with occasional anomalies."""
-    import random
-
-    is_anomaly = random.random() <= 0.1
-
-    if metric_type == "test_pass_rate":
-        return random.uniform(0.3, 0.7) if is_anomaly else random.uniform(0.85, 0.98)
-    elif metric_type == "coverage_percentage":
-        return random.uniform(40, 60) if is_anomaly else random.uniform(75, 95)
-    elif metric_type == "complexity_score":
-        return random.uniform(20, 35) if is_anomaly else random.uniform(8, 15)
-    elif metric_type == "execution_time":
-        return random.uniform(300, 600) if is_anomaly else random.uniform(30, 120)
-    # error_count
-    return random.uniform(8, 15) if is_anomaly else random.uniform(0, 3)
-
-
-def _display_anomaly_results(
-    anomalies: list[t.Any], baselines: dict[str, t.Any], console: Console
-) -> None:
-    """Display anomaly detection analysis results."""
-    console.print("[cyan]📊[/cyan] Analysis complete:")
-    console.print(f"[dim] • Baselines established for {len(baselines)} metrics")
-    console.print(f"[dim] • {len(anomalies)} anomalies detected")
-
-    if anomalies:
-        console.print("\n[yellow]⚠️[/yellow] Detected anomalies:")
-        for anomaly in anomalies[:5]:  # Show top 5 anomalies
-            severity_color = {
-                "low": "yellow",
-                "medium": "orange",
-                "high": "red",
-                "critical": "bright_red",
-            }.get(anomaly.severity, "white")
-
-            console.print(
-                f" • [{severity_color}]{anomaly.severity.upper()}[/{severity_color}] "
-                f"{anomaly.metric_type}: {anomaly.description}"
-            )
-
-
-def _save_anomaly_report(
-    anomalies: list[t.Any],
-    baselines: dict[str, t.Any],
-    anomaly_sensitivity: float,
-    anomaly_report: str,
-    console: Console,
-) -> None:
-    """Save anomaly detection report to file."""
-    import json
-    from datetime import datetime
-    from pathlib import Path
-
-    report_data = {
-        "timestamp": datetime.now().isoformat(),
-        "summary": {
-            "total_anomalies": len(anomalies),
-            "baselines_count": len(baselines),
-            "sensitivity": anomaly_sensitivity,
-        },
-        "anomalies": [
-            {
-                "timestamp": a.timestamp.isoformat(),
-                "metric_type": a.metric_type,
-                "value": a.value,
-                "expected_range": a.expected_range,
-                "severity": a.severity,
-                "confidence": a.confidence,
-                "description": a.description,
-            }
-            for a in anomalies
-        ],
-        "baselines": baselines,
-    }
-
-    report_path = Path(anomaly_report)
-    report_path.write_text(json.dumps(report_data, indent=2), encoding="utf-8")
-    console.print(f"[green]✅[/green] Anomaly detection report saved to: {report_path}")
-
-
-def _handle_anomaly_detection(
-    anomaly_detection: bool,
-    anomaly_sensitivity: float,
-    anomaly_report: str | None,
-    console: Console,
-) -> bool:
-    """Handle ML-based anomaly detection for quality metrics.
-
-    Returns True if execution should continue, False if should return early.
-    """
-    if not anomaly_detection:
-        return True
-
-    from crackerjack.services.anomaly_detector import AnomalyDetector
-
-    console.print("[cyan]🔍[/cyan] Running ML-based anomaly detection...")
-
-    try:
-        detector = AnomalyDetector(sensitivity=anomaly_sensitivity)
-
-        # Generate sample data for demonstration
-        _generate_anomaly_sample_data(detector, console)
-
-        # Generate analysis results
-        anomalies = detector.get_anomalies()
-        baselines = detector.get_baseline_summary()
-
-        # Display results
-        _display_anomaly_results(anomalies, baselines, console)
-
-        # Save report if requested
-        if anomaly_report:
-            _save_anomaly_report(
-                anomalies, baselines, anomaly_sensitivity, anomaly_report, console
-            )
-
-        return False  # Exit after anomaly detection
-
-    except Exception as e:
-        console.print(f"[red]❌[/red] Anomaly detection failed: {e}")
-        return False
-
-
-def _generate_predictive_sample_data(engine: t.Any) -> list[str]:
-    """Generate sample historical data for predictive analytics."""
-    import random
-    from datetime import datetime, timedelta
-
-    base_time = datetime.now() - timedelta(hours=72)  # 3 days of history
-    metric_types = [
-        "test_pass_rate",
-        "coverage_percentage",
-        "execution_time",
-        "memory_usage",
-        "complexity_score",
-    ]
-
-    base_values = {
-        "test_pass_rate": 0.95,
-        "coverage_percentage": 0.85,
-        "execution_time": 120.0,
-        "memory_usage": 512.0,
-        "complexity_score": 10.0,
-    }
-
-    # Generate sample historical data
-    for metric_type in metric_types:
-        base_value = base_values[metric_type]
-        for i in range(48):  # 48 hours of data points
-            timestamp = base_time + timedelta(hours=i)
-            # Add some trend and random variation
-            trend_factor = 1.0 + (i * 0.001)  # Slight upward trend
-            noise = random.uniform(0.9, 1.1)  # 10% noise
-            value = base_value * trend_factor * noise
-            engine.add_metric(metric_type, value, timestamp)
-
-    return metric_types
-
-
-def _generate_predictions_summary(
-    engine: t.Any, metric_types: list[str], prediction_periods: int
-) -> dict[str, t.Any]:
-    """Generate predictions summary for all metric types."""
-    predictions_summary = {}
-    trend_summary = engine.get_trend_summary()
-
-    for metric_type in metric_types:
-        predictions = engine.predict_metric(metric_type, prediction_periods)
-        if predictions:
-            predictions_summary[metric_type] = {
-                "trend": trend_summary.get(metric_type, {}),
-                "predictions": [
-                    {
-                        "predicted_for": p.predicted_for.isoformat(),
-                        "predicted_value": round(p.predicted_value, 3),
-                        "confidence_interval": [
-                            round(p.confidence_interval[0], 3),
-                            round(p.confidence_interval[1], 3),
-                        ],
-                        "model_accuracy": round(p.model_accuracy, 3),
-                    }
-                    for p in predictions[:5]  # Show first 5 predictions
-                ],
-            }
-
-    return predictions_summary
-
-
-def _display_trend_analysis(
-    predictions_summary: dict[str, t.Any], console: Console
-) -> None:
-    """Display trend analysis summary."""
-    console.print("\n[green]📈[/green] Trend Analysis Summary:")
-
-    for metric_type, data in predictions_summary.items():
-        trend_info = data.get("trend", {})
-        direction = trend_info.get("trend_direction", "unknown")
-        strength = trend_info.get("trend_strength", 0)
-
-        direction_color = {
-            "increasing": "green",
-            "decreasing": "red",
-            "stable": "blue",
-            "volatile": "yellow",
-        }.get(direction, "white")
-
-        console.print(
-            f" • {metric_type}: [{direction_color}]{direction}[/{direction_color}] "
-            f"(strength: {strength:.2f})"
-        )
-
-        if data["predictions"]:
-            next_pred = data["predictions"][0]
-            console.print(
-                f" Next prediction: {next_pred['predicted_value']} "
-                f"(confidence: {next_pred['model_accuracy']:.2f})"
-            )
-
-
-def _save_analytics_dashboard(
-    predictions_summary: dict[str, t.Any],
-    trend_summary: dict[str, t.Any],
-    metric_types: list[str],
-    prediction_periods: int,
-    analytics_dashboard: str,
-    console: Console,
-) -> None:
-    """Save analytics dashboard data to file."""
-    import json
-    from datetime import datetime
-    from pathlib import Path
-
-    dashboard_data = {
-        "timestamp": datetime.now().isoformat(),
-        "summary": {
831
|
-
"prediction_periods": prediction_periods,
|
|
832
|
-
"metrics_analyzed": len(metric_types),
|
|
833
|
-
"total_predictions": sum(
|
|
834
|
-
len(data["predictions"]) for data in predictions_summary.values()
|
|
835
|
-
),
|
|
836
|
-
},
|
|
837
|
-
"trends": trend_summary,
|
|
838
|
-
"predictions": predictions_summary,
|
|
839
|
-
}
|
|
840
|
-
|
|
841
|
-
dashboard_path = Path(analytics_dashboard)
|
|
842
|
-
dashboard_path.write_text(json.dumps(dashboard_data, indent=2), encoding="utf-8")
|
|
843
|
-
console.print(f"[green]✅[/green] Analytics dashboard saved to: {dashboard_path}")
|
|
844
|
-
|
|
845
|
-
|
|
846
|
-
def _handle_predictive_analytics(
|
|
847
|
-
predictive_analytics: bool,
|
|
848
|
-
prediction_periods: int,
|
|
849
|
-
analytics_dashboard: str | None,
|
|
850
|
-
console: Console,
|
|
851
|
-
) -> bool:
|
|
852
|
-
"""Handle predictive analytics and trend forecasting.
|
|
853
|
-
|
|
854
|
-
Returns True if execution should continue, False if should return early.
|
|
855
|
-
"""
|
|
856
|
-
if not predictive_analytics:
|
|
857
|
-
return True
|
|
858
|
-
|
|
859
|
-
from crackerjack.services.predictive_analytics import PredictiveAnalyticsEngine
|
|
860
|
-
|
|
861
|
-
console.print(
|
|
862
|
-
"[cyan]📊[/cyan] Running predictive analytics and trend forecasting..."
|
|
863
|
-
)
|
|
864
|
-
|
|
865
|
-
try:
|
|
866
|
-
engine = PredictiveAnalyticsEngine()
|
|
867
|
-
|
|
868
|
-
# Generate sample historical data
|
|
869
|
-
metric_types = _generate_predictive_sample_data(engine)
|
|
870
|
-
|
|
871
|
-
# Generate predictions
|
|
872
|
-
console.print(
|
|
873
|
-
f"[blue]🔮[/blue] Generating {prediction_periods} period predictions..."
|
|
874
|
-
)
|
|
875
|
-
|
|
876
|
-
predictions_summary = _generate_predictions_summary(
|
|
877
|
-
engine, metric_types, prediction_periods
|
|
878
|
-
)
|
|
879
|
-
trend_summary = engine.get_trend_summary()
|
|
880
|
-
|
|
881
|
-
# Display analysis results
|
|
882
|
-
_display_trend_analysis(predictions_summary, console)
|
|
883
|
-
|
|
884
|
-
# Save dashboard if requested
|
|
885
|
-
if analytics_dashboard:
|
|
886
|
-
_save_analytics_dashboard(
|
|
887
|
-
predictions_summary,
|
|
888
|
-
trend_summary,
|
|
889
|
-
metric_types,
|
|
890
|
-
prediction_periods,
|
|
891
|
-
analytics_dashboard,
|
|
892
|
-
console,
|
|
893
|
-
)
|
|
894
|
-
|
|
895
|
-
return False # Exit after predictive analytics
|
|
896
|
-
|
|
897
|
-
except Exception as e:
|
|
898
|
-
console.print(f"[red]❌[/red] Predictive analytics failed: {e}")
|
|
899
|
-
return False
|
|
900
|
-
|
|
901
|
-
|
|
902
|
-
def _handle_enterprise_optimizer(
|
|
903
|
-
enterprise_optimizer: bool,
|
|
904
|
-
enterprise_profile: str | None,
|
|
905
|
-
enterprise_report: str | None,
|
|
906
|
-
console: Console,
|
|
907
|
-
) -> bool:
|
|
908
|
-
"""Handle enterprise-scale optimization engine.
|
|
909
|
-
|
|
910
|
-
Returns True if execution should continue, False if should return early.
|
|
911
|
-
"""
|
|
912
|
-
if not enterprise_optimizer:
|
|
913
|
-
return True
|
|
914
|
-
|
|
915
|
-
console.print("[cyan]🏢[/cyan] Running enterprise-scale optimization analysis...")
|
|
916
|
-
|
|
917
|
-
try:
|
|
918
|
-
optimizer = _setup_enterprise_optimizer(enterprise_profile)
|
|
919
|
-
result = _run_enterprise_optimization(optimizer, console)
|
|
920
|
-
_display_enterprise_results(result, enterprise_report, console)
|
|
921
|
-
return False # Exit after enterprise optimization
|
|
922
|
-
|
|
923
|
-
except Exception as e:
|
|
924
|
-
console.print(f"[red]❌[/red] Enterprise optimizer error: {e}")
|
|
925
|
-
return False
|
|
926
|
-
|
|
927
|
-
|
|
928
|
-
def _setup_enterprise_optimizer(enterprise_profile: str | None) -> t.Any:
|
|
929
|
-
"""Setup enterprise optimizer with directories and profile."""
|
|
930
|
-
import tempfile
|
|
931
|
-
from pathlib import Path
|
|
932
|
-
|
|
933
|
-
from crackerjack.services.enterprise_optimizer import EnterpriseOptimizer
|
|
934
|
-
|
|
935
|
-
config_dir = Path.cwd() / ".crackerjack"
|
|
936
|
-
storage_dir = Path(tempfile.gettempdir()) / "crackerjack_storage"
|
|
937
|
-
optimizer = EnterpriseOptimizer(config_dir, storage_dir)
|
|
938
|
-
|
|
939
|
-
if enterprise_profile:
|
|
940
|
-
optimizer.performance_profile.optimization_strategy = enterprise_profile
|
|
941
|
-
|
|
942
|
-
return optimizer
|
|
943
|
-
|
|
944
|
-
|
|
945
|
-
def _run_enterprise_optimization(optimizer: t.Any, console: t.Any) -> t.Any:
|
|
946
|
-
"""Run the optimization cycle and return results."""
|
|
947
|
-
import asyncio
|
|
948
|
-
|
|
949
|
-
console.print("[blue]📊[/blue] Analyzing system resources and performance...")
|
|
950
|
-
return asyncio.run(optimizer.run_optimization_cycle())
|
|
951
|
-
|
|
952
|
-
|
|
953
|
-
def _display_enterprise_results(
|
|
954
|
-
result: t.Any, enterprise_report: str | None, console: t.Any
|
|
955
|
-
) -> None:
|
|
956
|
-
"""Display optimization results and save report if requested."""
|
|
957
|
-
if result["status"] == "success":
|
|
958
|
-
console.print(
|
|
959
|
-
"[green]✅[/green] Enterprise optimization completed successfully"
|
|
960
|
-
)
|
|
961
|
-
_display_enterprise_metrics(result["metrics"], console)
|
|
962
|
-
_display_enterprise_recommendations(result["recommendations"], console)
|
|
963
|
-
_save_enterprise_report(result, enterprise_report, console)
|
|
964
|
-
else:
|
|
965
|
-
console.print(
|
|
966
|
-
f"[red]❌[/red] Enterprise optimization failed: {result.get('message', 'Unknown error')}"
|
|
967
|
-
)
|
|
968
|
-
|
|
969
|
-
|
|
970
|
-
def _display_enterprise_metrics(metrics: t.Any, console: t.Any) -> None:
|
|
971
|
-
"""Display key system metrics."""
|
|
972
|
-
console.print(f"[blue]CPU Usage:[/blue] {metrics['cpu_percent']:.1f}%")
|
|
973
|
-
console.print(f"[blue]Memory Usage:[/blue] {metrics['memory_percent']:.1f}%")
|
|
974
|
-
console.print(f"[blue]Storage Usage:[/blue] {metrics['disk_usage_percent']:.1f}%")
|
|
975
|
-
|
|
976
|
-
|
|
977
|
-
def _display_enterprise_recommendations(recommendations: t.Any, console: t.Any) -> None:
|
|
978
|
-
"""Display optimization recommendations."""
|
|
979
|
-
if recommendations:
|
|
980
|
-
console.print(
|
|
981
|
-
f"\n[yellow]💡[/yellow] Found {len(recommendations)} optimization recommendations:"
|
|
982
|
-
)
|
|
983
|
-
for rec in recommendations[:3]: # Show top 3
|
|
984
|
-
priority_color = {"high": "red", "medium": "yellow", "low": "blue"}[
|
|
985
|
-
rec["priority"]
|
|
986
|
-
]
|
|
987
|
-
console.print(
|
|
988
|
-
f" [{priority_color}]{rec['priority'].upper()}[/{priority_color}]: {rec['title']}"
|
|
989
|
-
)
|
|
990
|
-
|
|
991
|
-
|
|
992
|
-
def _save_enterprise_report(
|
|
993
|
-
result: t.Any, enterprise_report: str | None, console: t.Any
|
|
994
|
-
) -> None:
|
|
995
|
-
"""Save enterprise report to file if requested."""
|
|
996
|
-
if enterprise_report:
|
|
997
|
-
import json
|
|
998
|
-
|
|
999
|
-
with open(enterprise_report, "w") as f:
|
|
1000
|
-
json.dump(result, f, indent=2)
|
|
1001
|
-
console.print(
|
|
1002
|
-
f"[green]📄[/green] Enterprise report saved to: {enterprise_report}"
|
|
1003
|
-
)
|
|
1004
|
-
|
|
1005
|
-
|
|
1006
|
-
def _handle_mkdocs_integration(
|
|
1007
|
-
mkdocs_integration: bool,
|
|
1008
|
-
mkdocs_serve: bool,
|
|
1009
|
-
mkdocs_theme: str,
|
|
1010
|
-
mkdocs_output: str | None,
|
|
1011
|
-
console: Console,
|
|
1012
|
-
) -> bool:
|
|
1013
|
-
"""Handle MkDocs documentation site generation.
|
|
1014
|
-
|
|
1015
|
-
Returns True if execution should continue, False if should return early.
|
|
1016
|
-
"""
|
|
1017
|
-
if not mkdocs_integration:
|
|
1018
|
-
return True
|
|
1019
|
-
|
|
1020
|
-
console.print("[cyan]📚[/cyan] Generating MkDocs documentation site...")
|
|
1021
|
-
|
|
1022
|
-
try:
|
|
1023
|
-
services = _create_mkdocs_services()
|
|
1024
|
-
builder = services["builder"]
|
|
1025
|
-
output_dir = _determine_mkdocs_output_dir(mkdocs_output)
|
|
1026
|
-
docs_content = _create_sample_docs_content()
|
|
1027
|
-
|
|
1028
|
-
console.print(
|
|
1029
|
-
f"[blue]🏗️[/blue] Building documentation site with {mkdocs_theme} theme..."
|
|
1030
|
-
)
|
|
1031
|
-
|
|
1032
|
-
_build_mkdocs_site(builder, docs_content, output_dir, mkdocs_serve)
|
|
1033
|
-
site = None # _build_mkdocs_site returns None
|
|
1034
|
-
_handle_mkdocs_build_result(site, mkdocs_serve, console)
|
|
1035
|
-
|
|
1036
|
-
return False # Exit after MkDocs generation
|
|
1037
|
-
|
|
1038
|
-
except Exception as e:
|
|
1039
|
-
console.print(f"[red]❌[/red] MkDocs integration error: {e}")
|
|
1040
|
-
return False
|
|
1041
|
-
|
|
1042
|
-
|
|
1043
|
-
def _create_mkdocs_services() -> dict[str, t.Any]:
|
|
1044
|
-
"""Create and configure MkDocs services."""
|
|
1045
|
-
from logging import getLogger
|
|
1046
|
-
from pathlib import Path
|
|
1047
|
-
|
|
1048
|
-
from crackerjack.documentation.mkdocs_integration import (
|
|
1049
|
-
MkDocsIntegrationService,
|
|
1050
|
-
MkDocsSiteBuilder,
|
|
1051
|
-
)
|
|
1052
|
-
|
|
1053
|
-
# Create filesystem service that matches FileSystemServiceProtocol
|
|
1054
|
-
class SyncFileSystemService:
|
|
1055
|
-
def read_file(self, path: str | Path) -> str:
|
|
1056
|
-
return Path(path).read_text()
|
|
1057
|
-
|
|
1058
|
-
def write_file(self, path: str | Path, content: str) -> None:
|
|
1059
|
-
Path(path).write_text(content)
|
|
1060
|
-
|
|
1061
|
-
def exists(self, path: str | Path) -> bool:
|
|
1062
|
-
return Path(path).exists()
|
|
1063
|
-
|
|
1064
|
-
def mkdir(self, path: str | Path, parents: bool = False) -> None:
|
|
1065
|
-
Path(path).mkdir(parents=parents, exist_ok=True)
|
|
1066
|
-
|
|
1067
|
-
def ensure_directory(self, path: str | Path) -> None:
|
|
1068
|
-
Path(path).mkdir(parents=True, exist_ok=True)
|
|
1069
|
-
|
|
1070
|
-
# Create config manager that implements ConfigManagerProtocol
|
|
1071
|
-
class ConfigManager:
|
|
1072
|
-
def __init__(self) -> None:
|
|
1073
|
-
self._config: dict[str, t.Any] = {}
|
|
1074
|
-
|
|
1075
|
-
def get(self, key: str, default: t.Any = None) -> t.Any:
|
|
1076
|
-
return self._config.get(key, default)
|
|
1077
|
-
|
|
1078
|
-
def set(self, key: str, value: t.Any) -> None:
|
|
1079
|
-
self._config[key] = value
|
|
1080
|
-
|
|
1081
|
-
def save(self) -> bool:
|
|
1082
|
-
return True
|
|
1083
|
-
|
|
1084
|
-
def load(self) -> bool:
|
|
1085
|
-
return True
|
|
1086
|
-
|
|
1087
|
-
filesystem = SyncFileSystemService()
|
|
1088
|
-
config_manager = ConfigManager()
|
|
1089
|
-
logger = getLogger(__name__)
|
|
1090
|
-
|
|
1091
|
-
integration_service = MkDocsIntegrationService(config_manager, filesystem, logger)
|
|
1092
|
-
builder = MkDocsSiteBuilder(integration_service)
|
|
1093
|
-
|
|
1094
|
-
return {"builder": builder, "filesystem": filesystem, "config": config_manager}
|
|
1095
|
-
|
|
1096
|
-
|
|
1097
|
-
def _determine_mkdocs_output_dir(mkdocs_output: str | None) -> "Path":
|
|
1098
|
-
"""Determine the output directory for MkDocs site."""
|
|
1099
|
-
from pathlib import Path
|
|
1100
|
-
|
|
1101
|
-
return Path(mkdocs_output) if mkdocs_output else Path.cwd() / "docs_site"
|
|
1102
|
-
|
|
1103
|
-
|
|
1104
|
-
def _create_sample_docs_content() -> dict[str, str]:
|
|
1105
|
-
"""Create sample documentation content."""
|
|
1106
|
-
return {
|
|
1107
|
-
"index.md": "# Project Documentation\n\nWelcome to the project documentation.",
|
|
1108
|
-
"getting-started.md": "# Getting Started\n\nQuick start guide for the project.",
|
|
1109
|
-
"api-reference.md": "# API Reference\n\nAPI documentation and examples.",
|
|
1110
|
-
}
|
|
1111
|
-
|
|
1112
|
-
|
|
1113
|
-
def _build_mkdocs_site(
|
|
1114
|
-
builder: t.Any, docs_content: dict[str, str], output_dir: Path, serve: bool
|
|
1115
|
-
) -> None:
|
|
1116
|
-
"""Build the MkDocs documentation site."""
|
|
1117
|
-
import asyncio
|
|
1118
|
-
|
|
1119
|
-
asyncio.run(
|
|
1120
|
-
builder.build_documentation_site(
|
|
1121
|
-
project_name="Project Documentation",
|
|
1122
|
-
project_description="Comprehensive project documentation",
|
|
1123
|
-
author="Crackerjack",
|
|
1124
|
-
documentation_content=docs_content,
|
|
1125
|
-
output_dir=output_dir,
|
|
1126
|
-
serve=serve,
|
|
1127
|
-
)
|
|
1128
|
-
)
|
|
1129
|
-
|
|
1130
|
-
|
|
1131
|
-
def _handle_mkdocs_build_result(
|
|
1132
|
-
site: t.Any, mkdocs_serve: bool, console: Console
|
|
1133
|
-
) -> None:
|
|
1134
|
-
"""Handle the result of MkDocs site building."""
|
|
1135
|
-
if site:
|
|
1136
|
-
console.print(
|
|
1137
|
-
f"[green]✅[/green] MkDocs site generated successfully at: {site.build_path}"
|
|
1138
|
-
)
|
|
1139
|
-
console.print(
|
|
1140
|
-
f"[blue]📄[/blue] Generated {len(site.pages)} documentation pages"
|
|
1141
|
-
)
|
|
1142
|
-
|
|
1143
|
-
if mkdocs_serve:
|
|
1144
|
-
console.print(
|
|
1145
|
-
"[blue]🌐[/blue] MkDocs development server started at http://127.0.0.1:8000"
|
|
1146
|
-
)
|
|
1147
|
-
console.print("[yellow]Press Ctrl+C to stop the server[/yellow]")
|
|
1148
|
-
else:
|
|
1149
|
-
console.print("[red]❌[/red] Failed to generate MkDocs site")
|
|
1150
|
-
|
|
1151
|
-
|
|
1152
|
-
def _handle_contextual_ai(
|
|
1153
|
-
contextual_ai: bool,
|
|
1154
|
-
ai_recommendations: int,
|
|
1155
|
-
ai_help_query: str | None,
|
|
1156
|
-
console: Console,
|
|
1157
|
-
) -> bool:
|
|
1158
|
-
"""Handle contextual AI assistant features.
|
|
1159
|
-
|
|
1160
|
-
Returns True if execution should continue, False if should return early.
|
|
1161
|
-
"""
|
|
1162
|
-
if not contextual_ai and not ai_help_query:
|
|
1163
|
-
return True
|
|
1164
|
-
|
|
1165
|
-
from crackerjack.services.contextual_ai_assistant import ContextualAIAssistant
|
|
1166
|
-
|
|
1167
|
-
console.print("[cyan]🤖[/cyan] Running contextual AI assistant analysis...")
|
|
1168
|
-
|
|
1169
|
-
try:
|
|
1170
|
-
from pathlib import Path
|
|
1171
|
-
|
|
1172
|
-
# Create filesystem interface that implements FileSystemInterface protocol
|
|
1173
|
-
class FileSystemImpl:
|
|
1174
|
-
def read_file(self, path: str | t.Any) -> str:
|
|
1175
|
-
return Path(path).read_text()
|
|
1176
|
-
|
|
1177
|
-
def write_file(self, path: str | t.Any, content: str) -> None:
|
|
1178
|
-
Path(path).write_text(content)
|
|
1179
|
-
|
|
1180
|
-
def exists(self, path: str | t.Any) -> bool:
|
|
1181
|
-
return Path(path).exists()
|
|
1182
|
-
|
|
1183
|
-
def mkdir(self, path: str | t.Any, parents: bool = False) -> None:
|
|
1184
|
-
Path(path).mkdir(parents=parents, exist_ok=True)
|
|
1185
|
-
|
|
1186
|
-
filesystem = FileSystemImpl()
|
|
1187
|
-
assistant = ContextualAIAssistant(filesystem, console)
|
|
1188
|
-
|
|
1189
|
-
# Handle help query
|
|
1190
|
-
if ai_help_query:
|
|
1191
|
-
help_response = assistant.get_quick_help(ai_help_query)
|
|
1192
|
-
console.print(f"\n[blue]🔍[/blue] AI Help for '{ai_help_query}':")
|
|
1193
|
-
console.print(help_response)
|
|
1194
|
-
return False # Exit after help query
|
|
1195
|
-
|
|
1196
|
-
# Get contextual recommendations
|
|
1197
|
-
console.print(
|
|
1198
|
-
"[blue]🧠[/blue] Analyzing project context for AI recommendations..."
|
|
1199
|
-
)
|
|
1200
|
-
recommendations = assistant.get_contextual_recommendations(ai_recommendations)
|
|
1201
|
-
|
|
1202
|
-
if recommendations:
|
|
1203
|
-
assistant.display_recommendations(recommendations)
|
|
1204
|
-
else:
|
|
1205
|
-
console.print("[green]✨[/green] Great job! No immediate recommendations.")
|
|
1206
|
-
|
|
1207
|
-
return False # Exit after AI recommendations
|
|
1208
|
-
|
|
1209
|
-
except Exception as e:
|
|
1210
|
-
console.print(f"[red]❌[/red] Contextual AI error: {e}")
|
|
1211
|
-
return False
|
|
1212
|
-
|
|
1213
|
-
|
|
1214
187
|
@app.command()
|
|
1215
188
|
def main(
|
|
1216
189
|
commit: bool = CLI_OPTIONS["commit"],
|
|
@@ -1219,9 +192,9 @@ def main(
     update_precommit: bool = CLI_OPTIONS["update_precommit"],
     verbose: bool = CLI_OPTIONS["verbose"],
     debug: bool = CLI_OPTIONS["debug"],
-    publish:
+    publish: BumpOption | None = CLI_OPTIONS["publish"],
     all: str | None = CLI_OPTIONS["all"],
-    bump:
+    bump: BumpOption | None = CLI_OPTIONS["bump"],
     strip_code: bool = CLI_OPTIONS["strip_code"],
     run_tests: bool = CLI_OPTIONS["run_tests"],
     benchmark: bool = CLI_OPTIONS["benchmark"],
@@ -1230,6 +203,10 @@ def main(
     skip_hooks: bool = CLI_OPTIONS["skip_hooks"],
     fast: bool = CLI_OPTIONS["fast"],
     comp: bool = CLI_OPTIONS["comp"],
+    fast_iteration: bool = CLI_OPTIONS["fast_iteration"],
+    tool: str | None = CLI_OPTIONS["tool"],
+    changed_only: bool = CLI_OPTIONS["changed_only"],
+    all_files: bool = CLI_OPTIONS["all_files"],
     create_pr: bool = CLI_OPTIONS["create_pr"],
     ai_fix: bool = CLI_OPTIONS["ai_fix"],
     start_mcp_server: bool = CLI_OPTIONS["start_mcp_server"],
@@ -1299,10 +276,9 @@ def main(
     predictive_analytics: bool = CLI_OPTIONS["predictive_analytics"],
     prediction_periods: int = CLI_OPTIONS["prediction_periods"],
     analytics_dashboard: str | None = CLI_OPTIONS["analytics_dashboard"],
-
-
-
-    enterprise_report: str | None = CLI_OPTIONS["enterprise_report"],
+    advanced_optimizer: bool = CLI_OPTIONS["advanced_optimizer"],
+    advanced_profile: str | None = CLI_OPTIONS["advanced_profile"],
+    advanced_report: str | None = CLI_OPTIONS["advanced_report"],
     mkdocs_integration: bool = CLI_OPTIONS["mkdocs_integration"],
     mkdocs_serve: bool = CLI_OPTIONS["mkdocs_serve"],
     mkdocs_theme: str = CLI_OPTIONS["mkdocs_theme"],
@@ -1310,14 +286,36 @@ def main(
     contextual_ai: bool = CLI_OPTIONS["contextual_ai"],
     ai_recommendations: int = CLI_OPTIONS["ai_recommendations"],
     ai_help_query: str | None = CLI_OPTIONS["ai_help_query"],
-    # Configuration management features
     check_config_updates: bool = CLI_OPTIONS["check_config_updates"],
     apply_config_updates: bool = CLI_OPTIONS["apply_config_updates"],
     diff_config: str | None = CLI_OPTIONS["diff_config"],
     config_interactive: bool = CLI_OPTIONS["config_interactive"],
     refresh_cache: bool = CLI_OPTIONS["refresh_cache"],
+    use_acb_workflows: bool = CLI_OPTIONS["use_acb_workflows"],
+    use_legacy_orchestrator: bool = CLI_OPTIONS["use_legacy_orchestrator"],
+    index: str | None = CLI_OPTIONS["index"],
+    search: str | None = CLI_OPTIONS["search"],
+    semantic_stats: bool = CLI_OPTIONS["semantic_stats"],
+    remove_from_index: str | None = CLI_OPTIONS["remove_from_index"],
 ) -> None:
-
+    from acb.depends import depends
+
+    from crackerjack import __version__
+    from crackerjack.config import register_services
+    from crackerjack.config.loader import load_settings
+    from crackerjack.config.settings import CrackerjackSettings
+
+    settings = load_settings(CrackerjackSettings)
+    depends.set(CrackerjackSettings, settings)
+
+    register_services()
+
+    # Print version on startup
+    console.print(f"[cyan]Crackerjack[/cyan] [dim]v{__version__}[/dim]")
+
+    # Ensure logging levels are properly set after services are registered
+    _configure_logging_for_execution(debug or ai_debug or ai_fix, verbose)
+
     options = create_options(
         commit,
         interactive,
@@ -1333,6 +331,10 @@ def main(
         skip_hooks,
         fast,
         comp,
+        fast_iteration,
+        tool,
+        changed_only,
+        all_files,
         create_pr,
         async_mode,
         experimental_hooks,
@@ -1389,10 +391,9 @@ def main(
         predictive_analytics,
         prediction_periods,
         analytics_dashboard,
-
-
-
-        enterprise_report,
+        advanced_optimizer,
+        advanced_profile,
+        advanced_report,
         mkdocs_integration,
         mkdocs_serve,
         mkdocs_theme,
@@ -1405,34 +406,34 @@ def main(
         diff_config,
         config_interactive,
         refresh_cache,
-
+        use_acb_workflows,
+        use_legacy_orchestrator,
         run_tests=run_tests,
     )

-
-
+    options.index = index
+    options.search = search
+    options.semantic_stats = semantic_stats
+    options.remove_from_index = remove_from_index
+
+    ai_fix, verbose = setup_debug_and_verbose_flags(
+        ai_fix, ai_debug, debug, verbose, options
+    )
     setup_ai_agent_env(ai_fix, ai_debug or debug)

-
-    if not _process_all_commands(locals(), console, options):
+    if not _process_all_commands(locals(), options):
         return

-    # Execute main workflow (interactive or standard mode)
     if interactive:
         handle_interactive_mode(options)
     else:
         handle_standard_mode(options, async_mode, job_id, orchestrated)


-def _process_all_commands(local_vars: t.Any,
-    ""
-    # Handle cache management commands early (they exit after execution)
-    if _handle_cache_commands(
-        local_vars["clear_cache"], local_vars["cache_stats"], console
-    ):
+def _process_all_commands(local_vars: t.Any, options: t.Any) -> bool:
+    if _handle_cache_commands(local_vars["clear_cache"], local_vars["cache_stats"]):
         return False

-    # Handle configuration management commands early (they exit after execution)
     if (
         local_vars["check_config_updates"]
         or local_vars["apply_config_updates"]
@@ -1442,8 +443,16 @@ def _process_all_commands(local_vars: t.Any, console: t.Any, options: t.Any) ->
         handle_config_updates(options)
         return False

-
-
+    if not _handle_semantic_commands(
+        local_vars["index"],
+        local_vars["search"],
+        local_vars["semantic_stats"],
+        local_vars["remove_from_index"],
+        options,
+    ):
+        return False
+
+    if handle_server_commands(
         local_vars["monitor"],
         local_vars["enhanced_monitor"],
         local_vars["dashboard"],
@@ -1466,111 +475,181 @@ def _process_all_commands(local_vars: t.Any, console: t.Any, options: t.Any) ->
     ):
         return False

-
-
+    if not handle_coverage_status(local_vars["coverage_status"], options):
+        return False

+    return _handle_analysis_commands(local_vars, options)

-
-
-
-
-    # Handle documentation commands
-    if not _handle_documentation_commands(
-        local_vars["generate_docs"], local_vars["validate_docs"], console, options
+
+def _handle_analysis_commands(local_vars: t.Any, options: t.Any) -> bool:
+    if not handle_documentation_commands(
+        local_vars["generate_docs"], local_vars["validate_docs"], options
     ):
         return False

-
-    if not _handle_changelog_commands(
+    if not handle_changelog_commands(
         local_vars["generate_changelog"],
         local_vars["changelog_dry_run"],
         local_vars["changelog_version"],
         local_vars["changelog_since"],
-        console,
         options,
     ):
         return False

-
-    if not _handle_version_analysis(
+    if not handle_version_analysis(
         local_vars["auto_version"],
         local_vars["version_since"],
         local_vars["accept_version"],
-        console,
         options,
     ):
         return False

-
-    return _handle_specialized_analytics(local_vars, console)
+    return _handle_specialized_analytics(local_vars)


-def _handle_specialized_analytics(local_vars: t.Any
-
-
-    if not _handle_heatmap_generation(
-        local_vars["heatmap"],
-        local_vars["heatmap_type"],
-        local_vars["heatmap_output"],
-        console,
+def _handle_specialized_analytics(local_vars: t.Any) -> bool:
+    if not handle_heatmap_generation(
+        local_vars["heatmap"], local_vars["heatmap_type"], local_vars["heatmap_output"]
     ):
         return False

-
-    if not _handle_anomaly_detection(
+    if not handle_anomaly_detection(
         local_vars["anomaly_detection"],
         local_vars["anomaly_sensitivity"],
         local_vars["anomaly_report"],
-        console,
     ):
         return False

-
-    if not _handle_predictive_analytics(
+    if not handle_predictive_analytics(
         local_vars["predictive_analytics"],
         local_vars["prediction_periods"],
         local_vars["analytics_dashboard"],
-        console,
     ):
         return False

-
-
+    return _handle_advanced_features(local_vars)
+
+
+@depends.inject  # type: ignore[misc]
+def _handle_semantic_commands(
+    index: str | None,
+    search: str | None,
+    semantic_stats: bool,
+    remove_from_index: str | None,
+    options: t.Any,
+    console: Inject[Console],
+) -> bool:
+    if not _has_semantic_operations(index, search, semantic_stats, remove_from_index):
+        return True
+
+    console.print("[cyan]🔍[/cyan] Running semantic search operations...")
+
+    try:
+        _execute_semantic_operations(index, search, semantic_stats, remove_from_index)
+        return False
+
+    except Exception as e:
+        console.print(f"[red]❌[/red] Semantic search error: {e}")
+        return False
+
+
+def _has_semantic_operations(
+    index: str | None,
+    search: str | None,
+    semantic_stats: bool,
+    remove_from_index: str | None,
+) -> bool:
+    return any([index, search, semantic_stats, remove_from_index])


-def
-
-
-
-
-
-
-
+def _execute_semantic_operations(
+    index: str | None,
+    search: str | None,
+    semantic_stats: bool,
+    remove_from_index: str | None,
+) -> list[str]:
+    if index:
+        handle_semantic_index(index)
+
+    if search:
+        handle_semantic_search(search)
+
+    if semantic_stats:
+        handle_semantic_stats()
+
+    if remove_from_index:
+        handle_remove_from_semantic_index(remove_from_index)
+
+    return []
+
+
+def _handle_advanced_features(local_vars: t.Any) -> bool:
+    if not handle_advanced_optimizer(
+        local_vars["advanced_optimizer"],
+        local_vars["advanced_profile"],
+        local_vars["advanced_report"],
     ):
         return False

-
-    if not _handle_mkdocs_integration(
+    if not handle_mkdocs_integration(
         local_vars["mkdocs_integration"],
         local_vars["mkdocs_serve"],
         local_vars["mkdocs_theme"],
         local_vars["mkdocs_output"],
-        console,
     ):
         return False

-
-    if not _handle_contextual_ai(
+    if not handle_contextual_ai(
         local_vars["contextual_ai"],
         local_vars["ai_recommendations"],
         local_vars["ai_help_query"],
-        console,
     ):
         return False

     return True


+def _configure_logging_for_execution(
+    debug_enabled: bool, verbose_enabled: bool = False
+) -> None:
+    """Configure logging levels based on debug and verbose flags during execution."""
+    import logging
+
+    # Determine the appropriate logging level
+    if debug_enabled:
+        # In debug mode, set to DEBUG to show all messages
+        logging.getLogger("acb").setLevel(logging.DEBUG)
+        logging.getLogger("crackerjack").setLevel(logging.DEBUG)
+        # Configure structlog to show all messages in debug mode
+        _configure_structlog_for_level(logging.DEBUG)
+    elif verbose_enabled:
+        # In verbose mode, we still want to suppress ACB logs to avoid noise
+        # Only show ERROR and above for ACB/core components to reduce noise
+        logging.getLogger("acb").setLevel(logging.ERROR)
+        logging.getLogger("crackerjack.core").setLevel(logging.ERROR)
+        # Specifically target the loggers that were appearing in the output
+        logging.getLogger("acb.adapters.logger").setLevel(logging.ERROR)
+        logging.getLogger("acb.workflows.engine").setLevel(logging.ERROR)
+        # Also target the structlog logger adapters specifically
+        logging.getLogger("acb.adapters.logger.structlog").setLevel(logging.ERROR)
+        # Configure structlog to minimize output in verbose mode
+        _configure_structlog_for_level(logging.ERROR)
+    else:
+        # In normal mode, suppress ACB and crackerjack logging for clean default UX during execution
+        logging.getLogger("acb").setLevel(logging.CRITICAL)
+        logging.getLogger("acb.adapters").setLevel(logging.CRITICAL)
+        logging.getLogger("acb.workflows").setLevel(logging.CRITICAL)
+        logging.getLogger("acb.console").setLevel(logging.CRITICAL)
+        logging.getLogger("crackerjack.core").setLevel(logging.CRITICAL)
+        # Specifically target the loggers that were appearing in the output
+        logging.getLogger("acb.adapters.logger").setLevel(logging.CRITICAL)
+        logging.getLogger("acb.workflows.engine").setLevel(logging.CRITICAL)
+        # Also target the structlog logger adapters specifically
+        logging.getLogger("acb.adapters.logger.structlog").setLevel(logging.CRITICAL)
+        # Configure structlog to suppress output in normal mode
+        _configure_structlog_for_level(logging.CRITICAL)
+
+
 def cli() -> None:
     app()
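Note on the added logging setup: `_configure_logging_for_execution` applies a three-tier policy — DEBUG everywhere when debugging, ERROR for the noisy `acb`/`crackerjack.core` loggers in verbose mode, and CRITICAL (effectively silent) by default. The snippet below is a minimal, stdlib-only sketch of that policy condensed from the added lines; the `configure_logging` name is illustrative, and the structlog handling from the original is intentionally omitted.

import logging


def configure_logging(debug_enabled: bool, verbose_enabled: bool = False) -> None:
    # Condensed from _configure_logging_for_execution: pick one level per mode...
    if debug_enabled:
        level = logging.DEBUG
    elif verbose_enabled:
        level = logging.ERROR
    else:
        level = logging.CRITICAL
    # ...and apply it to the framework loggers named in the diff above.
    for name in (
        "acb",
        "acb.adapters.logger",
        "acb.workflows.engine",
        "crackerjack.core",
    ):
        logging.getLogger(name).setLevel(level)


configure_logging(debug_enabled=False, verbose_enabled=True)  # example invocation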