crackerjack 0.18.2__py3-none-any.whl → 0.45.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- crackerjack/README.md +19 -0
- crackerjack/__init__.py +96 -2
- crackerjack/__main__.py +637 -138
- crackerjack/adapters/README.md +18 -0
- crackerjack/adapters/__init__.py +39 -0
- crackerjack/adapters/_output_paths.py +167 -0
- crackerjack/adapters/_qa_adapter_base.py +309 -0
- crackerjack/adapters/_tool_adapter_base.py +706 -0
- crackerjack/adapters/ai/README.md +65 -0
- crackerjack/adapters/ai/__init__.py +5 -0
- crackerjack/adapters/ai/claude.py +853 -0
- crackerjack/adapters/complexity/README.md +53 -0
- crackerjack/adapters/complexity/__init__.py +10 -0
- crackerjack/adapters/complexity/complexipy.py +641 -0
- crackerjack/adapters/dependency/__init__.py +22 -0
- crackerjack/adapters/dependency/pip_audit.py +418 -0
- crackerjack/adapters/format/README.md +72 -0
- crackerjack/adapters/format/__init__.py +11 -0
- crackerjack/adapters/format/mdformat.py +313 -0
- crackerjack/adapters/format/ruff.py +516 -0
- crackerjack/adapters/lint/README.md +47 -0
- crackerjack/adapters/lint/__init__.py +11 -0
- crackerjack/adapters/lint/codespell.py +273 -0
- crackerjack/adapters/lsp/README.md +49 -0
- crackerjack/adapters/lsp/__init__.py +27 -0
- crackerjack/adapters/lsp/_base.py +194 -0
- crackerjack/adapters/lsp/_client.py +358 -0
- crackerjack/adapters/lsp/_manager.py +193 -0
- crackerjack/adapters/lsp/skylos.py +283 -0
- crackerjack/adapters/lsp/zuban.py +557 -0
- crackerjack/adapters/refactor/README.md +59 -0
- crackerjack/adapters/refactor/__init__.py +12 -0
- crackerjack/adapters/refactor/creosote.py +318 -0
- crackerjack/adapters/refactor/refurb.py +406 -0
- crackerjack/adapters/refactor/skylos.py +494 -0
- crackerjack/adapters/sast/README.md +132 -0
- crackerjack/adapters/sast/__init__.py +32 -0
- crackerjack/adapters/sast/_base.py +201 -0
- crackerjack/adapters/sast/bandit.py +423 -0
- crackerjack/adapters/sast/pyscn.py +405 -0
- crackerjack/adapters/sast/semgrep.py +241 -0
- crackerjack/adapters/security/README.md +111 -0
- crackerjack/adapters/security/__init__.py +17 -0
- crackerjack/adapters/security/gitleaks.py +339 -0
- crackerjack/adapters/type/README.md +52 -0
- crackerjack/adapters/type/__init__.py +12 -0
- crackerjack/adapters/type/pyrefly.py +402 -0
- crackerjack/adapters/type/ty.py +402 -0
- crackerjack/adapters/type/zuban.py +522 -0
- crackerjack/adapters/utility/README.md +51 -0
- crackerjack/adapters/utility/__init__.py +10 -0
- crackerjack/adapters/utility/checks.py +884 -0
- crackerjack/agents/README.md +264 -0
- crackerjack/agents/__init__.py +66 -0
- crackerjack/agents/architect_agent.py +238 -0
- crackerjack/agents/base.py +167 -0
- crackerjack/agents/claude_code_bridge.py +641 -0
- crackerjack/agents/coordinator.py +600 -0
- crackerjack/agents/documentation_agent.py +520 -0
- crackerjack/agents/dry_agent.py +585 -0
- crackerjack/agents/enhanced_coordinator.py +279 -0
- crackerjack/agents/enhanced_proactive_agent.py +185 -0
- crackerjack/agents/error_middleware.py +53 -0
- crackerjack/agents/formatting_agent.py +230 -0
- crackerjack/agents/helpers/__init__.py +9 -0
- crackerjack/agents/helpers/performance/__init__.py +22 -0
- crackerjack/agents/helpers/performance/performance_ast_analyzer.py +357 -0
- crackerjack/agents/helpers/performance/performance_pattern_detector.py +909 -0
- crackerjack/agents/helpers/performance/performance_recommender.py +572 -0
- crackerjack/agents/helpers/refactoring/__init__.py +22 -0
- crackerjack/agents/helpers/refactoring/code_transformer.py +536 -0
- crackerjack/agents/helpers/refactoring/complexity_analyzer.py +344 -0
- crackerjack/agents/helpers/refactoring/dead_code_detector.py +437 -0
- crackerjack/agents/helpers/test_creation/__init__.py +19 -0
- crackerjack/agents/helpers/test_creation/test_ast_analyzer.py +216 -0
- crackerjack/agents/helpers/test_creation/test_coverage_analyzer.py +643 -0
- crackerjack/agents/helpers/test_creation/test_template_generator.py +1031 -0
- crackerjack/agents/import_optimization_agent.py +1181 -0
- crackerjack/agents/performance_agent.py +325 -0
- crackerjack/agents/performance_helpers.py +205 -0
- crackerjack/agents/proactive_agent.py +55 -0
- crackerjack/agents/refactoring_agent.py +511 -0
- crackerjack/agents/refactoring_helpers.py +247 -0
- crackerjack/agents/security_agent.py +793 -0
- crackerjack/agents/semantic_agent.py +479 -0
- crackerjack/agents/semantic_helpers.py +356 -0
- crackerjack/agents/test_creation_agent.py +570 -0
- crackerjack/agents/test_specialist_agent.py +526 -0
- crackerjack/agents/tracker.py +110 -0
- crackerjack/api.py +647 -0
- crackerjack/cli/README.md +394 -0
- crackerjack/cli/__init__.py +24 -0
- crackerjack/cli/cache_handlers.py +209 -0
- crackerjack/cli/cache_handlers_enhanced.py +680 -0
- crackerjack/cli/facade.py +162 -0
- crackerjack/cli/formatting.py +13 -0
- crackerjack/cli/handlers/__init__.py +85 -0
- crackerjack/cli/handlers/advanced.py +103 -0
- crackerjack/cli/handlers/ai_features.py +62 -0
- crackerjack/cli/handlers/analytics.py +479 -0
- crackerjack/cli/handlers/changelog.py +271 -0
- crackerjack/cli/handlers/config_handlers.py +16 -0
- crackerjack/cli/handlers/coverage.py +84 -0
- crackerjack/cli/handlers/documentation.py +280 -0
- crackerjack/cli/handlers/main_handlers.py +497 -0
- crackerjack/cli/handlers/monitoring.py +371 -0
- crackerjack/cli/handlers.py +700 -0
- crackerjack/cli/interactive.py +488 -0
- crackerjack/cli/options.py +1216 -0
- crackerjack/cli/semantic_handlers.py +292 -0
- crackerjack/cli/utils.py +19 -0
- crackerjack/cli/version.py +19 -0
- crackerjack/code_cleaner.py +1307 -0
- crackerjack/config/README.md +472 -0
- crackerjack/config/__init__.py +275 -0
- crackerjack/config/global_lock_config.py +207 -0
- crackerjack/config/hooks.py +390 -0
- crackerjack/config/loader.py +239 -0
- crackerjack/config/settings.py +141 -0
- crackerjack/config/tool_commands.py +331 -0
- crackerjack/core/README.md +393 -0
- crackerjack/core/__init__.py +0 -0
- crackerjack/core/async_workflow_orchestrator.py +738 -0
- crackerjack/core/autofix_coordinator.py +282 -0
- crackerjack/core/container.py +105 -0
- crackerjack/core/enhanced_container.py +583 -0
- crackerjack/core/file_lifecycle.py +472 -0
- crackerjack/core/performance.py +244 -0
- crackerjack/core/performance_monitor.py +357 -0
- crackerjack/core/phase_coordinator.py +1227 -0
- crackerjack/core/proactive_workflow.py +267 -0
- crackerjack/core/resource_manager.py +425 -0
- crackerjack/core/retry.py +275 -0
- crackerjack/core/service_watchdog.py +601 -0
- crackerjack/core/session_coordinator.py +239 -0
- crackerjack/core/timeout_manager.py +563 -0
- crackerjack/core/websocket_lifecycle.py +410 -0
- crackerjack/core/workflow/__init__.py +21 -0
- crackerjack/core/workflow/workflow_ai_coordinator.py +863 -0
- crackerjack/core/workflow/workflow_event_orchestrator.py +1107 -0
- crackerjack/core/workflow/workflow_issue_parser.py +714 -0
- crackerjack/core/workflow/workflow_phase_executor.py +1158 -0
- crackerjack/core/workflow/workflow_security_gates.py +400 -0
- crackerjack/core/workflow_orchestrator.py +2243 -0
- crackerjack/data/README.md +11 -0
- crackerjack/data/__init__.py +8 -0
- crackerjack/data/models.py +79 -0
- crackerjack/data/repository.py +210 -0
- crackerjack/decorators/README.md +180 -0
- crackerjack/decorators/__init__.py +35 -0
- crackerjack/decorators/error_handling.py +649 -0
- crackerjack/decorators/error_handling_decorators.py +334 -0
- crackerjack/decorators/helpers.py +58 -0
- crackerjack/decorators/patterns.py +281 -0
- crackerjack/decorators/utils.py +58 -0
- crackerjack/docs/INDEX.md +11 -0
- crackerjack/docs/README.md +11 -0
- crackerjack/docs/generated/api/API_REFERENCE.md +10895 -0
- crackerjack/docs/generated/api/CLI_REFERENCE.md +109 -0
- crackerjack/docs/generated/api/CROSS_REFERENCES.md +1755 -0
- crackerjack/docs/generated/api/PROTOCOLS.md +3 -0
- crackerjack/docs/generated/api/SERVICES.md +1252 -0
- crackerjack/documentation/README.md +11 -0
- crackerjack/documentation/__init__.py +31 -0
- crackerjack/documentation/ai_templates.py +756 -0
- crackerjack/documentation/dual_output_generator.py +767 -0
- crackerjack/documentation/mkdocs_integration.py +518 -0
- crackerjack/documentation/reference_generator.py +1065 -0
- crackerjack/dynamic_config.py +678 -0
- crackerjack/errors.py +378 -0
- crackerjack/events/README.md +11 -0
- crackerjack/events/__init__.py +16 -0
- crackerjack/events/telemetry.py +175 -0
- crackerjack/events/workflow_bus.py +346 -0
- crackerjack/exceptions/README.md +301 -0
- crackerjack/exceptions/__init__.py +5 -0
- crackerjack/exceptions/config.py +4 -0
- crackerjack/exceptions/tool_execution_error.py +245 -0
- crackerjack/executors/README.md +591 -0
- crackerjack/executors/__init__.py +13 -0
- crackerjack/executors/async_hook_executor.py +938 -0
- crackerjack/executors/cached_hook_executor.py +316 -0
- crackerjack/executors/hook_executor.py +1295 -0
- crackerjack/executors/hook_lock_manager.py +708 -0
- crackerjack/executors/individual_hook_executor.py +739 -0
- crackerjack/executors/lsp_aware_hook_executor.py +349 -0
- crackerjack/executors/progress_hook_executor.py +282 -0
- crackerjack/executors/tool_proxy.py +433 -0
- crackerjack/hooks/README.md +485 -0
- crackerjack/hooks/lsp_hook.py +93 -0
- crackerjack/intelligence/README.md +557 -0
- crackerjack/intelligence/__init__.py +37 -0
- crackerjack/intelligence/adaptive_learning.py +693 -0
- crackerjack/intelligence/agent_orchestrator.py +485 -0
- crackerjack/intelligence/agent_registry.py +377 -0
- crackerjack/intelligence/agent_selector.py +439 -0
- crackerjack/intelligence/integration.py +250 -0
- crackerjack/interactive.py +719 -0
- crackerjack/managers/README.md +369 -0
- crackerjack/managers/__init__.py +11 -0
- crackerjack/managers/async_hook_manager.py +135 -0
- crackerjack/managers/hook_manager.py +585 -0
- crackerjack/managers/publish_manager.py +631 -0
- crackerjack/managers/test_command_builder.py +391 -0
- crackerjack/managers/test_executor.py +474 -0
- crackerjack/managers/test_manager.py +1357 -0
- crackerjack/managers/test_progress.py +187 -0
- crackerjack/mcp/README.md +374 -0
- crackerjack/mcp/__init__.py +0 -0
- crackerjack/mcp/cache.py +352 -0
- crackerjack/mcp/client_runner.py +121 -0
- crackerjack/mcp/context.py +802 -0
- crackerjack/mcp/dashboard.py +657 -0
- crackerjack/mcp/enhanced_progress_monitor.py +493 -0
- crackerjack/mcp/file_monitor.py +394 -0
- crackerjack/mcp/progress_components.py +607 -0
- crackerjack/mcp/progress_monitor.py +1016 -0
- crackerjack/mcp/rate_limiter.py +336 -0
- crackerjack/mcp/server.py +24 -0
- crackerjack/mcp/server_core.py +526 -0
- crackerjack/mcp/service_watchdog.py +505 -0
- crackerjack/mcp/state.py +407 -0
- crackerjack/mcp/task_manager.py +259 -0
- crackerjack/mcp/tools/README.md +27 -0
- crackerjack/mcp/tools/__init__.py +19 -0
- crackerjack/mcp/tools/core_tools.py +469 -0
- crackerjack/mcp/tools/error_analyzer.py +283 -0
- crackerjack/mcp/tools/execution_tools.py +384 -0
- crackerjack/mcp/tools/intelligence_tool_registry.py +46 -0
- crackerjack/mcp/tools/intelligence_tools.py +264 -0
- crackerjack/mcp/tools/monitoring_tools.py +628 -0
- crackerjack/mcp/tools/proactive_tools.py +367 -0
- crackerjack/mcp/tools/progress_tools.py +222 -0
- crackerjack/mcp/tools/semantic_tools.py +584 -0
- crackerjack/mcp/tools/utility_tools.py +358 -0
- crackerjack/mcp/tools/workflow_executor.py +699 -0
- crackerjack/mcp/websocket/README.md +31 -0
- crackerjack/mcp/websocket/__init__.py +14 -0
- crackerjack/mcp/websocket/app.py +54 -0
- crackerjack/mcp/websocket/endpoints.py +492 -0
- crackerjack/mcp/websocket/event_bridge.py +188 -0
- crackerjack/mcp/websocket/jobs.py +406 -0
- crackerjack/mcp/websocket/monitoring/__init__.py +25 -0
- crackerjack/mcp/websocket/monitoring/api/__init__.py +19 -0
- crackerjack/mcp/websocket/monitoring/api/dependencies.py +141 -0
- crackerjack/mcp/websocket/monitoring/api/heatmap.py +154 -0
- crackerjack/mcp/websocket/monitoring/api/intelligence.py +199 -0
- crackerjack/mcp/websocket/monitoring/api/metrics.py +203 -0
- crackerjack/mcp/websocket/monitoring/api/telemetry.py +101 -0
- crackerjack/mcp/websocket/monitoring/dashboard.py +18 -0
- crackerjack/mcp/websocket/monitoring/factory.py +109 -0
- crackerjack/mcp/websocket/monitoring/filters.py +10 -0
- crackerjack/mcp/websocket/monitoring/metrics.py +64 -0
- crackerjack/mcp/websocket/monitoring/models.py +90 -0
- crackerjack/mcp/websocket/monitoring/utils.py +171 -0
- crackerjack/mcp/websocket/monitoring/websocket_manager.py +78 -0
- crackerjack/mcp/websocket/monitoring/websockets/__init__.py +17 -0
- crackerjack/mcp/websocket/monitoring/websockets/dependencies.py +126 -0
- crackerjack/mcp/websocket/monitoring/websockets/heatmap.py +176 -0
- crackerjack/mcp/websocket/monitoring/websockets/intelligence.py +291 -0
- crackerjack/mcp/websocket/monitoring/websockets/metrics.py +291 -0
- crackerjack/mcp/websocket/monitoring_endpoints.py +21 -0
- crackerjack/mcp/websocket/server.py +174 -0
- crackerjack/mcp/websocket/websocket_handler.py +276 -0
- crackerjack/mcp/websocket_server.py +10 -0
- crackerjack/models/README.md +308 -0
- crackerjack/models/__init__.py +40 -0
- crackerjack/models/config.py +730 -0
- crackerjack/models/config_adapter.py +265 -0
- crackerjack/models/protocols.py +1535 -0
- crackerjack/models/pydantic_models.py +320 -0
- crackerjack/models/qa_config.py +145 -0
- crackerjack/models/qa_results.py +134 -0
- crackerjack/models/resource_protocols.py +299 -0
- crackerjack/models/results.py +35 -0
- crackerjack/models/semantic_models.py +258 -0
- crackerjack/models/task.py +173 -0
- crackerjack/models/test_models.py +60 -0
- crackerjack/monitoring/README.md +11 -0
- crackerjack/monitoring/__init__.py +0 -0
- crackerjack/monitoring/ai_agent_watchdog.py +405 -0
- crackerjack/monitoring/metrics_collector.py +427 -0
- crackerjack/monitoring/regression_prevention.py +580 -0
- crackerjack/monitoring/websocket_server.py +406 -0
- crackerjack/orchestration/README.md +340 -0
- crackerjack/orchestration/__init__.py +43 -0
- crackerjack/orchestration/advanced_orchestrator.py +894 -0
- crackerjack/orchestration/cache/README.md +312 -0
- crackerjack/orchestration/cache/__init__.py +37 -0
- crackerjack/orchestration/cache/memory_cache.py +338 -0
- crackerjack/orchestration/cache/tool_proxy_cache.py +340 -0
- crackerjack/orchestration/config.py +297 -0
- crackerjack/orchestration/coverage_improvement.py +180 -0
- crackerjack/orchestration/execution_strategies.py +361 -0
- crackerjack/orchestration/hook_orchestrator.py +1398 -0
- crackerjack/orchestration/strategies/README.md +401 -0
- crackerjack/orchestration/strategies/__init__.py +39 -0
- crackerjack/orchestration/strategies/adaptive_strategy.py +630 -0
- crackerjack/orchestration/strategies/parallel_strategy.py +237 -0
- crackerjack/orchestration/strategies/sequential_strategy.py +299 -0
- crackerjack/orchestration/test_progress_streamer.py +647 -0
- crackerjack/plugins/README.md +11 -0
- crackerjack/plugins/__init__.py +15 -0
- crackerjack/plugins/base.py +200 -0
- crackerjack/plugins/hooks.py +254 -0
- crackerjack/plugins/loader.py +335 -0
- crackerjack/plugins/managers.py +264 -0
- crackerjack/py313.py +191 -0
- crackerjack/security/README.md +11 -0
- crackerjack/security/__init__.py +0 -0
- crackerjack/security/audit.py +197 -0
- crackerjack/services/README.md +374 -0
- crackerjack/services/__init__.py +9 -0
- crackerjack/services/ai/README.md +295 -0
- crackerjack/services/ai/__init__.py +7 -0
- crackerjack/services/ai/advanced_optimizer.py +878 -0
- crackerjack/services/ai/contextual_ai_assistant.py +542 -0
- crackerjack/services/ai/embeddings.py +444 -0
- crackerjack/services/ai/intelligent_commit.py +328 -0
- crackerjack/services/ai/predictive_analytics.py +510 -0
- crackerjack/services/anomaly_detector.py +392 -0
- crackerjack/services/api_extractor.py +617 -0
- crackerjack/services/backup_service.py +467 -0
- crackerjack/services/bounded_status_operations.py +530 -0
- crackerjack/services/cache.py +369 -0
- crackerjack/services/changelog_automation.py +399 -0
- crackerjack/services/command_execution_service.py +305 -0
- crackerjack/services/config_integrity.py +132 -0
- crackerjack/services/config_merge.py +546 -0
- crackerjack/services/config_service.py +198 -0
- crackerjack/services/config_template.py +493 -0
- crackerjack/services/coverage_badge_service.py +173 -0
- crackerjack/services/coverage_ratchet.py +381 -0
- crackerjack/services/debug.py +733 -0
- crackerjack/services/dependency_analyzer.py +460 -0
- crackerjack/services/dependency_monitor.py +622 -0
- crackerjack/services/documentation_generator.py +493 -0
- crackerjack/services/documentation_service.py +704 -0
- crackerjack/services/enhanced_filesystem.py +497 -0
- crackerjack/services/enterprise_optimizer.py +865 -0
- crackerjack/services/error_pattern_analyzer.py +676 -0
- crackerjack/services/file_filter.py +221 -0
- crackerjack/services/file_hasher.py +149 -0
- crackerjack/services/file_io_service.py +361 -0
- crackerjack/services/file_modifier.py +615 -0
- crackerjack/services/filesystem.py +381 -0
- crackerjack/services/git.py +422 -0
- crackerjack/services/health_metrics.py +615 -0
- crackerjack/services/heatmap_generator.py +744 -0
- crackerjack/services/incremental_executor.py +380 -0
- crackerjack/services/initialization.py +823 -0
- crackerjack/services/input_validator.py +668 -0
- crackerjack/services/intelligent_commit.py +327 -0
- crackerjack/services/log_manager.py +289 -0
- crackerjack/services/logging.py +228 -0
- crackerjack/services/lsp_client.py +628 -0
- crackerjack/services/memory_optimizer.py +414 -0
- crackerjack/services/metrics.py +587 -0
- crackerjack/services/monitoring/README.md +30 -0
- crackerjack/services/monitoring/__init__.py +9 -0
- crackerjack/services/monitoring/dependency_monitor.py +678 -0
- crackerjack/services/monitoring/error_pattern_analyzer.py +676 -0
- crackerjack/services/monitoring/health_metrics.py +716 -0
- crackerjack/services/monitoring/metrics.py +587 -0
- crackerjack/services/monitoring/performance_benchmarks.py +410 -0
- crackerjack/services/monitoring/performance_cache.py +388 -0
- crackerjack/services/monitoring/performance_monitor.py +569 -0
- crackerjack/services/parallel_executor.py +527 -0
- crackerjack/services/pattern_cache.py +333 -0
- crackerjack/services/pattern_detector.py +478 -0
- crackerjack/services/patterns/__init__.py +142 -0
- crackerjack/services/patterns/agents.py +107 -0
- crackerjack/services/patterns/code/__init__.py +15 -0
- crackerjack/services/patterns/code/detection.py +118 -0
- crackerjack/services/patterns/code/imports.py +107 -0
- crackerjack/services/patterns/code/paths.py +159 -0
- crackerjack/services/patterns/code/performance.py +119 -0
- crackerjack/services/patterns/code/replacement.py +36 -0
- crackerjack/services/patterns/core.py +212 -0
- crackerjack/services/patterns/documentation/__init__.py +14 -0
- crackerjack/services/patterns/documentation/badges_markdown.py +96 -0
- crackerjack/services/patterns/documentation/comments_blocks.py +83 -0
- crackerjack/services/patterns/documentation/docstrings.py +89 -0
- crackerjack/services/patterns/formatting.py +226 -0
- crackerjack/services/patterns/operations.py +339 -0
- crackerjack/services/patterns/security/__init__.py +23 -0
- crackerjack/services/patterns/security/code_injection.py +122 -0
- crackerjack/services/patterns/security/credentials.py +190 -0
- crackerjack/services/patterns/security/path_traversal.py +221 -0
- crackerjack/services/patterns/security/unsafe_operations.py +216 -0
- crackerjack/services/patterns/templates.py +62 -0
- crackerjack/services/patterns/testing/__init__.py +18 -0
- crackerjack/services/patterns/testing/error_patterns.py +107 -0
- crackerjack/services/patterns/testing/pytest_output.py +126 -0
- crackerjack/services/patterns/tool_output/__init__.py +16 -0
- crackerjack/services/patterns/tool_output/bandit.py +72 -0
- crackerjack/services/patterns/tool_output/other.py +97 -0
- crackerjack/services/patterns/tool_output/pyright.py +67 -0
- crackerjack/services/patterns/tool_output/ruff.py +44 -0
- crackerjack/services/patterns/url_sanitization.py +114 -0
- crackerjack/services/patterns/utilities.py +42 -0
- crackerjack/services/patterns/utils.py +339 -0
- crackerjack/services/patterns/validation.py +46 -0
- crackerjack/services/patterns/versioning.py +62 -0
- crackerjack/services/predictive_analytics.py +523 -0
- crackerjack/services/profiler.py +280 -0
- crackerjack/services/quality/README.md +415 -0
- crackerjack/services/quality/__init__.py +11 -0
- crackerjack/services/quality/anomaly_detector.py +392 -0
- crackerjack/services/quality/pattern_cache.py +333 -0
- crackerjack/services/quality/pattern_detector.py +479 -0
- crackerjack/services/quality/qa_orchestrator.py +491 -0
- crackerjack/services/quality/quality_baseline.py +395 -0
- crackerjack/services/quality/quality_baseline_enhanced.py +649 -0
- crackerjack/services/quality/quality_intelligence.py +949 -0
- crackerjack/services/regex_patterns.py +58 -0
- crackerjack/services/regex_utils.py +483 -0
- crackerjack/services/secure_path_utils.py +524 -0
- crackerjack/services/secure_status_formatter.py +450 -0
- crackerjack/services/secure_subprocess.py +635 -0
- crackerjack/services/security.py +239 -0
- crackerjack/services/security_logger.py +495 -0
- crackerjack/services/server_manager.py +411 -0
- crackerjack/services/smart_scheduling.py +167 -0
- crackerjack/services/status_authentication.py +460 -0
- crackerjack/services/status_security_manager.py +315 -0
- crackerjack/services/terminal_utils.py +0 -0
- crackerjack/services/thread_safe_status_collector.py +441 -0
- crackerjack/services/tool_filter.py +368 -0
- crackerjack/services/tool_version_service.py +43 -0
- crackerjack/services/unified_config.py +115 -0
- crackerjack/services/validation_rate_limiter.py +220 -0
- crackerjack/services/vector_store.py +689 -0
- crackerjack/services/version_analyzer.py +461 -0
- crackerjack/services/version_checker.py +223 -0
- crackerjack/services/websocket_resource_limiter.py +438 -0
- crackerjack/services/zuban_lsp_service.py +391 -0
- crackerjack/slash_commands/README.md +11 -0
- crackerjack/slash_commands/__init__.py +59 -0
- crackerjack/slash_commands/init.md +112 -0
- crackerjack/slash_commands/run.md +197 -0
- crackerjack/slash_commands/status.md +127 -0
- crackerjack/tools/README.md +11 -0
- crackerjack/tools/__init__.py +30 -0
- crackerjack/tools/_git_utils.py +105 -0
- crackerjack/tools/check_added_large_files.py +139 -0
- crackerjack/tools/check_ast.py +105 -0
- crackerjack/tools/check_json.py +103 -0
- crackerjack/tools/check_jsonschema.py +297 -0
- crackerjack/tools/check_toml.py +103 -0
- crackerjack/tools/check_yaml.py +110 -0
- crackerjack/tools/codespell_wrapper.py +72 -0
- crackerjack/tools/end_of_file_fixer.py +202 -0
- crackerjack/tools/format_json.py +128 -0
- crackerjack/tools/mdformat_wrapper.py +114 -0
- crackerjack/tools/trailing_whitespace.py +198 -0
- crackerjack/tools/validate_input_validator_patterns.py +236 -0
- crackerjack/tools/validate_regex_patterns.py +188 -0
- crackerjack/ui/README.md +11 -0
- crackerjack/ui/__init__.py +1 -0
- crackerjack/ui/dashboard_renderer.py +28 -0
- crackerjack/ui/templates/README.md +11 -0
- crackerjack/utils/console_utils.py +13 -0
- crackerjack/utils/dependency_guard.py +230 -0
- crackerjack/utils/retry_utils.py +275 -0
- crackerjack/workflows/README.md +590 -0
- crackerjack/workflows/__init__.py +46 -0
- crackerjack/workflows/actions.py +811 -0
- crackerjack/workflows/auto_fix.py +444 -0
- crackerjack/workflows/container_builder.py +499 -0
- crackerjack/workflows/definitions.py +443 -0
- crackerjack/workflows/engine.py +177 -0
- crackerjack/workflows/event_bridge.py +242 -0
- crackerjack-0.45.2.dist-info/METADATA +1678 -0
- crackerjack-0.45.2.dist-info/RECORD +478 -0
- {crackerjack-0.18.2.dist-info → crackerjack-0.45.2.dist-info}/WHEEL +1 -1
- crackerjack-0.45.2.dist-info/entry_points.txt +2 -0
- crackerjack/.gitignore +0 -14
- crackerjack/.libcst.codemod.yaml +0 -18
- crackerjack/.pdm.toml +0 -1
- crackerjack/.pre-commit-config.yaml +0 -91
- crackerjack/.pytest_cache/.gitignore +0 -2
- crackerjack/.pytest_cache/CACHEDIR.TAG +0 -4
- crackerjack/.pytest_cache/README.md +0 -8
- crackerjack/.pytest_cache/v/cache/nodeids +0 -1
- crackerjack/.pytest_cache/v/cache/stepwise +0 -1
- crackerjack/.ruff_cache/.gitignore +0 -1
- crackerjack/.ruff_cache/0.1.11/3256171999636029978 +0 -0
- crackerjack/.ruff_cache/0.1.14/602324811142551221 +0 -0
- crackerjack/.ruff_cache/0.1.4/10355199064880463147 +0 -0
- crackerjack/.ruff_cache/0.1.6/15140459877605758699 +0 -0
- crackerjack/.ruff_cache/0.1.7/1790508110482614856 +0 -0
- crackerjack/.ruff_cache/0.1.9/17041001205004563469 +0 -0
- crackerjack/.ruff_cache/0.11.2/4070660268492669020 +0 -0
- crackerjack/.ruff_cache/0.11.3/9818742842212983150 +0 -0
- crackerjack/.ruff_cache/0.11.4/9818742842212983150 +0 -0
- crackerjack/.ruff_cache/0.11.6/3557596832929915217 +0 -0
- crackerjack/.ruff_cache/0.11.7/10386934055395314831 +0 -0
- crackerjack/.ruff_cache/0.11.7/3557596832929915217 +0 -0
- crackerjack/.ruff_cache/0.11.8/530407680854991027 +0 -0
- crackerjack/.ruff_cache/0.2.0/10047773857155985907 +0 -0
- crackerjack/.ruff_cache/0.2.1/8522267973936635051 +0 -0
- crackerjack/.ruff_cache/0.2.2/18053836298936336950 +0 -0
- crackerjack/.ruff_cache/0.3.0/12548816621480535786 +0 -0
- crackerjack/.ruff_cache/0.3.3/11081883392474770722 +0 -0
- crackerjack/.ruff_cache/0.3.4/676973378459347183 +0 -0
- crackerjack/.ruff_cache/0.3.5/16311176246009842383 +0 -0
- crackerjack/.ruff_cache/0.5.7/1493622539551733492 +0 -0
- crackerjack/.ruff_cache/0.5.7/6231957614044513175 +0 -0
- crackerjack/.ruff_cache/0.5.7/9932762556785938009 +0 -0
- crackerjack/.ruff_cache/0.6.0/11982804814124138945 +0 -0
- crackerjack/.ruff_cache/0.6.0/12055761203849489982 +0 -0
- crackerjack/.ruff_cache/0.6.2/1206147804896221174 +0 -0
- crackerjack/.ruff_cache/0.6.4/1206147804896221174 +0 -0
- crackerjack/.ruff_cache/0.6.5/1206147804896221174 +0 -0
- crackerjack/.ruff_cache/0.6.7/3657366982708166874 +0 -0
- crackerjack/.ruff_cache/0.6.9/285614542852677309 +0 -0
- crackerjack/.ruff_cache/0.7.1/1024065805990144819 +0 -0
- crackerjack/.ruff_cache/0.7.1/285614542852677309 +0 -0
- crackerjack/.ruff_cache/0.7.3/16061516852537040135 +0 -0
- crackerjack/.ruff_cache/0.8.4/16354268377385700367 +0 -0
- crackerjack/.ruff_cache/0.9.10/12813592349865671909 +0 -0
- crackerjack/.ruff_cache/0.9.10/923908772239632759 +0 -0
- crackerjack/.ruff_cache/0.9.3/13948373885254993391 +0 -0
- crackerjack/.ruff_cache/0.9.9/12813592349865671909 +0 -0
- crackerjack/.ruff_cache/0.9.9/8843823720003377982 +0 -0
- crackerjack/.ruff_cache/CACHEDIR.TAG +0 -1
- crackerjack/crackerjack.py +0 -855
- crackerjack/pyproject.toml +0 -214
- crackerjack-0.18.2.dist-info/METADATA +0 -420
- crackerjack-0.18.2.dist-info/RECORD +0 -59
- crackerjack-0.18.2.dist-info/entry_points.txt +0 -4
- {crackerjack-0.18.2.dist-info → crackerjack-0.45.2.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,949 @@
|
|
|
1
|
+
"""Advanced ML-based quality intelligence with anomaly detection and predictive analytics."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import typing as t
|
|
5
|
+
from dataclasses import dataclass, field
|
|
6
|
+
from datetime import datetime
|
|
7
|
+
from enum import Enum
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
|
|
10
|
+
import numpy as np
|
|
11
|
+
from scipy import stats
|
|
12
|
+
|
|
13
|
+
from crackerjack.models.protocols import QualityIntelligenceProtocol
|
|
14
|
+
|
|
15
|
+
from .quality_baseline_enhanced import (
|
|
16
|
+
AlertSeverity,
|
|
17
|
+
EnhancedQualityBaselineService,
|
|
18
|
+
TrendDirection,
|
|
19
|
+
)
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class AnomalyType(str, Enum):
    """Kinds of quality-metric anomalies the detector can report.

    Inherits from ``str`` so members compare equal to their raw string
    values and serialize cleanly to JSON.
    """

    SPIKE = "spike"  # sudden increase in a metric
    DROP = "drop"  # sudden decrease in a metric
    DRIFT = "drift"  # gradual change over time
    OSCILLATION = "oscillation"  # unusual fluctuation pattern
    OUTLIER = "outlier"  # statistical outlier
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class PatternType(str, Enum):
    """Kinds of patterns the intelligence service can identify.

    A ``str`` subclass so members are directly JSON-serializable and
    comparable against plain strings.
    """

    CYCLIC = "cyclic"  # regular recurring pattern
    SEASONAL = "seasonal"  # time-based pattern
    CORRELATION = "correlation"  # metric correlation pattern
    REGRESSION = "regression"  # quality regression pattern
    IMPROVEMENT = "improvement"  # quality improvement pattern
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
@dataclass
class QualityAnomaly:
    """Detected quality anomaly with ML confidence.

    Records what was observed (``actual_value``), what was expected
    (``expected_value``), and how far apart they are in standard
    deviations (``deviation_sigma``).
    """

    anomaly_type: AnomalyType
    metric_name: str
    detected_at: datetime
    confidence: float  # 0.0 to 1.0
    severity: AlertSeverity
    description: str
    actual_value: float
    expected_value: float
    deviation_sigma: float  # standard deviations from normal
    # Free-form extra detail attached by the detector.
    # Use a plain ``dict`` factory: subscripting (``dict[str, t.Any]``)
    # belongs in the annotation, not in the runtime factory.
    context: dict[str, t.Any] = field(default_factory=dict)

    def to_dict(self) -> dict[str, t.Any]:
        """Return a JSON-serializable representation of this anomaly.

        ``detected_at`` is rendered via ``isoformat()``; the enum-valued
        fields are ``str`` subclasses and serialize as their raw values.
        """
        return {
            "anomaly_type": self.anomaly_type,
            "metric_name": self.metric_name,
            "detected_at": self.detected_at.isoformat(),
            "confidence": self.confidence,
            "severity": self.severity,
            "description": self.description,
            "actual_value": self.actual_value,
            "expected_value": self.expected_value,
            "deviation_sigma": self.deviation_sigma,
            "context": self.context,
        }
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
@dataclass
class QualityPattern:
    """Identified quality pattern with statistical analysis.

    A pattern may span several metrics (``metric_names``); its strength
    is captured by ``confidence``, ``correlation_strength`` (for
    correlation patterns) and ``statistical_significance`` (a p-value).
    """

    pattern_type: PatternType
    metric_names: list[str]
    detected_at: datetime
    confidence: float  # 0.0 to 1.0
    description: str
    period_days: int  # length of the observed period
    correlation_strength: float  # for correlation patterns
    trend_direction: TrendDirection
    statistical_significance: float  # p-value
    # Free-form extra detail attached by the analyzer.
    # Plain ``dict`` factory; the subscripted form belongs only in the
    # annotation, not in the runtime factory.
    context: dict[str, t.Any] = field(default_factory=dict)

    def to_dict(self) -> dict[str, t.Any]:
        """Return a JSON-serializable representation of this pattern."""
        return {
            "pattern_type": self.pattern_type,
            "metric_names": self.metric_names,
            "detected_at": self.detected_at.isoformat(),
            "confidence": self.confidence,
            "description": self.description,
            "period_days": self.period_days,
            "correlation_strength": self.correlation_strength,
            "trend_direction": self.trend_direction,
            "statistical_significance": self.statistical_significance,
            "context": self.context,
        }
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
@dataclass
class QualityPrediction:
    """Advanced quality prediction with confidence intervals.

    The interval ``[confidence_lower, confidence_upper]`` holds with
    probability ``confidence_level`` over the next
    ``prediction_horizon_days`` days.
    """

    metric_name: str
    predicted_value: float
    confidence_lower: float
    confidence_upper: float
    confidence_level: float  # e.g. 0.95 for a 95% interval
    prediction_horizon_days: int
    prediction_method: str
    created_at: datetime
    factors: list[str] = field(default_factory=list)
    risk_assessment: str = "low"  # low, medium, high

    def to_dict(self) -> dict[str, t.Any]:
        """Return a JSON-serializable representation of this prediction.

        Field order is preserved; ``created_at`` is replaced in place by
        its ISO-8601 string.
        """
        ordered_names = (
            "metric_name",
            "predicted_value",
            "confidence_lower",
            "confidence_upper",
            "confidence_level",
            "prediction_horizon_days",
            "prediction_method",
            "created_at",
            "factors",
            "risk_assessment",
        )
        payload: dict[str, t.Any] = {
            name: getattr(self, name) for name in ordered_names
        }
        # Updating an existing key keeps its insertion position.
        payload["created_at"] = self.created_at.isoformat()
        return payload
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
@dataclass
class QualityInsights:
    """Comprehensive quality insights with ML analysis."""

    anomalies: list[QualityAnomaly]
    patterns: list[QualityPattern]
    predictions: list[QualityPrediction]
    recommendations: list[str]
    # 0.0 (worst) to 1.0 (best).
    overall_health_score: float
    # One of: "low", "medium", "high", "critical".
    risk_level: str
    generated_at: datetime = field(default_factory=datetime.now)

    def to_dict(self) -> dict[str, t.Any]:
        """Serialize the insights, recursively serializing nested records."""
        serialized_anomalies = [anomaly.to_dict() for anomaly in self.anomalies]
        serialized_patterns = [pattern.to_dict() for pattern in self.patterns]
        serialized_predictions = [
            prediction.to_dict() for prediction in self.predictions
        ]
        return {
            "anomalies": serialized_anomalies,
            "patterns": serialized_patterns,
            "predictions": serialized_predictions,
            "recommendations": self.recommendations,
            "overall_health_score": self.overall_health_score,
            "risk_level": self.risk_level,
            "generated_at": self.generated_at.isoformat(),
        }
|
|
155
|
+
|
|
156
|
+
|
|
157
|
+
class QualityIntelligenceService(QualityIntelligenceProtocol):
    """Advanced ML-based quality intelligence service.

    Wraps an EnhancedQualityBaselineService and layers statistical anomaly
    detection, correlation-pattern mining and trend-based prediction on top
    of the recorded quality baselines.
    """

    def __init__(
        self,
        quality_service: EnhancedQualityBaselineService,
        anomaly_sensitivity: float = 2.0,  # Standard deviations for anomaly detection
        min_data_points: int = 10,
    ) -> None:
        # Source of historical quality baselines.
        self.quality_service = quality_service
        # Z-score threshold above which a data point is flagged as anomalous.
        self.anomaly_sensitivity = anomaly_sensitivity
        # Minimum sample count required before any statistics are computed.
        self.min_data_points = min_data_points
|
|
169
|
+
|
|
170
|
+
def detect_anomalies(
|
|
171
|
+
self, days: int = 30, metrics: list[str] | None = None
|
|
172
|
+
) -> list[QualityAnomaly]:
|
|
173
|
+
"""Detect anomalies in quality metrics using statistical analysis (sync version)."""
|
|
174
|
+
metrics = self._get_default_metrics() if metrics is None else metrics
|
|
175
|
+
|
|
176
|
+
baselines = self.quality_service.get_recent_baselines(limit=days * 2)
|
|
177
|
+
if len(baselines) < self.min_data_points:
|
|
178
|
+
return []
|
|
179
|
+
|
|
180
|
+
anomalies = []
|
|
181
|
+
for metric_name in metrics:
|
|
182
|
+
metric_anomalies = self._detect_metric_anomalies(metric_name, baselines)
|
|
183
|
+
anomalies.extend(metric_anomalies)
|
|
184
|
+
|
|
185
|
+
return anomalies
|
|
186
|
+
|
|
187
|
+
async def detect_anomalies_async(
|
|
188
|
+
self, days: int = 30, metrics: list[str] | None = None
|
|
189
|
+
) -> list[QualityAnomaly]:
|
|
190
|
+
"""Detect anomalies in quality metrics using statistical analysis (async version)."""
|
|
191
|
+
metrics = self._get_default_metrics() if metrics is None else metrics
|
|
192
|
+
|
|
193
|
+
baselines = await self.quality_service.aget_recent_baselines(limit=days * 2)
|
|
194
|
+
if len(baselines) < self.min_data_points:
|
|
195
|
+
return []
|
|
196
|
+
|
|
197
|
+
anomalies = []
|
|
198
|
+
for metric_name in metrics:
|
|
199
|
+
metric_anomalies = self._detect_metric_anomalies(metric_name, baselines)
|
|
200
|
+
anomalies.extend(metric_anomalies)
|
|
201
|
+
|
|
202
|
+
return anomalies
|
|
203
|
+
|
|
204
|
+
def _get_default_metrics(self) -> list[str]:
|
|
205
|
+
"""Get default metrics list[t.Any] for anomaly detection."""
|
|
206
|
+
return [
|
|
207
|
+
"quality_score",
|
|
208
|
+
"coverage_percent",
|
|
209
|
+
"hook_failures",
|
|
210
|
+
"security_issues",
|
|
211
|
+
"type_errors",
|
|
212
|
+
"linting_issues",
|
|
213
|
+
]
|
|
214
|
+
|
|
215
|
+
    def _detect_metric_anomalies(
        self, metric_name: str, baselines: list[t.Any]
    ) -> list[QualityAnomaly]:
        """Detect anomalies for a specific metric.

        Pipeline: extract the metric's time series from the baselines,
        compute summary statistics, then flag z-score outliers.
        """
        values, timestamps = self._extract_metric_values(metric_name, baselines)

        # Some baselines may lack the metric, so re-check the sample size.
        if len(values) < self.min_data_points:
            return []

        stats_data = self._calculate_statistical_metrics(values)
        if stats_data is None:  # No variation
            return []

        return self._identify_outlier_anomalies(
            metric_name, values, timestamps, stats_data
        )
|
|
231
|
+
|
|
232
|
+
def _extract_metric_values(
|
|
233
|
+
self, metric_name: str, baselines: list[t.Any]
|
|
234
|
+
) -> tuple[list[float], list[t.Any]]:
|
|
235
|
+
"""Extract metric values and timestamps from baselines."""
|
|
236
|
+
values = []
|
|
237
|
+
timestamps = []
|
|
238
|
+
|
|
239
|
+
for baseline in baselines:
|
|
240
|
+
metric_value = self._get_baseline_metric_value(baseline, metric_name)
|
|
241
|
+
if metric_value is not None:
|
|
242
|
+
values.append(metric_value)
|
|
243
|
+
timestamps.append(baseline.timestamp)
|
|
244
|
+
|
|
245
|
+
return values, timestamps
|
|
246
|
+
|
|
247
|
+
def _get_baseline_metric_value(
|
|
248
|
+
self, baseline: t.Any, metric_name: str
|
|
249
|
+
) -> float | None:
|
|
250
|
+
"""Get metric value from baseline object."""
|
|
251
|
+
metric_mapping = {
|
|
252
|
+
"quality_score": baseline.quality_score,
|
|
253
|
+
"coverage_percent": baseline.coverage_percent,
|
|
254
|
+
"hook_failures": baseline.hook_failures,
|
|
255
|
+
"security_issues": baseline.security_issues,
|
|
256
|
+
"type_errors": baseline.type_errors,
|
|
257
|
+
"linting_issues": baseline.linting_issues,
|
|
258
|
+
}
|
|
259
|
+
return metric_mapping.get(metric_name)
|
|
260
|
+
|
|
261
|
+
def _calculate_statistical_metrics(
|
|
262
|
+
self, values: list[float]
|
|
263
|
+
) -> dict[str, float] | None:
|
|
264
|
+
"""Calculate statistical metrics for anomaly detection."""
|
|
265
|
+
values_array = np.array(values)
|
|
266
|
+
mean_val = np.mean(values_array)
|
|
267
|
+
std_val = np.std(values_array)
|
|
268
|
+
|
|
269
|
+
if std_val == 0:
|
|
270
|
+
return None # No variation to detect anomalies
|
|
271
|
+
|
|
272
|
+
z_scores = np.abs((values_array - mean_val) / std_val)
|
|
273
|
+
|
|
274
|
+
return {
|
|
275
|
+
"mean": mean_val,
|
|
276
|
+
"std": std_val,
|
|
277
|
+
"z_scores": z_scores,
|
|
278
|
+
"values_array": values_array,
|
|
279
|
+
}
|
|
280
|
+
|
|
281
|
+
    def _identify_outlier_anomalies(
        self,
        metric_name: str,
        values: list[float],
        timestamps: list[t.Any],
        stats_data: dict[str, t.Any],
    ) -> list[QualityAnomaly]:
        """Identify outlier anomalies based on z-scores.

        A point is anomalous when its absolute z-score exceeds
        ``self.anomaly_sensitivity`` (default: 2 standard deviations).
        """
        anomalies = []
        z_scores = stats_data["z_scores"]
        mean_val = stats_data["mean"]
        std_val = stats_data["std"]

        # Walk the aligned series; i is the point's position in the series.
        for i, (value, timestamp, z_score) in enumerate(
            zip(values, timestamps, z_scores)
        ):
            if z_score > self.anomaly_sensitivity:
                anomaly = self._create_anomaly_object(
                    metric_name,
                    value,
                    timestamp,
                    z_score,
                    mean_val,
                    std_val,
                    i,
                    len(values),
                )
                anomalies.append(anomaly)

        return anomalies
|
|
311
|
+
|
|
312
|
+
    def _create_anomaly_object(
        self,
        metric_name: str,
        value: float,
        timestamp: t.Any,
        z_score: float,
        mean_val: float,
        std_val: float,
        position: int,
        data_points: int,
    ) -> QualityAnomaly:
        """Create QualityAnomaly object from detected outlier.

        The z-score decides both the direction (spike vs drop) and the
        severity; confidence is the z-score scaled into [0, 1], saturating
        at 4 sigma.
        """
        anomaly_type, severity = self._determine_anomaly_type_and_severity(
            value, mean_val, z_score
        )
        confidence = min(1.0, z_score / 4.0)  # Scale to 0-1

        return QualityAnomaly(
            anomaly_type=anomaly_type,
            metric_name=metric_name,
            detected_at=timestamp,
            confidence=confidence,
            severity=severity,
            description=f"{metric_name} {anomaly_type} detected: {value:.2f} (expected ~{mean_val:.2f})",
            actual_value=value,
            expected_value=mean_val,
            deviation_sigma=z_score,
            context={
                "metric_mean": mean_val,
                "metric_std": std_val,
                "data_points": data_points,
                "position_in_series": position,
            },
        )
|
|
346
|
+
|
|
347
|
+
def _determine_anomaly_type_and_severity(
|
|
348
|
+
self, value: float, mean_val: float, z_score: float
|
|
349
|
+
) -> tuple[AnomalyType, AlertSeverity]:
|
|
350
|
+
"""Determine anomaly type and severity based on value and z-score."""
|
|
351
|
+
if value > mean_val:
|
|
352
|
+
anomaly_type = AnomalyType.SPIKE
|
|
353
|
+
else:
|
|
354
|
+
anomaly_type = AnomalyType.DROP
|
|
355
|
+
|
|
356
|
+
severity = AlertSeverity.CRITICAL if z_score > 3.0 else AlertSeverity.WARNING
|
|
357
|
+
|
|
358
|
+
return anomaly_type, severity
|
|
359
|
+
|
|
360
|
+
def identify_patterns(self, days: int = 60) -> list[QualityPattern]:
|
|
361
|
+
"""Identify patterns in quality metrics using correlation and trend analysis (sync version)."""
|
|
362
|
+
baselines = self.quality_service.get_recent_baselines(limit=days * 2)
|
|
363
|
+
if len(baselines) < self.min_data_points:
|
|
364
|
+
return []
|
|
365
|
+
|
|
366
|
+
metrics_data = self._extract_metrics_data(baselines)
|
|
367
|
+
return self._find_correlation_patterns(metrics_data, days)
|
|
368
|
+
|
|
369
|
+
async def identify_patterns_async(self, days: int = 60) -> list[QualityPattern]:
|
|
370
|
+
"""Identify patterns in quality metrics using correlation and trend analysis (async version)."""
|
|
371
|
+
baselines = await self.quality_service.aget_recent_baselines(limit=days * 2)
|
|
372
|
+
if len(baselines) < self.min_data_points:
|
|
373
|
+
return []
|
|
374
|
+
|
|
375
|
+
metrics_data = self._extract_metrics_data(baselines)
|
|
376
|
+
return self._find_correlation_patterns(metrics_data, days)
|
|
377
|
+
|
|
378
|
+
def _extract_metrics_data(self, baselines: list[t.Any]) -> dict[str, list[float]]:
|
|
379
|
+
"""Extract metric data from baselines for correlation analysis."""
|
|
380
|
+
metrics_data = {
|
|
381
|
+
"quality_score": [],
|
|
382
|
+
"coverage_percent": [],
|
|
383
|
+
"hook_failures": [],
|
|
384
|
+
"security_issues": [],
|
|
385
|
+
"type_errors": [],
|
|
386
|
+
"linting_issues": [],
|
|
387
|
+
}
|
|
388
|
+
|
|
389
|
+
for baseline in baselines:
|
|
390
|
+
metrics_data["quality_score"].append(baseline.quality_score)
|
|
391
|
+
metrics_data["coverage_percent"].append(baseline.coverage_percent)
|
|
392
|
+
metrics_data["hook_failures"].append(baseline.hook_failures)
|
|
393
|
+
metrics_data["security_issues"].append(baseline.security_issues)
|
|
394
|
+
metrics_data["type_errors"].append(baseline.type_errors)
|
|
395
|
+
metrics_data["linting_issues"].append(baseline.linting_issues)
|
|
396
|
+
|
|
397
|
+
return metrics_data
|
|
398
|
+
|
|
399
|
+
def _find_correlation_patterns(
|
|
400
|
+
self, metrics_data: dict[str, list[float]], days: int
|
|
401
|
+
) -> list[QualityPattern]:
|
|
402
|
+
"""Find correlation patterns between metrics."""
|
|
403
|
+
patterns = []
|
|
404
|
+
metric_names = list[t.Any](metrics_data.keys())
|
|
405
|
+
|
|
406
|
+
for i, metric1 in enumerate(metric_names):
|
|
407
|
+
for metric2 in metric_names[i + 1 :]:
|
|
408
|
+
pattern = self._analyze_metric_correlation(
|
|
409
|
+
metric1, metric2, metrics_data, days
|
|
410
|
+
)
|
|
411
|
+
if pattern:
|
|
412
|
+
patterns.append(pattern)
|
|
413
|
+
|
|
414
|
+
return patterns
|
|
415
|
+
|
|
416
|
+
    def _analyze_metric_correlation(
        self,
        metric1: str,
        metric2: str,
        metrics_data: dict[str, list[float]],
        days: int,
    ) -> QualityPattern | None:
        """Analyze correlation between two metrics.

        Returns a QualityPattern only for statistically significant, strong
        Pearson correlations (|r| > 0.7 and p < 0.05); otherwise None.
        """
        values1 = np.array(metrics_data[metric1])
        values2 = np.array(metrics_data[metric2])

        if len(values1) < self.min_data_points:
            return None

        # Handle constant input arrays that would cause correlation warnings
        try:
            # Check for constant arrays (all values the same)
            if 0 in (np.var(values1), np.var(values2)):
                # Cannot calculate correlation for constant arrays
                return None

            correlation, p_value = stats.pearsonr(values1, values2)
        except (ValueError, RuntimeWarning):
            # Handle any other correlation calculation issues
            # NOTE(review): RuntimeWarning is a warning class and is only
            # raised if warnings are promoted to errors — confirm whether
            # this branch is ever expected to fire for the warning case.
            return None

        # Strong correlation threshold
        if abs(correlation) > 0.7 and p_value < 0.05:
            return self._create_correlation_pattern(
                metric1, metric2, correlation, p_value, values1, days
            )

        return None
|
|
449
|
+
|
|
450
|
+
    def _create_correlation_pattern(
        self,
        metric1: str,
        metric2: str,
        correlation: float,
        p_value: float,
        values1: np.ndarray,
        days: int,
    ) -> QualityPattern:
        """Create a quality pattern from correlation analysis."""
        trend_dir, description = self._get_correlation_trend_and_description(
            metric1, metric2, correlation
        )

        return QualityPattern(
            pattern_type=PatternType.CORRELATION,
            metric_names=[metric1, metric2],
            detected_at=datetime.now(),
            # |r| doubles as both the confidence and the strength measure.
            confidence=abs(correlation),
            description=description,
            period_days=days,
            correlation_strength=abs(correlation),
            trend_direction=trend_dir,
            statistical_significance=p_value,
            context={
                "correlation_coefficient": correlation,
                "sample_size": len(values1),
                "strength": self._get_correlation_strength_label(correlation),
            },
        )
|
|
480
|
+
|
|
481
|
+
def _get_correlation_trend_and_description(
|
|
482
|
+
self, metric1: str, metric2: str, correlation: float
|
|
483
|
+
) -> tuple[TrendDirection, str]:
|
|
484
|
+
"""Get trend direction and description for correlation."""
|
|
485
|
+
if correlation > 0:
|
|
486
|
+
return (
|
|
487
|
+
TrendDirection.IMPROVING,
|
|
488
|
+
f"Strong positive correlation between {metric1} and {metric2}",
|
|
489
|
+
)
|
|
490
|
+
return (
|
|
491
|
+
TrendDirection.DECLINING,
|
|
492
|
+
f"Strong negative correlation between {metric1} and {metric2}",
|
|
493
|
+
)
|
|
494
|
+
|
|
495
|
+
def _get_correlation_strength_label(self, correlation: float) -> str:
|
|
496
|
+
"""Get strength label for correlation coefficient."""
|
|
497
|
+
abs_corr = abs(correlation)
|
|
498
|
+
if abs_corr > 0.9:
|
|
499
|
+
return "very strong"
|
|
500
|
+
elif abs_corr > 0.7:
|
|
501
|
+
return "strong"
|
|
502
|
+
return "moderate"
|
|
503
|
+
|
|
504
|
+
def generate_advanced_predictions(
|
|
505
|
+
self, horizon_days: int = 14, confidence_level: float = 0.95
|
|
506
|
+
) -> list[QualityPrediction]:
|
|
507
|
+
"""Generate advanced predictions with confidence intervals."""
|
|
508
|
+
baselines = self.quality_service.get_recent_baselines(limit=90)
|
|
509
|
+
if len(baselines) < self.min_data_points:
|
|
510
|
+
return []
|
|
511
|
+
|
|
512
|
+
predictions = []
|
|
513
|
+
metrics = ["quality_score", "coverage_percent"]
|
|
514
|
+
|
|
515
|
+
for metric_name in metrics:
|
|
516
|
+
values, timestamps = self._extract_time_series(baselines, metric_name)
|
|
517
|
+
|
|
518
|
+
if len(values) < self.min_data_points:
|
|
519
|
+
continue
|
|
520
|
+
|
|
521
|
+
prediction = self._create_metric_prediction(
|
|
522
|
+
metric_name, values, horizon_days, confidence_level
|
|
523
|
+
)
|
|
524
|
+
predictions.append(prediction)
|
|
525
|
+
|
|
526
|
+
return predictions
|
|
527
|
+
|
|
528
|
+
def _extract_time_series(
|
|
529
|
+
self, baselines: list[t.Any], metric_name: str
|
|
530
|
+
) -> tuple[list[t.Any], list[t.Any]]:
|
|
531
|
+
"""Extract time series data for specified metric."""
|
|
532
|
+
values = []
|
|
533
|
+
timestamps = []
|
|
534
|
+
|
|
535
|
+
for baseline in baselines:
|
|
536
|
+
if metric_name == "quality_score":
|
|
537
|
+
values.append(baseline.quality_score)
|
|
538
|
+
elif metric_name == "coverage_percent":
|
|
539
|
+
values.append(baseline.coverage_percent)
|
|
540
|
+
timestamps.append(baseline.timestamp)
|
|
541
|
+
|
|
542
|
+
return values, timestamps
|
|
543
|
+
|
|
544
|
+
    def _create_metric_prediction(
        self,
        metric_name: str,
        values: list[t.Any],
        horizon_days: int,
        confidence_level: float,
    ) -> QualityPrediction:
        """Create prediction for a single metric.

        Fits a linear trend, derives a confidence interval around the
        extrapolated value, and labels the result with a risk level.
        """
        regression_results = self._perform_linear_regression(values, horizon_days)
        confidence_bounds = self._calculate_confidence_interval(
            values, regression_results, confidence_level
        )
        risk_level = self._assess_prediction_risk(
            metric_name, regression_results["predicted_value"]
        )

        return QualityPrediction(
            metric_name=metric_name,
            # Cast numpy scalars to plain floats for clean serialization.
            predicted_value=float(regression_results["predicted_value"]),
            confidence_lower=float(confidence_bounds["lower"]),
            confidence_upper=float(confidence_bounds["upper"]),
            confidence_level=confidence_level,
            prediction_horizon_days=horizon_days,
            prediction_method="linear_regression_with_confidence_intervals",
            created_at=datetime.now(),
            factors=["historical_trend", "statistical_analysis"],
            risk_assessment=risk_level,
        )
|
|
572
|
+
|
|
573
|
+
def _perform_linear_regression(
|
|
574
|
+
self, values: list[t.Any], horizon_days: int
|
|
575
|
+
) -> dict[str, t.Any]:
|
|
576
|
+
"""Perform linear regression and predict future value."""
|
|
577
|
+
values_array = np.array(values)
|
|
578
|
+
time_indices = np.arange(len(values))
|
|
579
|
+
|
|
580
|
+
slope, intercept, r_value, p_value, std_err = stats.linregress(
|
|
581
|
+
time_indices, values_array
|
|
582
|
+
)
|
|
583
|
+
|
|
584
|
+
future_index = len(values) + horizon_days
|
|
585
|
+
predicted_value = slope * future_index + intercept
|
|
586
|
+
|
|
587
|
+
return {
|
|
588
|
+
"slope": slope,
|
|
589
|
+
"intercept": intercept,
|
|
590
|
+
"predicted_value": predicted_value,
|
|
591
|
+
"time_indices": time_indices,
|
|
592
|
+
"values_array": values_array,
|
|
593
|
+
"horizon_days": horizon_days,
|
|
594
|
+
}
|
|
595
|
+
|
|
596
|
+
    def _calculate_confidence_interval(
        self,
        values: list[t.Any],
        regression_results: dict[str, t.Any],
        confidence_level: float,
    ) -> dict[str, t.Any]:
        """Calculate confidence interval for prediction.

        Uses the residual spread of the fitted line and a Student-t
        quantile with ``len(values) - 2`` degrees of freedom.
        """
        slope = regression_results["slope"]
        intercept = regression_results["intercept"]
        time_indices = regression_results["time_indices"]
        values_array = regression_results["values_array"]
        predicted_value = regression_results["predicted_value"]

        # Deviation of the observed points from the fitted line.
        residuals = values_array - (slope * time_indices + intercept)
        residual_std = np.std(residuals)

        future_index = len(values) + regression_results["horizon_days"]
        # Two-sided t quantile for the requested confidence level.
        t_value = stats.t.ppf((1 + confidence_level) / 2, len(values) - 2)

        margin_error = self._calculate_margin_error(
            t_value, residual_std, len(values), future_index, time_indices
        )

        return {
            "lower": predicted_value - margin_error,
            "upper": predicted_value + margin_error,
        }
|
|
623
|
+
|
|
624
|
+
def _calculate_margin_error(
|
|
625
|
+
self,
|
|
626
|
+
t_value: float,
|
|
627
|
+
residual_std: float,
|
|
628
|
+
n_values: int,
|
|
629
|
+
future_index: int,
|
|
630
|
+
time_indices: np.ndarray,
|
|
631
|
+
) -> float:
|
|
632
|
+
"""Calculate margin of error for confidence interval."""
|
|
633
|
+
mean_time: float = float(np.mean(time_indices))
|
|
634
|
+
sum_sq_diff: float = float(np.sum((time_indices - mean_time) ** 2))
|
|
635
|
+
numerator: float = (future_index - mean_time) ** 2
|
|
636
|
+
|
|
637
|
+
sqrt_term: float = float(np.sqrt(1 + 1 / n_values + numerator / sum_sq_diff))
|
|
638
|
+
return t_value * residual_std * sqrt_term
|
|
639
|
+
|
|
640
|
+
def _assess_prediction_risk(self, metric_name: str, predicted_value: float) -> str:
|
|
641
|
+
"""Assess risk level based on predicted value."""
|
|
642
|
+
if metric_name == "quality_score":
|
|
643
|
+
return self._assess_quality_score_risk(predicted_value)
|
|
644
|
+
# coverage_percent
|
|
645
|
+
return self._assess_coverage_risk(predicted_value)
|
|
646
|
+
|
|
647
|
+
def _assess_quality_score_risk(self, predicted_value: float) -> str:
|
|
648
|
+
"""Assess risk for quality score predictions."""
|
|
649
|
+
if predicted_value < 70:
|
|
650
|
+
return "critical"
|
|
651
|
+
elif predicted_value < 80:
|
|
652
|
+
return "high"
|
|
653
|
+
elif predicted_value < 90:
|
|
654
|
+
return "medium"
|
|
655
|
+
return "low"
|
|
656
|
+
|
|
657
|
+
def _assess_coverage_risk(self, predicted_value: float) -> str:
|
|
658
|
+
"""Assess risk for coverage predictions."""
|
|
659
|
+
if predicted_value < 70:
|
|
660
|
+
return "high"
|
|
661
|
+
elif predicted_value < 85:
|
|
662
|
+
return "medium"
|
|
663
|
+
return "low"
|
|
664
|
+
|
|
665
|
+
def _generate_anomaly_recommendations(
|
|
666
|
+
self, anomalies: list[QualityAnomaly]
|
|
667
|
+
) -> list[str]:
|
|
668
|
+
"""Generate recommendations based on anomalies."""
|
|
669
|
+
recommendations = []
|
|
670
|
+
|
|
671
|
+
critical_anomalies = [
|
|
672
|
+
a for a in anomalies if a.severity == AlertSeverity.CRITICAL
|
|
673
|
+
]
|
|
674
|
+
if critical_anomalies:
|
|
675
|
+
recommendations.append(
|
|
676
|
+
f"🚨 CRITICAL: {len(critical_anomalies)} critical anomalies detected - immediate investigation required"
|
|
677
|
+
)
|
|
678
|
+
|
|
679
|
+
quality_drops = [
|
|
680
|
+
a
|
|
681
|
+
for a in anomalies
|
|
682
|
+
if a.anomaly_type == AnomalyType.DROP and a.metric_name == "quality_score"
|
|
683
|
+
]
|
|
684
|
+
if quality_drops:
|
|
685
|
+
recommendations.append(
|
|
686
|
+
"📉 Quality score drops detected - review recent commits and implement quality gates"
|
|
687
|
+
)
|
|
688
|
+
|
|
689
|
+
return recommendations
|
|
690
|
+
|
|
691
|
+
def _generate_pattern_recommendations(
|
|
692
|
+
self, patterns: list[QualityPattern]
|
|
693
|
+
) -> list[str]:
|
|
694
|
+
"""Generate recommendations based on patterns."""
|
|
695
|
+
recommendations = []
|
|
696
|
+
|
|
697
|
+
declining_correlations = [
|
|
698
|
+
p for p in patterns if p.trend_direction == TrendDirection.DECLINING
|
|
699
|
+
]
|
|
700
|
+
if declining_correlations:
|
|
701
|
+
recommendations.append(
|
|
702
|
+
f"⚠️ Negative quality correlations identified - investigate dependencies between {declining_correlations[0].metric_names}"
|
|
703
|
+
)
|
|
704
|
+
|
|
705
|
+
strong_patterns = [p for p in patterns if p.confidence > 0.8]
|
|
706
|
+
if strong_patterns:
|
|
707
|
+
recommendations.append(
|
|
708
|
+
"📊 Strong quality patterns detected - leverage insights for predictive quality management"
|
|
709
|
+
)
|
|
710
|
+
|
|
711
|
+
return recommendations
|
|
712
|
+
|
|
713
|
+
def _generate_prediction_recommendations(
|
|
714
|
+
self, predictions: list[QualityPrediction]
|
|
715
|
+
) -> list[str]:
|
|
716
|
+
"""Generate recommendations based on predictions."""
|
|
717
|
+
recommendations = []
|
|
718
|
+
|
|
719
|
+
high_risk_predictions = [
|
|
720
|
+
p for p in predictions if p.risk_assessment in ("high", "critical")
|
|
721
|
+
]
|
|
722
|
+
if high_risk_predictions:
|
|
723
|
+
metrics = [p.metric_name for p in high_risk_predictions]
|
|
724
|
+
recommendations.append(
|
|
725
|
+
f"🔮 High-risk quality forecast for {', '.join(metrics)} - proactive intervention recommended"
|
|
726
|
+
)
|
|
727
|
+
|
|
728
|
+
low_confidence_predictions = [
|
|
729
|
+
p for p in predictions if p.confidence_upper - p.confidence_lower > 20
|
|
730
|
+
]
|
|
731
|
+
if low_confidence_predictions:
|
|
732
|
+
recommendations.append(
|
|
733
|
+
"📈 Wide prediction intervals detected - increase data collection frequency for better forecasting"
|
|
734
|
+
)
|
|
735
|
+
|
|
736
|
+
return recommendations
|
|
737
|
+
|
|
738
|
+
def _generate_general_ml_insights(
|
|
739
|
+
self, anomalies: list[QualityAnomaly]
|
|
740
|
+
) -> list[str]:
|
|
741
|
+
"""Generate general ML insights."""
|
|
742
|
+
recommendations = []
|
|
743
|
+
|
|
744
|
+
if len(anomalies) > 5:
|
|
745
|
+
recommendations.append(
|
|
746
|
+
f"🤖 High anomaly frequency ({len(anomalies)}) suggests systemic quality issues - consider ML-based automated quality monitoring"
|
|
747
|
+
)
|
|
748
|
+
|
|
749
|
+
return recommendations
|
|
750
|
+
|
|
751
|
+
def generate_ml_recommendations(
|
|
752
|
+
self,
|
|
753
|
+
anomalies: list[QualityAnomaly],
|
|
754
|
+
patterns: list[QualityPattern],
|
|
755
|
+
predictions: list[QualityPrediction],
|
|
756
|
+
) -> list[str]:
|
|
757
|
+
"""Generate intelligent recommendations based on ML analysis."""
|
|
758
|
+
recommendations = []
|
|
759
|
+
|
|
760
|
+
recommendations.extend(self._generate_anomaly_recommendations(anomalies))
|
|
761
|
+
recommendations.extend(self._generate_pattern_recommendations(patterns))
|
|
762
|
+
recommendations.extend(self._generate_prediction_recommendations(predictions))
|
|
763
|
+
recommendations.extend(self._generate_general_ml_insights(anomalies))
|
|
764
|
+
|
|
765
|
+
if not recommendations:
|
|
766
|
+
recommendations.append(
|
|
767
|
+
"✅ Quality metrics show stable patterns with no significant anomalies detected - maintain current practices"
|
|
768
|
+
)
|
|
769
|
+
|
|
770
|
+
return recommendations
|
|
771
|
+
|
|
772
|
+
    def generate_comprehensive_insights(
        self, analysis_days: int = 30, prediction_days: int = 14
    ) -> QualityInsights:
        """Generate comprehensive quality insights with ML analysis.

        Combines anomaly detection, pattern mining (over a window twice as
        long as the anomaly window), forecasting and recommendation
        generation into a single QualityInsights report.
        """
        # Collect all analysis results
        anomalies = self.detect_anomalies(days=analysis_days)
        patterns = self.identify_patterns(days=analysis_days * 2)
        predictions = self.generate_advanced_predictions(horizon_days=prediction_days)
        recommendations = self.generate_ml_recommendations(
            anomalies, patterns, predictions
        )

        # Calculate derived metrics
        health_score, risk_level = self._calculate_health_metrics(
            anomalies, predictions
        )

        return QualityInsights(
            anomalies=anomalies,
            patterns=patterns,
            predictions=predictions,
            recommendations=recommendations,
            overall_health_score=health_score,
            risk_level=risk_level,
        )
|
|
797
|
+
|
|
798
|
+
def _calculate_health_metrics(
|
|
799
|
+
self, anomalies: list[QualityAnomaly], predictions: list[QualityPrediction]
|
|
800
|
+
) -> tuple[float, str]:
|
|
801
|
+
"""Calculate overall health score and risk level."""
|
|
802
|
+
anomaly_counts = self._count_anomalies_by_severity(anomalies)
|
|
803
|
+
risk_prediction_count = self._count_high_risk_predictions(predictions)
|
|
804
|
+
|
|
805
|
+
health_score = self._compute_health_score(anomaly_counts, risk_prediction_count)
|
|
806
|
+
risk_level = self._determine_risk_level(health_score)
|
|
807
|
+
|
|
808
|
+
return health_score, risk_level
|
|
809
|
+
|
|
810
|
+
def _count_anomalies_by_severity(
|
|
811
|
+
self, anomalies: list[QualityAnomaly]
|
|
812
|
+
) -> dict[str, int]:
|
|
813
|
+
"""Count anomalies by severity level."""
|
|
814
|
+
return {
|
|
815
|
+
"critical": len(
|
|
816
|
+
[a for a in anomalies if a.severity == AlertSeverity.CRITICAL]
|
|
817
|
+
),
|
|
818
|
+
"warning": len(
|
|
819
|
+
[a for a in anomalies if a.severity == AlertSeverity.WARNING]
|
|
820
|
+
),
|
|
821
|
+
}
|
|
822
|
+
|
|
823
|
+
def _count_high_risk_predictions(self, predictions: list[QualityPrediction]) -> int:
|
|
824
|
+
"""Count predictions with high or critical risk assessment."""
|
|
825
|
+
return len(
|
|
826
|
+
[p for p in predictions if p.risk_assessment in ("high", "critical")]
|
|
827
|
+
)
|
|
828
|
+
|
|
829
|
+
def _compute_health_score(
|
|
830
|
+
self, anomaly_counts: dict[str, int], risk_predictions: int
|
|
831
|
+
) -> float:
|
|
832
|
+
"""Compute health score based on anomalies and risk predictions."""
|
|
833
|
+
health_score = 1.0
|
|
834
|
+
health_score -= (
|
|
835
|
+
anomaly_counts["critical"] * 0.2
|
|
836
|
+
) # Critical anomalies heavily impact health
|
|
837
|
+
health_score -= (
|
|
838
|
+
anomaly_counts["warning"] * 0.1
|
|
839
|
+
) # Warning anomalies moderately impact health
|
|
840
|
+
health_score -= risk_predictions * 0.15 # High-risk predictions impact health
|
|
841
|
+
return max(0.0, min(1.0, health_score))
|
|
842
|
+
|
|
843
|
+
def _determine_risk_level(self, health_score: float) -> str:
|
|
844
|
+
"""Determine overall risk level based on health score."""
|
|
845
|
+
if health_score < 0.5:
|
|
846
|
+
return "critical"
|
|
847
|
+
elif health_score < 0.7:
|
|
848
|
+
return "high"
|
|
849
|
+
elif health_score < 0.85:
|
|
850
|
+
return "medium"
|
|
851
|
+
return "low"
|
|
852
|
+
|
|
853
|
+
def export_insights(self, insights: QualityInsights, output_path: Path) -> None:
|
|
854
|
+
"""Export quality insights to JSON file."""
|
|
855
|
+
with output_path.open("w") as f:
|
|
856
|
+
json.dump(insights.to_dict(), f, indent=2, default=str)
|
|
857
|
+
|
|
858
|
+
# Protocol methods required by QualityIntelligenceProtocol
|
|
859
|
+
def analyze_quality_trends(self) -> dict[str, t.Any]:
|
|
860
|
+
"""Analyze quality trends."""
|
|
861
|
+
# Use existing identify_patterns method to analyze trends
|
|
862
|
+
patterns = self.identify_patterns()
|
|
863
|
+
trend_analysis = {
|
|
864
|
+
"total_patterns": len(patterns),
|
|
865
|
+
"patterns_by_type": {
|
|
866
|
+
"cyclic": len(
|
|
867
|
+
[p for p in patterns if p.pattern_type == PatternType.CYCLIC]
|
|
868
|
+
),
|
|
869
|
+
"seasonal": len(
|
|
870
|
+
[p for p in patterns if p.pattern_type == PatternType.SEASONAL]
|
|
871
|
+
),
|
|
872
|
+
"correlation": len(
|
|
873
|
+
[p for p in patterns if p.pattern_type == PatternType.CORRELATION]
|
|
874
|
+
),
|
|
875
|
+
"regression": len(
|
|
876
|
+
[p for p in patterns if p.pattern_type == PatternType.REGRESSION]
|
|
877
|
+
),
|
|
878
|
+
"improvement": len(
|
|
879
|
+
[p for p in patterns if p.pattern_type == PatternType.IMPROVEMENT]
|
|
880
|
+
),
|
|
881
|
+
},
|
|
882
|
+
"trend_directions": {
|
|
883
|
+
"improving": len(
|
|
884
|
+
[
|
|
885
|
+
p
|
|
886
|
+
for p in patterns
|
|
887
|
+
if p.trend_direction == TrendDirection.IMPROVING
|
|
888
|
+
]
|
|
889
|
+
),
|
|
890
|
+
"declining": len(
|
|
891
|
+
[
|
|
892
|
+
p
|
|
893
|
+
for p in patterns
|
|
894
|
+
if p.trend_direction == TrendDirection.DECLINING
|
|
895
|
+
]
|
|
896
|
+
),
|
|
897
|
+
"stable": len(
|
|
898
|
+
[p for p in patterns if p.trend_direction == TrendDirection.STABLE]
|
|
899
|
+
),
|
|
900
|
+
"volatile": len(
|
|
901
|
+
[
|
|
902
|
+
p
|
|
903
|
+
for p in patterns
|
|
904
|
+
if p.trend_direction == TrendDirection.VOLATILE
|
|
905
|
+
]
|
|
906
|
+
),
|
|
907
|
+
},
|
|
908
|
+
"generated_at": datetime.now().isoformat(),
|
|
909
|
+
}
|
|
910
|
+
return trend_analysis
|
|
911
|
+
|
|
912
|
+
def predict_quality_issues(self) -> list[dict[str, t.Any]]:
|
|
913
|
+
"""Predict potential quality issues."""
|
|
914
|
+
predictions = self.generate_advanced_predictions()
|
|
915
|
+
|
|
916
|
+
return [
|
|
917
|
+
{
|
|
918
|
+
"metric": pred.metric_name,
|
|
919
|
+
"predicted_value": pred.predicted_value,
|
|
920
|
+
"risk_level": pred.risk_assessment,
|
|
921
|
+
"confidence_interval": {
|
|
922
|
+
"lower": pred.confidence_lower,
|
|
923
|
+
"upper": pred.confidence_upper,
|
|
924
|
+
},
|
|
925
|
+
"prediction_horizon": pred.prediction_horizon_days,
|
|
926
|
+
"factors": pred.factors,
|
|
927
|
+
}
|
|
928
|
+
for pred in predictions
|
|
929
|
+
if pred.risk_assessment in ("high", "critical")
|
|
930
|
+
]
|
|
931
|
+
|
|
932
|
+
def recommend_improvements(self) -> list[dict[str, t.Any]]:
|
|
933
|
+
"""Recommend quality improvements."""
|
|
934
|
+
# Generate basic analysis to get data for recommendations
|
|
935
|
+
anomalies = self.detect_anomalies()
|
|
936
|
+
patterns = self.identify_patterns()
|
|
937
|
+
predictions = self.generate_advanced_predictions()
|
|
938
|
+
|
|
939
|
+
recommendations = self.generate_ml_recommendations(
|
|
940
|
+
anomalies, patterns, predictions
|
|
941
|
+
)
|
|
942
|
+
|
|
943
|
+
# Convert to required format
|
|
944
|
+
return [{"message": rec} for rec in recommendations]
|
|
945
|
+
|
|
946
|
+
def get_intelligence_report(self) -> dict[str, t.Any]:
|
|
947
|
+
"""Get quality intelligence report."""
|
|
948
|
+
insights = self.generate_comprehensive_insights()
|
|
949
|
+
return insights.to_dict()
|