crackerjack 0.18.2__py3-none-any.whl → 0.45.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- crackerjack/README.md +19 -0
- crackerjack/__init__.py +96 -2
- crackerjack/__main__.py +637 -138
- crackerjack/adapters/README.md +18 -0
- crackerjack/adapters/__init__.py +39 -0
- crackerjack/adapters/_output_paths.py +167 -0
- crackerjack/adapters/_qa_adapter_base.py +309 -0
- crackerjack/adapters/_tool_adapter_base.py +706 -0
- crackerjack/adapters/ai/README.md +65 -0
- crackerjack/adapters/ai/__init__.py +5 -0
- crackerjack/adapters/ai/claude.py +853 -0
- crackerjack/adapters/complexity/README.md +53 -0
- crackerjack/adapters/complexity/__init__.py +10 -0
- crackerjack/adapters/complexity/complexipy.py +641 -0
- crackerjack/adapters/dependency/__init__.py +22 -0
- crackerjack/adapters/dependency/pip_audit.py +418 -0
- crackerjack/adapters/format/README.md +72 -0
- crackerjack/adapters/format/__init__.py +11 -0
- crackerjack/adapters/format/mdformat.py +313 -0
- crackerjack/adapters/format/ruff.py +516 -0
- crackerjack/adapters/lint/README.md +47 -0
- crackerjack/adapters/lint/__init__.py +11 -0
- crackerjack/adapters/lint/codespell.py +273 -0
- crackerjack/adapters/lsp/README.md +49 -0
- crackerjack/adapters/lsp/__init__.py +27 -0
- crackerjack/adapters/lsp/_base.py +194 -0
- crackerjack/adapters/lsp/_client.py +358 -0
- crackerjack/adapters/lsp/_manager.py +193 -0
- crackerjack/adapters/lsp/skylos.py +283 -0
- crackerjack/adapters/lsp/zuban.py +557 -0
- crackerjack/adapters/refactor/README.md +59 -0
- crackerjack/adapters/refactor/__init__.py +12 -0
- crackerjack/adapters/refactor/creosote.py +318 -0
- crackerjack/adapters/refactor/refurb.py +406 -0
- crackerjack/adapters/refactor/skylos.py +494 -0
- crackerjack/adapters/sast/README.md +132 -0
- crackerjack/adapters/sast/__init__.py +32 -0
- crackerjack/adapters/sast/_base.py +201 -0
- crackerjack/adapters/sast/bandit.py +423 -0
- crackerjack/adapters/sast/pyscn.py +405 -0
- crackerjack/adapters/sast/semgrep.py +241 -0
- crackerjack/adapters/security/README.md +111 -0
- crackerjack/adapters/security/__init__.py +17 -0
- crackerjack/adapters/security/gitleaks.py +339 -0
- crackerjack/adapters/type/README.md +52 -0
- crackerjack/adapters/type/__init__.py +12 -0
- crackerjack/adapters/type/pyrefly.py +402 -0
- crackerjack/adapters/type/ty.py +402 -0
- crackerjack/adapters/type/zuban.py +522 -0
- crackerjack/adapters/utility/README.md +51 -0
- crackerjack/adapters/utility/__init__.py +10 -0
- crackerjack/adapters/utility/checks.py +884 -0
- crackerjack/agents/README.md +264 -0
- crackerjack/agents/__init__.py +66 -0
- crackerjack/agents/architect_agent.py +238 -0
- crackerjack/agents/base.py +167 -0
- crackerjack/agents/claude_code_bridge.py +641 -0
- crackerjack/agents/coordinator.py +600 -0
- crackerjack/agents/documentation_agent.py +520 -0
- crackerjack/agents/dry_agent.py +585 -0
- crackerjack/agents/enhanced_coordinator.py +279 -0
- crackerjack/agents/enhanced_proactive_agent.py +185 -0
- crackerjack/agents/error_middleware.py +53 -0
- crackerjack/agents/formatting_agent.py +230 -0
- crackerjack/agents/helpers/__init__.py +9 -0
- crackerjack/agents/helpers/performance/__init__.py +22 -0
- crackerjack/agents/helpers/performance/performance_ast_analyzer.py +357 -0
- crackerjack/agents/helpers/performance/performance_pattern_detector.py +909 -0
- crackerjack/agents/helpers/performance/performance_recommender.py +572 -0
- crackerjack/agents/helpers/refactoring/__init__.py +22 -0
- crackerjack/agents/helpers/refactoring/code_transformer.py +536 -0
- crackerjack/agents/helpers/refactoring/complexity_analyzer.py +344 -0
- crackerjack/agents/helpers/refactoring/dead_code_detector.py +437 -0
- crackerjack/agents/helpers/test_creation/__init__.py +19 -0
- crackerjack/agents/helpers/test_creation/test_ast_analyzer.py +216 -0
- crackerjack/agents/helpers/test_creation/test_coverage_analyzer.py +643 -0
- crackerjack/agents/helpers/test_creation/test_template_generator.py +1031 -0
- crackerjack/agents/import_optimization_agent.py +1181 -0
- crackerjack/agents/performance_agent.py +325 -0
- crackerjack/agents/performance_helpers.py +205 -0
- crackerjack/agents/proactive_agent.py +55 -0
- crackerjack/agents/refactoring_agent.py +511 -0
- crackerjack/agents/refactoring_helpers.py +247 -0
- crackerjack/agents/security_agent.py +793 -0
- crackerjack/agents/semantic_agent.py +479 -0
- crackerjack/agents/semantic_helpers.py +356 -0
- crackerjack/agents/test_creation_agent.py +570 -0
- crackerjack/agents/test_specialist_agent.py +526 -0
- crackerjack/agents/tracker.py +110 -0
- crackerjack/api.py +647 -0
- crackerjack/cli/README.md +394 -0
- crackerjack/cli/__init__.py +24 -0
- crackerjack/cli/cache_handlers.py +209 -0
- crackerjack/cli/cache_handlers_enhanced.py +680 -0
- crackerjack/cli/facade.py +162 -0
- crackerjack/cli/formatting.py +13 -0
- crackerjack/cli/handlers/__init__.py +85 -0
- crackerjack/cli/handlers/advanced.py +103 -0
- crackerjack/cli/handlers/ai_features.py +62 -0
- crackerjack/cli/handlers/analytics.py +479 -0
- crackerjack/cli/handlers/changelog.py +271 -0
- crackerjack/cli/handlers/config_handlers.py +16 -0
- crackerjack/cli/handlers/coverage.py +84 -0
- crackerjack/cli/handlers/documentation.py +280 -0
- crackerjack/cli/handlers/main_handlers.py +497 -0
- crackerjack/cli/handlers/monitoring.py +371 -0
- crackerjack/cli/handlers.py +700 -0
- crackerjack/cli/interactive.py +488 -0
- crackerjack/cli/options.py +1216 -0
- crackerjack/cli/semantic_handlers.py +292 -0
- crackerjack/cli/utils.py +19 -0
- crackerjack/cli/version.py +19 -0
- crackerjack/code_cleaner.py +1307 -0
- crackerjack/config/README.md +472 -0
- crackerjack/config/__init__.py +275 -0
- crackerjack/config/global_lock_config.py +207 -0
- crackerjack/config/hooks.py +390 -0
- crackerjack/config/loader.py +239 -0
- crackerjack/config/settings.py +141 -0
- crackerjack/config/tool_commands.py +331 -0
- crackerjack/core/README.md +393 -0
- crackerjack/core/__init__.py +0 -0
- crackerjack/core/async_workflow_orchestrator.py +738 -0
- crackerjack/core/autofix_coordinator.py +282 -0
- crackerjack/core/container.py +105 -0
- crackerjack/core/enhanced_container.py +583 -0
- crackerjack/core/file_lifecycle.py +472 -0
- crackerjack/core/performance.py +244 -0
- crackerjack/core/performance_monitor.py +357 -0
- crackerjack/core/phase_coordinator.py +1227 -0
- crackerjack/core/proactive_workflow.py +267 -0
- crackerjack/core/resource_manager.py +425 -0
- crackerjack/core/retry.py +275 -0
- crackerjack/core/service_watchdog.py +601 -0
- crackerjack/core/session_coordinator.py +239 -0
- crackerjack/core/timeout_manager.py +563 -0
- crackerjack/core/websocket_lifecycle.py +410 -0
- crackerjack/core/workflow/__init__.py +21 -0
- crackerjack/core/workflow/workflow_ai_coordinator.py +863 -0
- crackerjack/core/workflow/workflow_event_orchestrator.py +1107 -0
- crackerjack/core/workflow/workflow_issue_parser.py +714 -0
- crackerjack/core/workflow/workflow_phase_executor.py +1158 -0
- crackerjack/core/workflow/workflow_security_gates.py +400 -0
- crackerjack/core/workflow_orchestrator.py +2243 -0
- crackerjack/data/README.md +11 -0
- crackerjack/data/__init__.py +8 -0
- crackerjack/data/models.py +79 -0
- crackerjack/data/repository.py +210 -0
- crackerjack/decorators/README.md +180 -0
- crackerjack/decorators/__init__.py +35 -0
- crackerjack/decorators/error_handling.py +649 -0
- crackerjack/decorators/error_handling_decorators.py +334 -0
- crackerjack/decorators/helpers.py +58 -0
- crackerjack/decorators/patterns.py +281 -0
- crackerjack/decorators/utils.py +58 -0
- crackerjack/docs/INDEX.md +11 -0
- crackerjack/docs/README.md +11 -0
- crackerjack/docs/generated/api/API_REFERENCE.md +10895 -0
- crackerjack/docs/generated/api/CLI_REFERENCE.md +109 -0
- crackerjack/docs/generated/api/CROSS_REFERENCES.md +1755 -0
- crackerjack/docs/generated/api/PROTOCOLS.md +3 -0
- crackerjack/docs/generated/api/SERVICES.md +1252 -0
- crackerjack/documentation/README.md +11 -0
- crackerjack/documentation/__init__.py +31 -0
- crackerjack/documentation/ai_templates.py +756 -0
- crackerjack/documentation/dual_output_generator.py +767 -0
- crackerjack/documentation/mkdocs_integration.py +518 -0
- crackerjack/documentation/reference_generator.py +1065 -0
- crackerjack/dynamic_config.py +678 -0
- crackerjack/errors.py +378 -0
- crackerjack/events/README.md +11 -0
- crackerjack/events/__init__.py +16 -0
- crackerjack/events/telemetry.py +175 -0
- crackerjack/events/workflow_bus.py +346 -0
- crackerjack/exceptions/README.md +301 -0
- crackerjack/exceptions/__init__.py +5 -0
- crackerjack/exceptions/config.py +4 -0
- crackerjack/exceptions/tool_execution_error.py +245 -0
- crackerjack/executors/README.md +591 -0
- crackerjack/executors/__init__.py +13 -0
- crackerjack/executors/async_hook_executor.py +938 -0
- crackerjack/executors/cached_hook_executor.py +316 -0
- crackerjack/executors/hook_executor.py +1295 -0
- crackerjack/executors/hook_lock_manager.py +708 -0
- crackerjack/executors/individual_hook_executor.py +739 -0
- crackerjack/executors/lsp_aware_hook_executor.py +349 -0
- crackerjack/executors/progress_hook_executor.py +282 -0
- crackerjack/executors/tool_proxy.py +433 -0
- crackerjack/hooks/README.md +485 -0
- crackerjack/hooks/lsp_hook.py +93 -0
- crackerjack/intelligence/README.md +557 -0
- crackerjack/intelligence/__init__.py +37 -0
- crackerjack/intelligence/adaptive_learning.py +693 -0
- crackerjack/intelligence/agent_orchestrator.py +485 -0
- crackerjack/intelligence/agent_registry.py +377 -0
- crackerjack/intelligence/agent_selector.py +439 -0
- crackerjack/intelligence/integration.py +250 -0
- crackerjack/interactive.py +719 -0
- crackerjack/managers/README.md +369 -0
- crackerjack/managers/__init__.py +11 -0
- crackerjack/managers/async_hook_manager.py +135 -0
- crackerjack/managers/hook_manager.py +585 -0
- crackerjack/managers/publish_manager.py +631 -0
- crackerjack/managers/test_command_builder.py +391 -0
- crackerjack/managers/test_executor.py +474 -0
- crackerjack/managers/test_manager.py +1357 -0
- crackerjack/managers/test_progress.py +187 -0
- crackerjack/mcp/README.md +374 -0
- crackerjack/mcp/__init__.py +0 -0
- crackerjack/mcp/cache.py +352 -0
- crackerjack/mcp/client_runner.py +121 -0
- crackerjack/mcp/context.py +802 -0
- crackerjack/mcp/dashboard.py +657 -0
- crackerjack/mcp/enhanced_progress_monitor.py +493 -0
- crackerjack/mcp/file_monitor.py +394 -0
- crackerjack/mcp/progress_components.py +607 -0
- crackerjack/mcp/progress_monitor.py +1016 -0
- crackerjack/mcp/rate_limiter.py +336 -0
- crackerjack/mcp/server.py +24 -0
- crackerjack/mcp/server_core.py +526 -0
- crackerjack/mcp/service_watchdog.py +505 -0
- crackerjack/mcp/state.py +407 -0
- crackerjack/mcp/task_manager.py +259 -0
- crackerjack/mcp/tools/README.md +27 -0
- crackerjack/mcp/tools/__init__.py +19 -0
- crackerjack/mcp/tools/core_tools.py +469 -0
- crackerjack/mcp/tools/error_analyzer.py +283 -0
- crackerjack/mcp/tools/execution_tools.py +384 -0
- crackerjack/mcp/tools/intelligence_tool_registry.py +46 -0
- crackerjack/mcp/tools/intelligence_tools.py +264 -0
- crackerjack/mcp/tools/monitoring_tools.py +628 -0
- crackerjack/mcp/tools/proactive_tools.py +367 -0
- crackerjack/mcp/tools/progress_tools.py +222 -0
- crackerjack/mcp/tools/semantic_tools.py +584 -0
- crackerjack/mcp/tools/utility_tools.py +358 -0
- crackerjack/mcp/tools/workflow_executor.py +699 -0
- crackerjack/mcp/websocket/README.md +31 -0
- crackerjack/mcp/websocket/__init__.py +14 -0
- crackerjack/mcp/websocket/app.py +54 -0
- crackerjack/mcp/websocket/endpoints.py +492 -0
- crackerjack/mcp/websocket/event_bridge.py +188 -0
- crackerjack/mcp/websocket/jobs.py +406 -0
- crackerjack/mcp/websocket/monitoring/__init__.py +25 -0
- crackerjack/mcp/websocket/monitoring/api/__init__.py +19 -0
- crackerjack/mcp/websocket/monitoring/api/dependencies.py +141 -0
- crackerjack/mcp/websocket/monitoring/api/heatmap.py +154 -0
- crackerjack/mcp/websocket/monitoring/api/intelligence.py +199 -0
- crackerjack/mcp/websocket/monitoring/api/metrics.py +203 -0
- crackerjack/mcp/websocket/monitoring/api/telemetry.py +101 -0
- crackerjack/mcp/websocket/monitoring/dashboard.py +18 -0
- crackerjack/mcp/websocket/monitoring/factory.py +109 -0
- crackerjack/mcp/websocket/monitoring/filters.py +10 -0
- crackerjack/mcp/websocket/monitoring/metrics.py +64 -0
- crackerjack/mcp/websocket/monitoring/models.py +90 -0
- crackerjack/mcp/websocket/monitoring/utils.py +171 -0
- crackerjack/mcp/websocket/monitoring/websocket_manager.py +78 -0
- crackerjack/mcp/websocket/monitoring/websockets/__init__.py +17 -0
- crackerjack/mcp/websocket/monitoring/websockets/dependencies.py +126 -0
- crackerjack/mcp/websocket/monitoring/websockets/heatmap.py +176 -0
- crackerjack/mcp/websocket/monitoring/websockets/intelligence.py +291 -0
- crackerjack/mcp/websocket/monitoring/websockets/metrics.py +291 -0
- crackerjack/mcp/websocket/monitoring_endpoints.py +21 -0
- crackerjack/mcp/websocket/server.py +174 -0
- crackerjack/mcp/websocket/websocket_handler.py +276 -0
- crackerjack/mcp/websocket_server.py +10 -0
- crackerjack/models/README.md +308 -0
- crackerjack/models/__init__.py +40 -0
- crackerjack/models/config.py +730 -0
- crackerjack/models/config_adapter.py +265 -0
- crackerjack/models/protocols.py +1535 -0
- crackerjack/models/pydantic_models.py +320 -0
- crackerjack/models/qa_config.py +145 -0
- crackerjack/models/qa_results.py +134 -0
- crackerjack/models/resource_protocols.py +299 -0
- crackerjack/models/results.py +35 -0
- crackerjack/models/semantic_models.py +258 -0
- crackerjack/models/task.py +173 -0
- crackerjack/models/test_models.py +60 -0
- crackerjack/monitoring/README.md +11 -0
- crackerjack/monitoring/__init__.py +0 -0
- crackerjack/monitoring/ai_agent_watchdog.py +405 -0
- crackerjack/monitoring/metrics_collector.py +427 -0
- crackerjack/monitoring/regression_prevention.py +580 -0
- crackerjack/monitoring/websocket_server.py +406 -0
- crackerjack/orchestration/README.md +340 -0
- crackerjack/orchestration/__init__.py +43 -0
- crackerjack/orchestration/advanced_orchestrator.py +894 -0
- crackerjack/orchestration/cache/README.md +312 -0
- crackerjack/orchestration/cache/__init__.py +37 -0
- crackerjack/orchestration/cache/memory_cache.py +338 -0
- crackerjack/orchestration/cache/tool_proxy_cache.py +340 -0
- crackerjack/orchestration/config.py +297 -0
- crackerjack/orchestration/coverage_improvement.py +180 -0
- crackerjack/orchestration/execution_strategies.py +361 -0
- crackerjack/orchestration/hook_orchestrator.py +1398 -0
- crackerjack/orchestration/strategies/README.md +401 -0
- crackerjack/orchestration/strategies/__init__.py +39 -0
- crackerjack/orchestration/strategies/adaptive_strategy.py +630 -0
- crackerjack/orchestration/strategies/parallel_strategy.py +237 -0
- crackerjack/orchestration/strategies/sequential_strategy.py +299 -0
- crackerjack/orchestration/test_progress_streamer.py +647 -0
- crackerjack/plugins/README.md +11 -0
- crackerjack/plugins/__init__.py +15 -0
- crackerjack/plugins/base.py +200 -0
- crackerjack/plugins/hooks.py +254 -0
- crackerjack/plugins/loader.py +335 -0
- crackerjack/plugins/managers.py +264 -0
- crackerjack/py313.py +191 -0
- crackerjack/security/README.md +11 -0
- crackerjack/security/__init__.py +0 -0
- crackerjack/security/audit.py +197 -0
- crackerjack/services/README.md +374 -0
- crackerjack/services/__init__.py +9 -0
- crackerjack/services/ai/README.md +295 -0
- crackerjack/services/ai/__init__.py +7 -0
- crackerjack/services/ai/advanced_optimizer.py +878 -0
- crackerjack/services/ai/contextual_ai_assistant.py +542 -0
- crackerjack/services/ai/embeddings.py +444 -0
- crackerjack/services/ai/intelligent_commit.py +328 -0
- crackerjack/services/ai/predictive_analytics.py +510 -0
- crackerjack/services/anomaly_detector.py +392 -0
- crackerjack/services/api_extractor.py +617 -0
- crackerjack/services/backup_service.py +467 -0
- crackerjack/services/bounded_status_operations.py +530 -0
- crackerjack/services/cache.py +369 -0
- crackerjack/services/changelog_automation.py +399 -0
- crackerjack/services/command_execution_service.py +305 -0
- crackerjack/services/config_integrity.py +132 -0
- crackerjack/services/config_merge.py +546 -0
- crackerjack/services/config_service.py +198 -0
- crackerjack/services/config_template.py +493 -0
- crackerjack/services/coverage_badge_service.py +173 -0
- crackerjack/services/coverage_ratchet.py +381 -0
- crackerjack/services/debug.py +733 -0
- crackerjack/services/dependency_analyzer.py +460 -0
- crackerjack/services/dependency_monitor.py +622 -0
- crackerjack/services/documentation_generator.py +493 -0
- crackerjack/services/documentation_service.py +704 -0
- crackerjack/services/enhanced_filesystem.py +497 -0
- crackerjack/services/enterprise_optimizer.py +865 -0
- crackerjack/services/error_pattern_analyzer.py +676 -0
- crackerjack/services/file_filter.py +221 -0
- crackerjack/services/file_hasher.py +149 -0
- crackerjack/services/file_io_service.py +361 -0
- crackerjack/services/file_modifier.py +615 -0
- crackerjack/services/filesystem.py +381 -0
- crackerjack/services/git.py +422 -0
- crackerjack/services/health_metrics.py +615 -0
- crackerjack/services/heatmap_generator.py +744 -0
- crackerjack/services/incremental_executor.py +380 -0
- crackerjack/services/initialization.py +823 -0
- crackerjack/services/input_validator.py +668 -0
- crackerjack/services/intelligent_commit.py +327 -0
- crackerjack/services/log_manager.py +289 -0
- crackerjack/services/logging.py +228 -0
- crackerjack/services/lsp_client.py +628 -0
- crackerjack/services/memory_optimizer.py +414 -0
- crackerjack/services/metrics.py +587 -0
- crackerjack/services/monitoring/README.md +30 -0
- crackerjack/services/monitoring/__init__.py +9 -0
- crackerjack/services/monitoring/dependency_monitor.py +678 -0
- crackerjack/services/monitoring/error_pattern_analyzer.py +676 -0
- crackerjack/services/monitoring/health_metrics.py +716 -0
- crackerjack/services/monitoring/metrics.py +587 -0
- crackerjack/services/monitoring/performance_benchmarks.py +410 -0
- crackerjack/services/monitoring/performance_cache.py +388 -0
- crackerjack/services/monitoring/performance_monitor.py +569 -0
- crackerjack/services/parallel_executor.py +527 -0
- crackerjack/services/pattern_cache.py +333 -0
- crackerjack/services/pattern_detector.py +478 -0
- crackerjack/services/patterns/__init__.py +142 -0
- crackerjack/services/patterns/agents.py +107 -0
- crackerjack/services/patterns/code/__init__.py +15 -0
- crackerjack/services/patterns/code/detection.py +118 -0
- crackerjack/services/patterns/code/imports.py +107 -0
- crackerjack/services/patterns/code/paths.py +159 -0
- crackerjack/services/patterns/code/performance.py +119 -0
- crackerjack/services/patterns/code/replacement.py +36 -0
- crackerjack/services/patterns/core.py +212 -0
- crackerjack/services/patterns/documentation/__init__.py +14 -0
- crackerjack/services/patterns/documentation/badges_markdown.py +96 -0
- crackerjack/services/patterns/documentation/comments_blocks.py +83 -0
- crackerjack/services/patterns/documentation/docstrings.py +89 -0
- crackerjack/services/patterns/formatting.py +226 -0
- crackerjack/services/patterns/operations.py +339 -0
- crackerjack/services/patterns/security/__init__.py +23 -0
- crackerjack/services/patterns/security/code_injection.py +122 -0
- crackerjack/services/patterns/security/credentials.py +190 -0
- crackerjack/services/patterns/security/path_traversal.py +221 -0
- crackerjack/services/patterns/security/unsafe_operations.py +216 -0
- crackerjack/services/patterns/templates.py +62 -0
- crackerjack/services/patterns/testing/__init__.py +18 -0
- crackerjack/services/patterns/testing/error_patterns.py +107 -0
- crackerjack/services/patterns/testing/pytest_output.py +126 -0
- crackerjack/services/patterns/tool_output/__init__.py +16 -0
- crackerjack/services/patterns/tool_output/bandit.py +72 -0
- crackerjack/services/patterns/tool_output/other.py +97 -0
- crackerjack/services/patterns/tool_output/pyright.py +67 -0
- crackerjack/services/patterns/tool_output/ruff.py +44 -0
- crackerjack/services/patterns/url_sanitization.py +114 -0
- crackerjack/services/patterns/utilities.py +42 -0
- crackerjack/services/patterns/utils.py +339 -0
- crackerjack/services/patterns/validation.py +46 -0
- crackerjack/services/patterns/versioning.py +62 -0
- crackerjack/services/predictive_analytics.py +523 -0
- crackerjack/services/profiler.py +280 -0
- crackerjack/services/quality/README.md +415 -0
- crackerjack/services/quality/__init__.py +11 -0
- crackerjack/services/quality/anomaly_detector.py +392 -0
- crackerjack/services/quality/pattern_cache.py +333 -0
- crackerjack/services/quality/pattern_detector.py +479 -0
- crackerjack/services/quality/qa_orchestrator.py +491 -0
- crackerjack/services/quality/quality_baseline.py +395 -0
- crackerjack/services/quality/quality_baseline_enhanced.py +649 -0
- crackerjack/services/quality/quality_intelligence.py +949 -0
- crackerjack/services/regex_patterns.py +58 -0
- crackerjack/services/regex_utils.py +483 -0
- crackerjack/services/secure_path_utils.py +524 -0
- crackerjack/services/secure_status_formatter.py +450 -0
- crackerjack/services/secure_subprocess.py +635 -0
- crackerjack/services/security.py +239 -0
- crackerjack/services/security_logger.py +495 -0
- crackerjack/services/server_manager.py +411 -0
- crackerjack/services/smart_scheduling.py +167 -0
- crackerjack/services/status_authentication.py +460 -0
- crackerjack/services/status_security_manager.py +315 -0
- crackerjack/services/terminal_utils.py +0 -0
- crackerjack/services/thread_safe_status_collector.py +441 -0
- crackerjack/services/tool_filter.py +368 -0
- crackerjack/services/tool_version_service.py +43 -0
- crackerjack/services/unified_config.py +115 -0
- crackerjack/services/validation_rate_limiter.py +220 -0
- crackerjack/services/vector_store.py +689 -0
- crackerjack/services/version_analyzer.py +461 -0
- crackerjack/services/version_checker.py +223 -0
- crackerjack/services/websocket_resource_limiter.py +438 -0
- crackerjack/services/zuban_lsp_service.py +391 -0
- crackerjack/slash_commands/README.md +11 -0
- crackerjack/slash_commands/__init__.py +59 -0
- crackerjack/slash_commands/init.md +112 -0
- crackerjack/slash_commands/run.md +197 -0
- crackerjack/slash_commands/status.md +127 -0
- crackerjack/tools/README.md +11 -0
- crackerjack/tools/__init__.py +30 -0
- crackerjack/tools/_git_utils.py +105 -0
- crackerjack/tools/check_added_large_files.py +139 -0
- crackerjack/tools/check_ast.py +105 -0
- crackerjack/tools/check_json.py +103 -0
- crackerjack/tools/check_jsonschema.py +297 -0
- crackerjack/tools/check_toml.py +103 -0
- crackerjack/tools/check_yaml.py +110 -0
- crackerjack/tools/codespell_wrapper.py +72 -0
- crackerjack/tools/end_of_file_fixer.py +202 -0
- crackerjack/tools/format_json.py +128 -0
- crackerjack/tools/mdformat_wrapper.py +114 -0
- crackerjack/tools/trailing_whitespace.py +198 -0
- crackerjack/tools/validate_input_validator_patterns.py +236 -0
- crackerjack/tools/validate_regex_patterns.py +188 -0
- crackerjack/ui/README.md +11 -0
- crackerjack/ui/__init__.py +1 -0
- crackerjack/ui/dashboard_renderer.py +28 -0
- crackerjack/ui/templates/README.md +11 -0
- crackerjack/utils/console_utils.py +13 -0
- crackerjack/utils/dependency_guard.py +230 -0
- crackerjack/utils/retry_utils.py +275 -0
- crackerjack/workflows/README.md +590 -0
- crackerjack/workflows/__init__.py +46 -0
- crackerjack/workflows/actions.py +811 -0
- crackerjack/workflows/auto_fix.py +444 -0
- crackerjack/workflows/container_builder.py +499 -0
- crackerjack/workflows/definitions.py +443 -0
- crackerjack/workflows/engine.py +177 -0
- crackerjack/workflows/event_bridge.py +242 -0
- crackerjack-0.45.2.dist-info/METADATA +1678 -0
- crackerjack-0.45.2.dist-info/RECORD +478 -0
- {crackerjack-0.18.2.dist-info → crackerjack-0.45.2.dist-info}/WHEEL +1 -1
- crackerjack-0.45.2.dist-info/entry_points.txt +2 -0
- crackerjack/.gitignore +0 -14
- crackerjack/.libcst.codemod.yaml +0 -18
- crackerjack/.pdm.toml +0 -1
- crackerjack/.pre-commit-config.yaml +0 -91
- crackerjack/.pytest_cache/.gitignore +0 -2
- crackerjack/.pytest_cache/CACHEDIR.TAG +0 -4
- crackerjack/.pytest_cache/README.md +0 -8
- crackerjack/.pytest_cache/v/cache/nodeids +0 -1
- crackerjack/.pytest_cache/v/cache/stepwise +0 -1
- crackerjack/.ruff_cache/.gitignore +0 -1
- crackerjack/.ruff_cache/0.1.11/3256171999636029978 +0 -0
- crackerjack/.ruff_cache/0.1.14/602324811142551221 +0 -0
- crackerjack/.ruff_cache/0.1.4/10355199064880463147 +0 -0
- crackerjack/.ruff_cache/0.1.6/15140459877605758699 +0 -0
- crackerjack/.ruff_cache/0.1.7/1790508110482614856 +0 -0
- crackerjack/.ruff_cache/0.1.9/17041001205004563469 +0 -0
- crackerjack/.ruff_cache/0.11.2/4070660268492669020 +0 -0
- crackerjack/.ruff_cache/0.11.3/9818742842212983150 +0 -0
- crackerjack/.ruff_cache/0.11.4/9818742842212983150 +0 -0
- crackerjack/.ruff_cache/0.11.6/3557596832929915217 +0 -0
- crackerjack/.ruff_cache/0.11.7/10386934055395314831 +0 -0
- crackerjack/.ruff_cache/0.11.7/3557596832929915217 +0 -0
- crackerjack/.ruff_cache/0.11.8/530407680854991027 +0 -0
- crackerjack/.ruff_cache/0.2.0/10047773857155985907 +0 -0
- crackerjack/.ruff_cache/0.2.1/8522267973936635051 +0 -0
- crackerjack/.ruff_cache/0.2.2/18053836298936336950 +0 -0
- crackerjack/.ruff_cache/0.3.0/12548816621480535786 +0 -0
- crackerjack/.ruff_cache/0.3.3/11081883392474770722 +0 -0
- crackerjack/.ruff_cache/0.3.4/676973378459347183 +0 -0
- crackerjack/.ruff_cache/0.3.5/16311176246009842383 +0 -0
- crackerjack/.ruff_cache/0.5.7/1493622539551733492 +0 -0
- crackerjack/.ruff_cache/0.5.7/6231957614044513175 +0 -0
- crackerjack/.ruff_cache/0.5.7/9932762556785938009 +0 -0
- crackerjack/.ruff_cache/0.6.0/11982804814124138945 +0 -0
- crackerjack/.ruff_cache/0.6.0/12055761203849489982 +0 -0
- crackerjack/.ruff_cache/0.6.2/1206147804896221174 +0 -0
- crackerjack/.ruff_cache/0.6.4/1206147804896221174 +0 -0
- crackerjack/.ruff_cache/0.6.5/1206147804896221174 +0 -0
- crackerjack/.ruff_cache/0.6.7/3657366982708166874 +0 -0
- crackerjack/.ruff_cache/0.6.9/285614542852677309 +0 -0
- crackerjack/.ruff_cache/0.7.1/1024065805990144819 +0 -0
- crackerjack/.ruff_cache/0.7.1/285614542852677309 +0 -0
- crackerjack/.ruff_cache/0.7.3/16061516852537040135 +0 -0
- crackerjack/.ruff_cache/0.8.4/16354268377385700367 +0 -0
- crackerjack/.ruff_cache/0.9.10/12813592349865671909 +0 -0
- crackerjack/.ruff_cache/0.9.10/923908772239632759 +0 -0
- crackerjack/.ruff_cache/0.9.3/13948373885254993391 +0 -0
- crackerjack/.ruff_cache/0.9.9/12813592349865671909 +0 -0
- crackerjack/.ruff_cache/0.9.9/8843823720003377982 +0 -0
- crackerjack/.ruff_cache/CACHEDIR.TAG +0 -1
- crackerjack/crackerjack.py +0 -855
- crackerjack/pyproject.toml +0 -214
- crackerjack-0.18.2.dist-info/METADATA +0 -420
- crackerjack-0.18.2.dist-info/RECORD +0 -59
- crackerjack-0.18.2.dist-info/entry_points.txt +0 -4
- {crackerjack-0.18.2.dist-info → crackerjack-0.45.2.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,865 @@
|
|
|
1
|
+
"""Enterprise-scale optimization service for monitoring system."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import logging
|
|
5
|
+
import statistics
|
|
6
|
+
import threading
|
|
7
|
+
import time
|
|
8
|
+
import typing as t
|
|
9
|
+
from collections import deque
|
|
10
|
+
from concurrent.futures import ThreadPoolExecutor
|
|
11
|
+
from dataclasses import asdict, dataclass, field
|
|
12
|
+
from datetime import datetime, timedelta
|
|
13
|
+
from pathlib import Path
|
|
14
|
+
|
|
15
|
+
import psutil
|
|
16
|
+
|
|
17
|
+
logger = logging.getLogger(__name__)
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
@dataclass
|
|
21
|
+
class ResourceMetrics:
|
|
22
|
+
"""System resource utilization metrics."""
|
|
23
|
+
|
|
24
|
+
cpu_percent: float
|
|
25
|
+
memory_percent: float
|
|
26
|
+
disk_usage_percent: float
|
|
27
|
+
network_io: dict[str, int]
|
|
28
|
+
active_connections: int
|
|
29
|
+
thread_count: int
|
|
30
|
+
file_descriptors: int
|
|
31
|
+
timestamp: datetime = field(default_factory=datetime.now)
|
|
32
|
+
|
|
33
|
+
def to_dict(self) -> dict[str, t.Any]:
|
|
34
|
+
"""Convert to dictionary for JSON serialization."""
|
|
35
|
+
return {
|
|
36
|
+
"cpu_percent": self.cpu_percent,
|
|
37
|
+
"memory_percent": self.memory_percent,
|
|
38
|
+
"disk_usage_percent": self.disk_usage_percent,
|
|
39
|
+
"network_io": self.network_io,
|
|
40
|
+
"active_connections": self.active_connections,
|
|
41
|
+
"thread_count": self.thread_count,
|
|
42
|
+
"file_descriptors": self.file_descriptors,
|
|
43
|
+
"timestamp": self.timestamp.isoformat(),
|
|
44
|
+
}
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
@dataclass
|
|
48
|
+
class PerformanceProfile:
|
|
49
|
+
"""Performance profile for optimization decisions."""
|
|
50
|
+
|
|
51
|
+
workload_type: str # light, moderate, heavy, extreme
|
|
52
|
+
concurrent_clients: int
|
|
53
|
+
data_retention_days: int
|
|
54
|
+
analysis_frequency_minutes: int
|
|
55
|
+
resource_limits: dict[str, t.Any]
|
|
56
|
+
optimization_strategy: str # balanced, performance, memory, throughput
|
|
57
|
+
|
|
58
|
+
def to_dict(self) -> dict[str, t.Any]:
|
|
59
|
+
"""Convert to dictionary for JSON serialization."""
|
|
60
|
+
return asdict(self)
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
@dataclass
|
|
64
|
+
class OptimizationRecommendation:
|
|
65
|
+
"""Optimization recommendation with implementation details."""
|
|
66
|
+
|
|
67
|
+
category: str # performance, memory, storage, network
|
|
68
|
+
priority: str # critical, high, medium, low
|
|
69
|
+
title: str
|
|
70
|
+
description: str
|
|
71
|
+
impact: str
|
|
72
|
+
implementation: str
|
|
73
|
+
estimated_improvement: str
|
|
74
|
+
resource_cost: str
|
|
75
|
+
risk_level: str # low, medium, high
|
|
76
|
+
|
|
77
|
+
def to_dict(self) -> dict[str, t.Any]:
|
|
78
|
+
"""Convert to dictionary for JSON serialization."""
|
|
79
|
+
return asdict(self)
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
@dataclass
|
|
83
|
+
class ScalingMetrics:
|
|
84
|
+
"""Metrics for auto-scaling decisions."""
|
|
85
|
+
|
|
86
|
+
current_load: float # 0.0 to 1.0
|
|
87
|
+
projected_load: float
|
|
88
|
+
response_time_p95: float
|
|
89
|
+
error_rate: float
|
|
90
|
+
memory_pressure: float
|
|
91
|
+
cpu_saturation: float
|
|
92
|
+
recommended_scale_factor: float
|
|
93
|
+
confidence_score: float
|
|
94
|
+
|
|
95
|
+
def to_dict(self) -> dict[str, t.Any]:
|
|
96
|
+
"""Convert to dictionary for JSON serialization."""
|
|
97
|
+
return asdict(self)
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
class ConnectionPool:
|
|
101
|
+
"""Optimized connection pool for WebSocket management."""
|
|
102
|
+
|
|
103
|
+
def __init__(self, max_connections: int = 1000, cleanup_interval: int = 300):
|
|
104
|
+
"""Initialize connection pool with limits and cleanup."""
|
|
105
|
+
self.max_connections = max_connections
|
|
106
|
+
self.cleanup_interval = cleanup_interval
|
|
107
|
+
self.connections: dict[str, t.Any] = {}
|
|
108
|
+
self.connection_stats: dict[str, dict[str, t.Any]] = {}
|
|
109
|
+
self.last_cleanup = time.time()
|
|
110
|
+
self._lock = threading.Lock()
|
|
111
|
+
|
|
112
|
+
def add_connection(
|
|
113
|
+
self,
|
|
114
|
+
connection_id: str,
|
|
115
|
+
websocket: t.Any,
|
|
116
|
+
metadata: dict[str, t.Any] | None = None,
|
|
117
|
+
) -> None:
|
|
118
|
+
"""Add connection with automatic cleanup if at capacity."""
|
|
119
|
+
with self._lock:
|
|
120
|
+
# Clean up stale connections if at capacity
|
|
121
|
+
if len(self.connections) >= self.max_connections:
|
|
122
|
+
self._cleanup_stale_connections()
|
|
123
|
+
|
|
124
|
+
# If still at capacity, remove oldest connection
|
|
125
|
+
if len(self.connections) >= self.max_connections:
|
|
126
|
+
oldest_id = min(
|
|
127
|
+
self.connection_stats.keys(),
|
|
128
|
+
key=lambda x: self.connection_stats[x]["last_activity"],
|
|
129
|
+
)
|
|
130
|
+
self.remove_connection(oldest_id)
|
|
131
|
+
|
|
132
|
+
self.connections[connection_id] = websocket
|
|
133
|
+
self.connection_stats[connection_id] = {
|
|
134
|
+
"created": time.time(),
|
|
135
|
+
"last_activity": time.time(),
|
|
136
|
+
"message_count": 0,
|
|
137
|
+
"metadata": metadata or {},
|
|
138
|
+
}
|
|
139
|
+
|
|
140
|
+
def remove_connection(self, connection_id: str) -> None:
|
|
141
|
+
"""Remove connection and cleanup resources."""
|
|
142
|
+
with self._lock:
|
|
143
|
+
if connection_id in self.connections:
|
|
144
|
+
del self.connections[connection_id]
|
|
145
|
+
if connection_id in self.connection_stats:
|
|
146
|
+
del self.connection_stats[connection_id]
|
|
147
|
+
|
|
148
|
+
def update_activity(self, connection_id: str) -> None:
|
|
149
|
+
"""Update last activity timestamp."""
|
|
150
|
+
with self._lock:
|
|
151
|
+
if connection_id in self.connection_stats:
|
|
152
|
+
self.connection_stats[connection_id]["last_activity"] = time.time()
|
|
153
|
+
self.connection_stats[connection_id]["message_count"] += 1
|
|
154
|
+
|
|
155
|
+
def _cleanup_stale_connections(self) -> None:
|
|
156
|
+
"""Remove stale connections based on inactivity."""
|
|
157
|
+
current_time = time.time()
|
|
158
|
+
stale_threshold = 1800 # 30 minutes
|
|
159
|
+
|
|
160
|
+
stale_connections = [
|
|
161
|
+
conn_id
|
|
162
|
+
for conn_id, stats in self.connection_stats.items()
|
|
163
|
+
if current_time - stats["last_activity"] > stale_threshold
|
|
164
|
+
]
|
|
165
|
+
|
|
166
|
+
for conn_id in stale_connections:
|
|
167
|
+
self.remove_connection(conn_id)
|
|
168
|
+
|
|
169
|
+
self.last_cleanup = current_time
|
|
170
|
+
logger.info(f"Cleaned up {len(stale_connections)} stale connections")
|
|
171
|
+
|
|
172
|
+
def get_stats(self) -> dict[str, t.Any]:
|
|
173
|
+
"""Get connection pool statistics."""
|
|
174
|
+
with self._lock:
|
|
175
|
+
current_time = time.time()
|
|
176
|
+
active_count = len(
|
|
177
|
+
[
|
|
178
|
+
conn
|
|
179
|
+
for conn, stats in self.connection_stats.items()
|
|
180
|
+
if current_time - stats["last_activity"]
|
|
181
|
+
< 300 # Active in last 5 minutes
|
|
182
|
+
]
|
|
183
|
+
)
|
|
184
|
+
|
|
185
|
+
return {
|
|
186
|
+
"total_connections": len(self.connections),
|
|
187
|
+
"active_connections": active_count,
|
|
188
|
+
"max_connections": self.max_connections,
|
|
189
|
+
"utilization_percent": (len(self.connections) / self.max_connections)
|
|
190
|
+
* 100,
|
|
191
|
+
"average_message_count": statistics.mean(
|
|
192
|
+
[stats["message_count"] for stats in self.connection_stats.values()]
|
|
193
|
+
)
|
|
194
|
+
if self.connection_stats
|
|
195
|
+
else 0,
|
|
196
|
+
}
|
|
197
|
+
|
|
198
|
+
|
|
199
|
+
class DataCompactionManager:
|
|
200
|
+
"""Manages data compaction and archival for enterprise scale."""
|
|
201
|
+
|
|
202
|
+
def __init__(self, storage_dir: Path, max_storage_gb: float = 10.0):
|
|
203
|
+
"""Initialize with storage limits."""
|
|
204
|
+
self.storage_dir = Path(storage_dir)
|
|
205
|
+
self.max_storage_bytes = max_storage_gb * 1024**3
|
|
206
|
+
self.compaction_rules = self._load_compaction_rules()
|
|
207
|
+
|
|
208
|
+
def _load_compaction_rules(self) -> dict[str, dict[str, t.Any]]:
|
|
209
|
+
"""Load data retention and compaction rules."""
|
|
210
|
+
rules = {}
|
|
211
|
+
|
|
212
|
+
# Raw metrics configuration
|
|
213
|
+
rules["metrics_raw"] = self._create_metrics_raw_config()
|
|
214
|
+
|
|
215
|
+
# Hourly metrics configuration
|
|
216
|
+
rules["metrics_hourly"] = self._create_metrics_hourly_config()
|
|
217
|
+
|
|
218
|
+
# Daily metrics configuration
|
|
219
|
+
rules["metrics_daily"] = self._create_metrics_daily_config()
|
|
220
|
+
|
|
221
|
+
# Error patterns configuration
|
|
222
|
+
rules["error_patterns"] = self._create_error_patterns_config()
|
|
223
|
+
|
|
224
|
+
# Dependency graphs configuration
|
|
225
|
+
rules["dependency_graphs"] = self._create_dependency_graphs_config()
|
|
226
|
+
|
|
227
|
+
return rules
|
|
228
|
+
|
|
229
|
+
def _create_metrics_raw_config(self) -> dict[str, t.Any]:
|
|
230
|
+
"""Create configuration for raw metrics data."""
|
|
231
|
+
return {
|
|
232
|
+
"retention_days": 7,
|
|
233
|
+
"compaction_interval_hours": 1,
|
|
234
|
+
"aggregation_method": "downsample",
|
|
235
|
+
}
|
|
236
|
+
|
|
237
|
+
def _create_metrics_hourly_config(self) -> dict[str, t.Any]:
|
|
238
|
+
"""Create configuration for hourly metrics data."""
|
|
239
|
+
return {
|
|
240
|
+
"retention_days": 30,
|
|
241
|
+
"compaction_interval_hours": 24,
|
|
242
|
+
"aggregation_method": "statistical",
|
|
243
|
+
}
|
|
244
|
+
|
|
245
|
+
def _create_metrics_daily_config(self) -> dict[str, t.Any]:
|
|
246
|
+
"""Create configuration for daily metrics data."""
|
|
247
|
+
return {
|
|
248
|
+
"retention_days": 365,
|
|
249
|
+
"compaction_interval_hours": 168, # Weekly
|
|
250
|
+
"aggregation_method": "statistical",
|
|
251
|
+
}
|
|
252
|
+
|
|
253
|
+
def _create_error_patterns_config(self) -> dict[str, t.Any]:
|
|
254
|
+
"""Create configuration for error patterns data."""
|
|
255
|
+
return {
|
|
256
|
+
"retention_days": 90,
|
|
257
|
+
"compaction_interval_hours": 24,
|
|
258
|
+
"aggregation_method": "deduplication",
|
|
259
|
+
}
|
|
260
|
+
|
|
261
|
+
def _create_dependency_graphs_config(self) -> dict[str, t.Any]:
|
|
262
|
+
"""Create configuration for dependency graphs data."""
|
|
263
|
+
return {
|
|
264
|
+
"retention_days": 30,
|
|
265
|
+
"compaction_interval_hours": 24,
|
|
266
|
+
"aggregation_method": "latest_version",
|
|
267
|
+
}
|
|
268
|
+
|
|
269
|
+
def compact_data(self, data_type: str) -> dict[str, t.Any]:
|
|
270
|
+
"""Perform data compaction based on rules."""
|
|
271
|
+
if data_type not in self.compaction_rules:
|
|
272
|
+
return {"status": "error", "message": f"Unknown data type: {data_type}"}
|
|
273
|
+
|
|
274
|
+
rules = self.compaction_rules[data_type]
|
|
275
|
+
cutoff_date = self._calculate_cutoff_date(rules)
|
|
276
|
+
|
|
277
|
+
compaction_stats = self._process_data_directory(data_type, cutoff_date)
|
|
278
|
+
|
|
279
|
+
return self._build_compaction_result(data_type, rules, compaction_stats)
|
|
280
|
+
|
|
281
|
+
def _calculate_cutoff_date(self, rules: dict[str, t.Any]) -> datetime:
|
|
282
|
+
"""Calculate the cutoff date for data retention."""
|
|
283
|
+
return datetime.now() - timedelta(days=rules["retention_days"])
|
|
284
|
+
|
|
285
|
+
def _process_data_directory(
|
|
286
|
+
self, data_type: str, cutoff_date: datetime
|
|
287
|
+
) -> dict[str, int | float]:
|
|
288
|
+
"""Process files in data directory and return compaction statistics."""
|
|
289
|
+
compacted_records = 0
|
|
290
|
+
freed_space_mb: float = 0.0
|
|
291
|
+
|
|
292
|
+
data_dir = self.storage_dir / data_type
|
|
293
|
+
if data_dir.exists():
|
|
294
|
+
for file_path in data_dir.glob("**/*"):
|
|
295
|
+
if self._should_compact_file(file_path, cutoff_date):
|
|
296
|
+
file_size_mb = file_path.stat().st_size / (1024**2)
|
|
297
|
+
freed_space_mb += file_size_mb
|
|
298
|
+
compacted_records += 1
|
|
299
|
+
# In production, would actually delete/archive the file
|
|
300
|
+
# file_path.unlink()
|
|
301
|
+
|
|
302
|
+
return {
|
|
303
|
+
"compacted_records": compacted_records,
|
|
304
|
+
"freed_space_mb": freed_space_mb,
|
|
305
|
+
}
|
|
306
|
+
|
|
307
|
+
def _should_compact_file(self, file_path: Path, cutoff_date: datetime) -> bool:
|
|
308
|
+
"""Determine if a file should be compacted based on age."""
|
|
309
|
+
if not file_path.is_file():
|
|
310
|
+
return False
|
|
311
|
+
|
|
312
|
+
file_mtime = datetime.fromtimestamp(file_path.stat().st_mtime)
|
|
313
|
+
return file_mtime < cutoff_date
|
|
314
|
+
|
|
315
|
+
def _build_compaction_result(
|
|
316
|
+
self, data_type: str, rules: dict[str, t.Any], stats: dict[str, t.Any]
|
|
317
|
+
) -> dict[str, t.Any]:
|
|
318
|
+
"""Build the compaction result dictionary."""
|
|
319
|
+
return {
|
|
320
|
+
"status": "success",
|
|
321
|
+
"data_type": data_type,
|
|
322
|
+
"compacted_records": stats["compacted_records"],
|
|
323
|
+
"freed_space_mb": round(stats["freed_space_mb"], 2),
|
|
324
|
+
"retention_days": rules["retention_days"],
|
|
325
|
+
"next_compaction": datetime.now()
|
|
326
|
+
+ timedelta(hours=rules["compaction_interval_hours"]),
|
|
327
|
+
}
|
|
328
|
+
|
|
329
|
+
def get_storage_usage(self) -> dict[str, t.Any]:
|
|
330
|
+
"""Get detailed storage usage information."""
|
|
331
|
+
total_size, type_sizes = self._calculate_storage_sizes()
|
|
332
|
+
return self._build_storage_usage_report(total_size, type_sizes)
|
|
333
|
+
|
|
334
|
+
def _calculate_storage_sizes(self) -> tuple[int, dict[str, int]]:
|
|
335
|
+
"""Calculate total storage size and size by data type."""
|
|
336
|
+
total_size = 0
|
|
337
|
+
type_sizes = {}
|
|
338
|
+
|
|
339
|
+
if self.storage_dir.exists():
|
|
340
|
+
for data_type in self.compaction_rules.keys():
|
|
341
|
+
type_size = self._calculate_data_type_size(data_type)
|
|
342
|
+
type_sizes[data_type] = type_size
|
|
343
|
+
total_size += type_size
|
|
344
|
+
|
|
345
|
+
return total_size, type_sizes
|
|
346
|
+
|
|
347
|
+
def _calculate_data_type_size(self, data_type: str) -> int:
|
|
348
|
+
"""Calculate storage size for a specific data type."""
|
|
349
|
+
data_dir = self.storage_dir / data_type
|
|
350
|
+
type_size = 0
|
|
351
|
+
|
|
352
|
+
if data_dir.exists():
|
|
353
|
+
for file_path in data_dir.glob("**/*"):
|
|
354
|
+
if file_path.is_file():
|
|
355
|
+
type_size += file_path.stat().st_size
|
|
356
|
+
|
|
357
|
+
return type_size
|
|
358
|
+
|
|
359
|
+
def _build_storage_usage_report(
|
|
360
|
+
self, total_size: int, type_sizes: dict[str, int]
|
|
361
|
+
) -> dict[str, t.Any]:
|
|
362
|
+
"""Build the storage usage report dictionary."""
|
|
363
|
+
return {
|
|
364
|
+
"total_size_gb": round(total_size / (1024**3), 3),
|
|
365
|
+
"max_size_gb": round(self.max_storage_bytes / (1024**3), 3),
|
|
366
|
+
"utilization_percent": self._calculate_utilization_percent(total_size),
|
|
367
|
+
"by_type_mb": {k: round(v / (1024**2), 2) for k, v in type_sizes.items()},
|
|
368
|
+
"compaction_needed": total_size > (self.max_storage_bytes * 0.8),
|
|
369
|
+
}
|
|
370
|
+
|
|
371
|
+
def _calculate_utilization_percent(self, total_size: int) -> float:
|
|
372
|
+
"""Calculate storage utilization percentage."""
|
|
373
|
+
if self.max_storage_bytes > 0:
|
|
374
|
+
return round((total_size / self.max_storage_bytes) * 100, 2)
|
|
375
|
+
return 0.0
|
|
376
|
+
|
|
377
|
+
|
|
378
|
+
class EnterpriseOptimizer:
|
|
379
|
+
"""Enterprise-scale optimization engine for monitoring system."""
|
|
380
|
+
|
|
381
|
+
def __init__(self, config_dir: Path, storage_dir: Path):
|
|
382
|
+
"""Initialize optimizer with configuration and storage paths."""
|
|
383
|
+
self.config_dir = Path(config_dir)
|
|
384
|
+
self.storage_dir = Path(storage_dir)
|
|
385
|
+
|
|
386
|
+
# Initialize components
|
|
387
|
+
self.connection_pool = ConnectionPool()
|
|
388
|
+
self.compaction_manager = DataCompactionManager(storage_dir)
|
|
389
|
+
self.executor = ThreadPoolExecutor(max_workers=4)
|
|
390
|
+
|
|
391
|
+
# Metrics tracking
|
|
392
|
+
self.resource_history: deque[ResourceMetrics] = deque(
|
|
393
|
+
maxlen=1440
|
|
394
|
+
) # 24 hours of minute-level data
|
|
395
|
+
self.performance_profile = self._load_performance_profile()
|
|
396
|
+
|
|
397
|
+
# Optimization state
|
|
398
|
+
self.last_optimization = datetime.now()
|
|
399
|
+
self.optimization_recommendations: list[OptimizationRecommendation] = []
|
|
400
|
+
|
|
401
|
+
def _load_performance_profile(self) -> PerformanceProfile:
|
|
402
|
+
"""Load or create default performance profile."""
|
|
403
|
+
profile_path = self.config_dir / "performance_profile.json"
|
|
404
|
+
|
|
405
|
+
if profile_path.exists():
|
|
406
|
+
try:
|
|
407
|
+
with profile_path.open() as f:
|
|
408
|
+
data = json.load(f)
|
|
409
|
+
return PerformanceProfile(**data)
|
|
410
|
+
except Exception as e:
|
|
411
|
+
logger.warning(f"Failed to load performance profile: {e}")
|
|
412
|
+
|
|
413
|
+
# Return default profile for moderate workload
|
|
414
|
+
return PerformanceProfile(
|
|
415
|
+
workload_type="moderate",
|
|
416
|
+
concurrent_clients=100,
|
|
417
|
+
data_retention_days=30,
|
|
418
|
+
analysis_frequency_minutes=5,
|
|
419
|
+
resource_limits={
|
|
420
|
+
"max_memory_gb": 4.0,
|
|
421
|
+
"max_cpu_percent": 80.0,
|
|
422
|
+
"max_disk_gb": 10.0,
|
|
423
|
+
},
|
|
424
|
+
optimization_strategy="balanced",
|
|
425
|
+
)
|
|
426
|
+
|
|
427
|
+
def collect_resource_metrics(self) -> ResourceMetrics:
|
|
428
|
+
"""Collect current system resource metrics."""
|
|
429
|
+
try:
|
|
430
|
+
# CPU metrics
|
|
431
|
+
cpu_percent = psutil.cpu_percent(interval=1)
|
|
432
|
+
|
|
433
|
+
# Memory metrics
|
|
434
|
+
memory = psutil.virtual_memory()
|
|
435
|
+
memory_percent = memory.percent
|
|
436
|
+
|
|
437
|
+
# Disk metrics
|
|
438
|
+
disk_usage = psutil.disk_usage(str(self.storage_dir))
|
|
439
|
+
disk_percent = (disk_usage.used / disk_usage.total) * 100
|
|
440
|
+
|
|
441
|
+
# Network metrics
|
|
442
|
+
net_io = psutil.net_io_counters()
|
|
443
|
+
network_io = {
|
|
444
|
+
"bytes_sent": net_io.bytes_sent,
|
|
445
|
+
"bytes_recv": net_io.bytes_recv,
|
|
446
|
+
"packets_sent": net_io.packets_sent,
|
|
447
|
+
"packets_recv": net_io.packets_recv,
|
|
448
|
+
}
|
|
449
|
+
|
|
450
|
+
# Process metrics
|
|
451
|
+
process = psutil.Process()
|
|
452
|
+
active_connections = len(process.connections())
|
|
453
|
+
thread_count = process.num_threads()
|
|
454
|
+
file_descriptors = process.num_fds() if hasattr(process, "num_fds") else 0
|
|
455
|
+
|
|
456
|
+
metrics = ResourceMetrics(
|
|
457
|
+
cpu_percent=cpu_percent,
|
|
458
|
+
memory_percent=memory_percent,
|
|
459
|
+
disk_usage_percent=disk_percent,
|
|
460
|
+
network_io=network_io,
|
|
461
|
+
active_connections=active_connections,
|
|
462
|
+
thread_count=thread_count,
|
|
463
|
+
file_descriptors=file_descriptors,
|
|
464
|
+
)
|
|
465
|
+
|
|
466
|
+
# Store in history
|
|
467
|
+
self.resource_history.append(metrics)
|
|
468
|
+
|
|
469
|
+
return metrics
|
|
470
|
+
|
|
471
|
+
except Exception as e:
|
|
472
|
+
logger.error(f"Failed to collect resource metrics: {e}")
|
|
473
|
+
return ResourceMetrics(
|
|
474
|
+
cpu_percent=0.0,
|
|
475
|
+
memory_percent=0.0,
|
|
476
|
+
disk_usage_percent=0.0,
|
|
477
|
+
network_io={},
|
|
478
|
+
active_connections=0,
|
|
479
|
+
thread_count=0,
|
|
480
|
+
file_descriptors=0,
|
|
481
|
+
)
|
|
482
|
+
|
|
483
|
+
def analyze_scaling_needs(self) -> ScalingMetrics:
|
|
484
|
+
"""Analyze current load and recommend scaling actions."""
|
|
485
|
+
if len(self.resource_history) < 10:
|
|
486
|
+
# Not enough data for analysis
|
|
487
|
+
return ScalingMetrics(
|
|
488
|
+
current_load=0.0,
|
|
489
|
+
projected_load=0.0,
|
|
490
|
+
response_time_p95=100.0,
|
|
491
|
+
error_rate=0.0,
|
|
492
|
+
memory_pressure=0.0,
|
|
493
|
+
cpu_saturation=0.0,
|
|
494
|
+
recommended_scale_factor=1.0,
|
|
495
|
+
confidence_score=0.0,
|
|
496
|
+
)
|
|
497
|
+
|
|
498
|
+
recent_metrics = list[t.Any](self.resource_history)[-10:] # Last 10 minutes
|
|
499
|
+
|
|
500
|
+
# Calculate current load indicators
|
|
501
|
+
avg_cpu = statistics.mean([m.cpu_percent for m in recent_metrics])
|
|
502
|
+
avg_memory = statistics.mean([m.memory_percent for m in recent_metrics])
|
|
503
|
+
statistics.mean([m.active_connections for m in recent_metrics])
|
|
504
|
+
|
|
505
|
+
# Current load calculation (0.0 to 1.0)
|
|
506
|
+
current_load = max(avg_cpu / 100.0, avg_memory / 100.0)
|
|
507
|
+
|
|
508
|
+
# Trend analysis for projection
|
|
509
|
+
if len(recent_metrics) >= 5:
|
|
510
|
+
cpu_trend = (
|
|
511
|
+
recent_metrics[-1].cpu_percent - recent_metrics[-5].cpu_percent
|
|
512
|
+
) / 5
|
|
513
|
+
memory_trend = (
|
|
514
|
+
recent_metrics[-1].memory_percent - recent_metrics[-5].memory_percent
|
|
515
|
+
) / 5
|
|
516
|
+
projected_load = min(
|
|
517
|
+
1.0, current_load + max(cpu_trend, memory_trend) / 100.0
|
|
518
|
+
)
|
|
519
|
+
else:
|
|
520
|
+
projected_load = current_load
|
|
521
|
+
|
|
522
|
+
# Memory pressure (based on rate of increase)
|
|
523
|
+
memory_pressure = min(1.0, avg_memory / 100.0)
|
|
524
|
+
if len(recent_metrics) >= 3:
|
|
525
|
+
memory_velocity = (
|
|
526
|
+
recent_metrics[-1].memory_percent - recent_metrics[-3].memory_percent
|
|
527
|
+
) / 3
|
|
528
|
+
memory_pressure += memory_velocity / 100.0
|
|
529
|
+
|
|
530
|
+
# CPU saturation
|
|
531
|
+
cpu_saturation = min(1.0, avg_cpu / 100.0)
|
|
532
|
+
|
|
533
|
+
# Scaling recommendation
|
|
534
|
+
if projected_load > 0.8 or memory_pressure > 0.85:
|
|
535
|
+
scale_factor = 1.5 # Scale up
|
|
536
|
+
elif projected_load < 0.3 and memory_pressure < 0.4:
|
|
537
|
+
scale_factor = 0.8 # Scale down
|
|
538
|
+
else:
|
|
539
|
+
scale_factor = 1.0 # No scaling
|
|
540
|
+
|
|
541
|
+
# Confidence based on data consistency
|
|
542
|
+
cpu_variance = statistics.variance([m.cpu_percent for m in recent_metrics])
|
|
543
|
+
memory_variance = statistics.variance(
|
|
544
|
+
[m.memory_percent for m in recent_metrics]
|
|
545
|
+
)
|
|
546
|
+
confidence_score = max(0.0, 1.0 - (cpu_variance + memory_variance) / 2000.0)
|
|
547
|
+
|
|
548
|
+
return ScalingMetrics(
|
|
549
|
+
current_load=current_load,
|
|
550
|
+
projected_load=projected_load,
|
|
551
|
+
response_time_p95=100.0, # Would be measured from actual requests
|
|
552
|
+
error_rate=0.0, # Would be measured from actual errors
|
|
553
|
+
memory_pressure=memory_pressure,
|
|
554
|
+
cpu_saturation=cpu_saturation,
|
|
555
|
+
recommended_scale_factor=scale_factor,
|
|
556
|
+
confidence_score=confidence_score,
|
|
557
|
+
)
|
|
558
|
+
|
|
559
|
+
def generate_optimization_recommendations(self) -> list[OptimizationRecommendation]:
|
|
560
|
+
"""Generate optimization recommendations based on current metrics."""
|
|
561
|
+
recommendations: list[OptimizationRecommendation] = []
|
|
562
|
+
|
|
563
|
+
if not self.resource_history:
|
|
564
|
+
return recommendations
|
|
565
|
+
|
|
566
|
+
latest_metrics = self.resource_history[-1]
|
|
567
|
+
scaling_metrics = self.analyze_scaling_needs()
|
|
568
|
+
storage_usage = self.compaction_manager.get_storage_usage()
|
|
569
|
+
|
|
570
|
+
# Generate different types of recommendations
|
|
571
|
+
recommendations.extend(self._generate_cpu_recommendations(latest_metrics))
|
|
572
|
+
recommendations.extend(self._generate_memory_recommendations(latest_metrics))
|
|
573
|
+
recommendations.extend(self._generate_storage_recommendations(storage_usage))
|
|
574
|
+
recommendations.extend(self._generate_connection_recommendations())
|
|
575
|
+
recommendations.extend(self._generate_scaling_recommendations(scaling_metrics))
|
|
576
|
+
|
|
577
|
+
self.optimization_recommendations = recommendations
|
|
578
|
+
return recommendations
|
|
579
|
+
|
|
580
|
+
def _generate_cpu_recommendations(
|
|
581
|
+
self, metrics: ResourceMetrics
|
|
582
|
+
) -> list[OptimizationRecommendation]:
|
|
583
|
+
"""Generate CPU-related optimization recommendations."""
|
|
584
|
+
recommendations: list[OptimizationRecommendation] = []
|
|
585
|
+
|
|
586
|
+
if metrics.cpu_percent > 80:
|
|
587
|
+
recommendations.append(
|
|
588
|
+
OptimizationRecommendation(
|
|
589
|
+
category="performance",
|
|
590
|
+
priority="high",
|
|
591
|
+
title="High CPU Usage Detected",
|
|
592
|
+
description=f"CPU usage at {metrics.cpu_percent:.1f}%, approaching saturation",
|
|
593
|
+
impact="May cause response time degradation and request queuing",
|
|
594
|
+
implementation="Consider scaling horizontally or optimizing CPU-intensive operations",
|
|
595
|
+
estimated_improvement="20-40% response time improvement",
|
|
596
|
+
resource_cost="Medium - additional compute resources",
|
|
597
|
+
risk_level="low",
|
|
598
|
+
)
|
|
599
|
+
)
|
|
600
|
+
|
|
601
|
+
return recommendations
|
|
602
|
+
|
|
603
|
+
def _generate_memory_recommendations(
|
|
604
|
+
self, metrics: ResourceMetrics
|
|
605
|
+
) -> list[OptimizationRecommendation]:
|
|
606
|
+
"""Generate memory-related optimization recommendations."""
|
|
607
|
+
recommendations: list[OptimizationRecommendation] = []
|
|
608
|
+
|
|
609
|
+
if metrics.memory_percent > 85:
|
|
610
|
+
recommendations.append(
|
|
611
|
+
OptimizationRecommendation(
|
|
612
|
+
category="memory",
|
|
613
|
+
priority="critical",
|
|
614
|
+
title="Memory Pressure Critical",
|
|
615
|
+
description=f"Memory usage at {metrics.memory_percent:.1f}%, risk of OOM",
|
|
616
|
+
impact="High risk of application crashes and data loss",
|
|
617
|
+
implementation="Immediately reduce memory consumption or add memory resources",
|
|
618
|
+
estimated_improvement="Prevents system crashes",
|
|
619
|
+
resource_cost="High - memory upgrade or optimization effort",
|
|
620
|
+
risk_level="high",
|
|
621
|
+
)
|
|
622
|
+
)
|
|
623
|
+
|
|
624
|
+
return recommendations
|
|
625
|
+
|
|
626
|
+
def _generate_storage_recommendations(
|
|
627
|
+
self, storage_usage: dict[str, t.Any]
|
|
628
|
+
) -> list[OptimizationRecommendation]:
|
|
629
|
+
"""Generate storage-related optimization recommendations."""
|
|
630
|
+
recommendations: list[OptimizationRecommendation] = []
|
|
631
|
+
|
|
632
|
+
if storage_usage["utilization_percent"] > 80:
|
|
633
|
+
recommendations.append(
|
|
634
|
+
OptimizationRecommendation(
|
|
635
|
+
category="storage",
|
|
636
|
+
priority="high",
|
|
637
|
+
title="Storage Capacity Warning",
|
|
638
|
+
description=f"Storage at {storage_usage['utilization_percent']:.1f}% capacity",
|
|
639
|
+
impact="Risk of data collection failures and service degradation",
|
|
640
|
+
implementation="Run data compaction or extend storage capacity",
|
|
641
|
+
estimated_improvement="Frees 30-50% storage space",
|
|
642
|
+
resource_cost="Low - automated compaction process",
|
|
643
|
+
risk_level="low",
|
|
644
|
+
)
|
|
645
|
+
)
|
|
646
|
+
|
|
647
|
+
return recommendations
|
|
648
|
+
|
|
649
|
+
def _generate_connection_recommendations(self) -> list[OptimizationRecommendation]:
|
|
650
|
+
"""Generate connection pool optimization recommendations."""
|
|
651
|
+
recommendations: list[OptimizationRecommendation] = []
|
|
652
|
+
|
|
653
|
+
pool_stats = self.connection_pool.get_stats()
|
|
654
|
+
if pool_stats["utilization_percent"] > 90:
|
|
655
|
+
recommendations.append(
|
|
656
|
+
OptimizationRecommendation(
|
|
657
|
+
category="network",
|
|
658
|
+
priority="medium",
|
|
659
|
+
title="Connection Pool Near Capacity",
|
|
660
|
+
description=f"WebSocket connections at {pool_stats['utilization_percent']:.1f}% capacity",
|
|
661
|
+
impact="New client connections may be rejected",
|
|
662
|
+
implementation="Increase connection pool size or implement connection sharing",
|
|
663
|
+
estimated_improvement="Supports 2-3x more concurrent clients",
|
|
664
|
+
resource_cost="Low - configuration change",
|
|
665
|
+
risk_level="low",
|
|
666
|
+
)
|
|
667
|
+
)
|
|
668
|
+
|
|
669
|
+
return recommendations
|
|
670
|
+
|
|
671
|
+
def _generate_scaling_recommendations(
|
|
672
|
+
self, scaling_metrics: ScalingMetrics
|
|
673
|
+
) -> list[OptimizationRecommendation]:
|
|
674
|
+
"""Generate scaling-related optimization recommendations."""
|
|
675
|
+
recommendations: list[OptimizationRecommendation] = []
|
|
676
|
+
|
|
677
|
+
if scaling_metrics.recommended_scale_factor > 1.2:
|
|
678
|
+
recommendations.append(
|
|
679
|
+
OptimizationRecommendation(
|
|
680
|
+
category="performance",
|
|
681
|
+
priority="medium",
|
|
682
|
+
title="Horizontal Scaling Recommended",
|
|
683
|
+
description=f"Load analysis suggests {scaling_metrics.recommended_scale_factor:.1f}x scaling",
|
|
684
|
+
impact="Current load may exceed capacity during peak usage",
|
|
685
|
+
implementation="Deploy additional monitoring service instances",
|
|
686
|
+
estimated_improvement="Improved reliability and response times",
|
|
687
|
+
resource_cost="Medium - additional infrastructure",
|
|
688
|
+
risk_level="medium",
|
|
689
|
+
)
|
|
690
|
+
)
|
|
691
|
+
|
|
692
|
+
return recommendations
|
|
693
|
+
|
|
694
|
+
def optimize_configuration(self, strategy: str | None = None) -> dict[str, t.Any]:
|
|
695
|
+
"""Apply automatic configuration optimizations."""
|
|
696
|
+
if strategy is None:
|
|
697
|
+
strategy = self.performance_profile.optimization_strategy
|
|
698
|
+
|
|
699
|
+
latest_metrics = self._get_latest_metrics()
|
|
700
|
+
if not latest_metrics:
|
|
701
|
+
return {"status": "error", "message": "No metrics available"}
|
|
702
|
+
|
|
703
|
+
optimizations_applied = self._apply_all_optimizations(latest_metrics, strategy)
|
|
704
|
+
|
|
705
|
+
return self._build_optimization_result(strategy, optimizations_applied)
|
|
706
|
+
|
|
707
|
+
def _get_latest_metrics(self) -> ResourceMetrics | None:
|
|
708
|
+
"""Get the latest resource metrics."""
|
|
709
|
+
return self.resource_history[-1] if self.resource_history else None
|
|
710
|
+
|
|
711
|
+
def _apply_all_optimizations(
|
|
712
|
+
self, metrics: ResourceMetrics, strategy: str
|
|
713
|
+
) -> list[str]:
|
|
714
|
+
"""Apply all optimization strategies and collect results."""
|
|
715
|
+
optimizations_applied = []
|
|
716
|
+
|
|
717
|
+
# Apply different types of optimizations
|
|
718
|
+
optimizations_applied.extend(self._apply_memory_optimizations(metrics))
|
|
719
|
+
optimizations_applied.extend(
|
|
720
|
+
self._apply_performance_optimizations(metrics, strategy)
|
|
721
|
+
)
|
|
722
|
+
optimizations_applied.extend(self._apply_storage_optimizations())
|
|
723
|
+
|
|
724
|
+
return optimizations_applied
|
|
725
|
+
|
|
726
|
+
def _apply_memory_optimizations(self, metrics: ResourceMetrics) -> list[str]:
|
|
727
|
+
"""Apply memory-related optimizations."""
|
|
728
|
+
optimizations = []
|
|
729
|
+
|
|
730
|
+
if metrics.memory_percent > 70:
|
|
731
|
+
if self.connection_pool.max_connections > 500:
|
|
732
|
+
self.connection_pool.max_connections = int(
|
|
733
|
+
self.connection_pool.max_connections * 0.8
|
|
734
|
+
)
|
|
735
|
+
optimizations.append("Reduced connection pool size")
|
|
736
|
+
|
|
737
|
+
return optimizations
|
|
738
|
+
|
|
739
|
+
def _apply_performance_optimizations(
|
|
740
|
+
self, metrics: ResourceMetrics, strategy: str
|
|
741
|
+
) -> list[str]:
|
|
742
|
+
"""Apply performance-related optimizations."""
|
|
743
|
+
optimizations = []
|
|
744
|
+
|
|
745
|
+
if metrics.cpu_percent > 60 and strategy in ("performance", "balanced"):
|
|
746
|
+
if self.performance_profile.analysis_frequency_minutes > 2:
|
|
747
|
+
self.performance_profile.analysis_frequency_minutes = max(
|
|
748
|
+
1, self.performance_profile.analysis_frequency_minutes - 1
|
|
749
|
+
)
|
|
750
|
+
optimizations.append("Increased analysis frequency")
|
|
751
|
+
|
|
752
|
+
return optimizations
|
|
753
|
+
|
|
754
|
+
def _apply_storage_optimizations(self) -> list[str]:
|
|
755
|
+
"""Apply storage-related optimizations."""
|
|
756
|
+
optimizations = []
|
|
757
|
+
storage_usage = self.compaction_manager.get_storage_usage()
|
|
758
|
+
|
|
759
|
+
if storage_usage["utilization_percent"] > 70:
|
|
760
|
+
for data_type in ("metrics_raw", "error_patterns"):
|
|
761
|
+
result = self.compaction_manager.compact_data(data_type)
|
|
762
|
+
if result["status"] == "success":
|
|
763
|
+
optimizations.append(f"Compacted {data_type} data")
|
|
764
|
+
|
|
765
|
+
return optimizations
|
|
766
|
+
|
|
767
|
+
def _build_optimization_result(
|
|
768
|
+
self, strategy: str, optimizations_applied: list[str]
|
|
769
|
+
) -> dict[str, t.Any]:
|
|
770
|
+
"""Build the optimization result dictionary."""
|
|
771
|
+
return {
|
|
772
|
+
"status": "success",
|
|
773
|
+
"strategy": strategy,
|
|
774
|
+
"optimizations_applied": optimizations_applied,
|
|
775
|
+
"timestamp": datetime.now().isoformat(),
|
|
776
|
+
"next_optimization": (datetime.now() + timedelta(minutes=15)).isoformat(),
|
|
777
|
+
}
|
|
778
|
+
|
|
779
|
+
async def run_optimization_cycle(self) -> dict[str, t.Any]:
|
|
780
|
+
"""Run a complete optimization cycle."""
|
|
781
|
+
try:
|
|
782
|
+
# Collect metrics
|
|
783
|
+
metrics = self.collect_resource_metrics()
|
|
784
|
+
|
|
785
|
+
# Analyze scaling needs
|
|
786
|
+
scaling_metrics = self.analyze_scaling_needs()
|
|
787
|
+
|
|
788
|
+
# Generate recommendations
|
|
789
|
+
recommendations = self.generate_optimization_recommendations()
|
|
790
|
+
|
|
791
|
+
# Apply automatic optimizations if needed
|
|
792
|
+
optimization_result = None
|
|
793
|
+
if (
|
|
794
|
+
metrics.cpu_percent > 80
|
|
795
|
+
or metrics.memory_percent > 85
|
|
796
|
+
or scaling_metrics.current_load > 0.8
|
|
797
|
+
):
|
|
798
|
+
optimization_result = self.optimize_configuration()
|
|
799
|
+
|
|
800
|
+
return {
|
|
801
|
+
"status": "success",
|
|
802
|
+
"metrics": metrics.to_dict(),
|
|
803
|
+
"scaling_analysis": scaling_metrics.to_dict(),
|
|
804
|
+
"recommendations": [rec.to_dict() for rec in recommendations],
|
|
805
|
+
"automatic_optimization": optimization_result,
|
|
806
|
+
"connection_pool_stats": self.connection_pool.get_stats(),
|
|
807
|
+
"storage_usage": self.compaction_manager.get_storage_usage(),
|
|
808
|
+
"timestamp": datetime.now().isoformat(),
|
|
809
|
+
}
|
|
810
|
+
|
|
811
|
+
except Exception as e:
|
|
812
|
+
logger.error(f"Optimization cycle failed: {e}")
|
|
813
|
+
return {
|
|
814
|
+
"status": "error",
|
|
815
|
+
"message": str(e),
|
|
816
|
+
"timestamp": datetime.now().isoformat(),
|
|
817
|
+
}
|
|
818
|
+
|
|
819
|
+
def get_enterprise_status(self) -> dict[str, t.Any]:
|
|
820
|
+
"""Get comprehensive enterprise monitoring status."""
|
|
821
|
+
return {
|
|
822
|
+
"performance_profile": self.performance_profile.to_dict(),
|
|
823
|
+
"resource_metrics": self.resource_history[-1].to_dict()
|
|
824
|
+
if self.resource_history
|
|
825
|
+
else None,
|
|
826
|
+
"scaling_metrics": self.analyze_scaling_needs().to_dict(),
|
|
827
|
+
"active_recommendations": [
|
|
828
|
+
rec.to_dict() for rec in self.optimization_recommendations
|
|
829
|
+
],
|
|
830
|
+
"connection_pool": self.connection_pool.get_stats(),
|
|
831
|
+
"storage_usage": self.compaction_manager.get_storage_usage(),
|
|
832
|
+
"optimization_history": {
|
|
833
|
+
"last_optimization": self.last_optimization.isoformat(),
|
|
834
|
+
"total_optimizations": len(self.optimization_recommendations),
|
|
835
|
+
},
|
|
836
|
+
"health_score": self._calculate_health_score(),
|
|
837
|
+
}
|
|
838
|
+
|
|
839
|
+
def _calculate_health_score(self) -> float:
|
|
840
|
+
"""Calculate overall system health score (0.0 to 100.0)."""
|
|
841
|
+
if not self.resource_history:
|
|
842
|
+
return 50.0 # Unknown, assume average
|
|
843
|
+
|
|
844
|
+
latest = self.resource_history[-1]
|
|
845
|
+
storage = self.compaction_manager.get_storage_usage()
|
|
846
|
+
|
|
847
|
+
# Individual component scores
|
|
848
|
+
cpu_score = max(0, 100 - latest.cpu_percent)
|
|
849
|
+
memory_score = max(0, 100 - latest.memory_percent)
|
|
850
|
+
storage_score = max(0, 100 - storage["utilization_percent"])
|
|
851
|
+
|
|
852
|
+
# Connection efficiency
|
|
853
|
+
pool_stats = self.connection_pool.get_stats()
|
|
854
|
+
connection_score = max(0, 100 - pool_stats["utilization_percent"])
|
|
855
|
+
|
|
856
|
+
# Weighted average
|
|
857
|
+
health_score = (
|
|
858
|
+
cpu_score * 0.3
|
|
859
|
+
+ memory_score * 0.3
|
|
860
|
+
+ storage_score * 0.2
|
|
861
|
+
+ connection_score * 0.2
|
|
862
|
+
)
|
|
863
|
+
|
|
864
|
+
result: float = round(health_score, 1)
|
|
865
|
+
return result
|