crackerjack 0.37.9__py3-none-any.whl → 0.45.2__py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
- crackerjack/README.md +19 -0
- crackerjack/__init__.py +30 -1
- crackerjack/__main__.py +342 -1263
- crackerjack/adapters/README.md +18 -0
- crackerjack/adapters/__init__.py +27 -5
- crackerjack/adapters/_output_paths.py +167 -0
- crackerjack/adapters/_qa_adapter_base.py +309 -0
- crackerjack/adapters/_tool_adapter_base.py +706 -0
- crackerjack/adapters/ai/README.md +65 -0
- crackerjack/adapters/ai/__init__.py +5 -0
- crackerjack/adapters/ai/claude.py +853 -0
- crackerjack/adapters/complexity/README.md +53 -0
- crackerjack/adapters/complexity/__init__.py +10 -0
- crackerjack/adapters/complexity/complexipy.py +641 -0
- crackerjack/adapters/dependency/__init__.py +22 -0
- crackerjack/adapters/dependency/pip_audit.py +418 -0
- crackerjack/adapters/format/README.md +72 -0
- crackerjack/adapters/format/__init__.py +11 -0
- crackerjack/adapters/format/mdformat.py +313 -0
- crackerjack/adapters/format/ruff.py +516 -0
- crackerjack/adapters/lint/README.md +47 -0
- crackerjack/adapters/lint/__init__.py +11 -0
- crackerjack/adapters/lint/codespell.py +273 -0
- crackerjack/adapters/lsp/README.md +49 -0
- crackerjack/adapters/lsp/__init__.py +27 -0
- crackerjack/adapters/{rust_tool_manager.py → lsp/_manager.py} +3 -3
- crackerjack/adapters/{skylos_adapter.py → lsp/skylos.py} +59 -7
- crackerjack/adapters/{zuban_adapter.py → lsp/zuban.py} +3 -6
- crackerjack/adapters/refactor/README.md +59 -0
- crackerjack/adapters/refactor/__init__.py +12 -0
- crackerjack/adapters/refactor/creosote.py +318 -0
- crackerjack/adapters/refactor/refurb.py +406 -0
- crackerjack/adapters/refactor/skylos.py +494 -0
- crackerjack/adapters/sast/README.md +132 -0
- crackerjack/adapters/sast/__init__.py +32 -0
- crackerjack/adapters/sast/_base.py +201 -0
- crackerjack/adapters/sast/bandit.py +423 -0
- crackerjack/adapters/sast/pyscn.py +405 -0
- crackerjack/adapters/sast/semgrep.py +241 -0
- crackerjack/adapters/security/README.md +111 -0
- crackerjack/adapters/security/__init__.py +17 -0
- crackerjack/adapters/security/gitleaks.py +339 -0
- crackerjack/adapters/type/README.md +52 -0
- crackerjack/adapters/type/__init__.py +12 -0
- crackerjack/adapters/type/pyrefly.py +402 -0
- crackerjack/adapters/type/ty.py +402 -0
- crackerjack/adapters/type/zuban.py +522 -0
- crackerjack/adapters/utility/README.md +51 -0
- crackerjack/adapters/utility/__init__.py +10 -0
- crackerjack/adapters/utility/checks.py +884 -0
- crackerjack/agents/README.md +264 -0
- crackerjack/agents/__init__.py +40 -12
- crackerjack/agents/base.py +1 -0
- crackerjack/agents/claude_code_bridge.py +641 -0
- crackerjack/agents/coordinator.py +49 -53
- crackerjack/agents/dry_agent.py +187 -3
- crackerjack/agents/enhanced_coordinator.py +279 -0
- crackerjack/agents/enhanced_proactive_agent.py +185 -0
- crackerjack/agents/error_middleware.py +53 -0
- crackerjack/agents/formatting_agent.py +6 -8
- crackerjack/agents/helpers/__init__.py +9 -0
- crackerjack/agents/helpers/performance/__init__.py +22 -0
- crackerjack/agents/helpers/performance/performance_ast_analyzer.py +357 -0
- crackerjack/agents/helpers/performance/performance_pattern_detector.py +909 -0
- crackerjack/agents/helpers/performance/performance_recommender.py +572 -0
- crackerjack/agents/helpers/refactoring/__init__.py +22 -0
- crackerjack/agents/helpers/refactoring/code_transformer.py +536 -0
- crackerjack/agents/helpers/refactoring/complexity_analyzer.py +344 -0
- crackerjack/agents/helpers/refactoring/dead_code_detector.py +437 -0
- crackerjack/agents/helpers/test_creation/__init__.py +19 -0
- crackerjack/agents/helpers/test_creation/test_ast_analyzer.py +216 -0
- crackerjack/agents/helpers/test_creation/test_coverage_analyzer.py +643 -0
- crackerjack/agents/helpers/test_creation/test_template_generator.py +1031 -0
- crackerjack/agents/performance_agent.py +121 -1152
- crackerjack/agents/refactoring_agent.py +156 -655
- crackerjack/agents/semantic_agent.py +479 -0
- crackerjack/agents/semantic_helpers.py +356 -0
- crackerjack/agents/test_creation_agent.py +19 -1605
- crackerjack/api.py +5 -7
- crackerjack/cli/README.md +394 -0
- crackerjack/cli/__init__.py +1 -1
- crackerjack/cli/cache_handlers.py +23 -18
- crackerjack/cli/cache_handlers_enhanced.py +1 -4
- crackerjack/cli/facade.py +70 -8
- crackerjack/cli/formatting.py +13 -0
- crackerjack/cli/handlers/__init__.py +85 -0
- crackerjack/cli/handlers/advanced.py +103 -0
- crackerjack/cli/handlers/ai_features.py +62 -0
- crackerjack/cli/handlers/analytics.py +479 -0
- crackerjack/cli/handlers/changelog.py +271 -0
- crackerjack/cli/handlers/config_handlers.py +16 -0
- crackerjack/cli/handlers/coverage.py +84 -0
- crackerjack/cli/handlers/documentation.py +280 -0
- crackerjack/cli/handlers/main_handlers.py +497 -0
- crackerjack/cli/handlers/monitoring.py +371 -0
- crackerjack/cli/handlers.py +249 -49
- crackerjack/cli/interactive.py +8 -5
- crackerjack/cli/options.py +203 -110
- crackerjack/cli/semantic_handlers.py +292 -0
- crackerjack/cli/version.py +19 -0
- crackerjack/code_cleaner.py +60 -24
- crackerjack/config/README.md +472 -0
- crackerjack/config/__init__.py +256 -0
- crackerjack/config/global_lock_config.py +191 -54
- crackerjack/config/hooks.py +188 -16
- crackerjack/config/loader.py +239 -0
- crackerjack/config/settings.py +141 -0
- crackerjack/config/tool_commands.py +331 -0
- crackerjack/core/README.md +393 -0
- crackerjack/core/async_workflow_orchestrator.py +79 -53
- crackerjack/core/autofix_coordinator.py +22 -9
- crackerjack/core/container.py +10 -9
- crackerjack/core/enhanced_container.py +9 -9
- crackerjack/core/performance.py +1 -1
- crackerjack/core/performance_monitor.py +5 -3
- crackerjack/core/phase_coordinator.py +1018 -634
- crackerjack/core/proactive_workflow.py +3 -3
- crackerjack/core/retry.py +275 -0
- crackerjack/core/service_watchdog.py +167 -23
- crackerjack/core/session_coordinator.py +187 -382
- crackerjack/core/timeout_manager.py +161 -44
- crackerjack/core/workflow/__init__.py +21 -0
- crackerjack/core/workflow/workflow_ai_coordinator.py +863 -0
- crackerjack/core/workflow/workflow_event_orchestrator.py +1107 -0
- crackerjack/core/workflow/workflow_issue_parser.py +714 -0
- crackerjack/core/workflow/workflow_phase_executor.py +1158 -0
- crackerjack/core/workflow/workflow_security_gates.py +400 -0
- crackerjack/core/workflow_orchestrator.py +1247 -953
- crackerjack/data/README.md +11 -0
- crackerjack/data/__init__.py +8 -0
- crackerjack/data/models.py +79 -0
- crackerjack/data/repository.py +210 -0
- crackerjack/decorators/README.md +180 -0
- crackerjack/decorators/__init__.py +35 -0
- crackerjack/decorators/error_handling.py +649 -0
- crackerjack/decorators/error_handling_decorators.py +334 -0
- crackerjack/decorators/helpers.py +58 -0
- crackerjack/decorators/patterns.py +281 -0
- crackerjack/decorators/utils.py +58 -0
- crackerjack/docs/README.md +11 -0
- crackerjack/docs/generated/api/CLI_REFERENCE.md +1 -1
- crackerjack/documentation/README.md +11 -0
- crackerjack/documentation/ai_templates.py +1 -1
- crackerjack/documentation/dual_output_generator.py +11 -9
- crackerjack/documentation/reference_generator.py +104 -59
- crackerjack/dynamic_config.py +52 -61
- crackerjack/errors.py +1 -1
- crackerjack/events/README.md +11 -0
- crackerjack/events/__init__.py +16 -0
- crackerjack/events/telemetry.py +175 -0
- crackerjack/events/workflow_bus.py +346 -0
- crackerjack/exceptions/README.md +301 -0
- crackerjack/exceptions/__init__.py +5 -0
- crackerjack/exceptions/config.py +4 -0
- crackerjack/exceptions/tool_execution_error.py +245 -0
- crackerjack/executors/README.md +591 -0
- crackerjack/executors/__init__.py +2 -0
- crackerjack/executors/async_hook_executor.py +539 -77
- crackerjack/executors/cached_hook_executor.py +3 -3
- crackerjack/executors/hook_executor.py +967 -102
- crackerjack/executors/hook_lock_manager.py +31 -22
- crackerjack/executors/individual_hook_executor.py +66 -32
- crackerjack/executors/lsp_aware_hook_executor.py +136 -57
- crackerjack/executors/progress_hook_executor.py +282 -0
- crackerjack/executors/tool_proxy.py +23 -7
- crackerjack/hooks/README.md +485 -0
- crackerjack/hooks/lsp_hook.py +8 -9
- crackerjack/intelligence/README.md +557 -0
- crackerjack/interactive.py +37 -10
- crackerjack/managers/README.md +369 -0
- crackerjack/managers/async_hook_manager.py +41 -57
- crackerjack/managers/hook_manager.py +449 -79
- crackerjack/managers/publish_manager.py +81 -36
- crackerjack/managers/test_command_builder.py +290 -12
- crackerjack/managers/test_executor.py +93 -8
- crackerjack/managers/test_manager.py +1082 -75
- crackerjack/managers/test_progress.py +118 -26
- crackerjack/mcp/README.md +374 -0
- crackerjack/mcp/cache.py +25 -2
- crackerjack/mcp/client_runner.py +35 -18
- crackerjack/mcp/context.py +9 -9
- crackerjack/mcp/dashboard.py +24 -8
- crackerjack/mcp/enhanced_progress_monitor.py +34 -23
- crackerjack/mcp/file_monitor.py +27 -6
- crackerjack/mcp/progress_components.py +45 -34
- crackerjack/mcp/progress_monitor.py +6 -9
- crackerjack/mcp/rate_limiter.py +11 -7
- crackerjack/mcp/server.py +2 -0
- crackerjack/mcp/server_core.py +187 -55
- crackerjack/mcp/service_watchdog.py +12 -9
- crackerjack/mcp/task_manager.py +2 -2
- crackerjack/mcp/tools/README.md +27 -0
- crackerjack/mcp/tools/__init__.py +2 -0
- crackerjack/mcp/tools/core_tools.py +75 -52
- crackerjack/mcp/tools/execution_tools.py +87 -31
- crackerjack/mcp/tools/intelligence_tools.py +2 -2
- crackerjack/mcp/tools/proactive_tools.py +1 -1
- crackerjack/mcp/tools/semantic_tools.py +584 -0
- crackerjack/mcp/tools/utility_tools.py +180 -132
- crackerjack/mcp/tools/workflow_executor.py +87 -46
- crackerjack/mcp/websocket/README.md +31 -0
- crackerjack/mcp/websocket/app.py +11 -1
- crackerjack/mcp/websocket/event_bridge.py +188 -0
- crackerjack/mcp/websocket/jobs.py +27 -4
- crackerjack/mcp/websocket/monitoring/__init__.py +25 -0
- crackerjack/mcp/websocket/monitoring/api/__init__.py +19 -0
- crackerjack/mcp/websocket/monitoring/api/dependencies.py +141 -0
- crackerjack/mcp/websocket/monitoring/api/heatmap.py +154 -0
- crackerjack/mcp/websocket/monitoring/api/intelligence.py +199 -0
- crackerjack/mcp/websocket/monitoring/api/metrics.py +203 -0
- crackerjack/mcp/websocket/monitoring/api/telemetry.py +101 -0
- crackerjack/mcp/websocket/monitoring/dashboard.py +18 -0
- crackerjack/mcp/websocket/monitoring/factory.py +109 -0
- crackerjack/mcp/websocket/monitoring/filters.py +10 -0
- crackerjack/mcp/websocket/monitoring/metrics.py +64 -0
- crackerjack/mcp/websocket/monitoring/models.py +90 -0
- crackerjack/mcp/websocket/monitoring/utils.py +171 -0
- crackerjack/mcp/websocket/monitoring/websocket_manager.py +78 -0
- crackerjack/mcp/websocket/monitoring/websockets/__init__.py +17 -0
- crackerjack/mcp/websocket/monitoring/websockets/dependencies.py +126 -0
- crackerjack/mcp/websocket/monitoring/websockets/heatmap.py +176 -0
- crackerjack/mcp/websocket/monitoring/websockets/intelligence.py +291 -0
- crackerjack/mcp/websocket/monitoring/websockets/metrics.py +291 -0
- crackerjack/mcp/websocket/monitoring_endpoints.py +16 -2930
- crackerjack/mcp/websocket/server.py +1 -3
- crackerjack/mcp/websocket/websocket_handler.py +107 -6
- crackerjack/models/README.md +308 -0
- crackerjack/models/__init__.py +10 -1
- crackerjack/models/config.py +639 -22
- crackerjack/models/config_adapter.py +6 -6
- crackerjack/models/protocols.py +1167 -23
- crackerjack/models/pydantic_models.py +320 -0
- crackerjack/models/qa_config.py +145 -0
- crackerjack/models/qa_results.py +134 -0
- crackerjack/models/results.py +35 -0
- crackerjack/models/semantic_models.py +258 -0
- crackerjack/models/task.py +19 -3
- crackerjack/models/test_models.py +60 -0
- crackerjack/monitoring/README.md +11 -0
- crackerjack/monitoring/ai_agent_watchdog.py +5 -4
- crackerjack/monitoring/metrics_collector.py +4 -3
- crackerjack/monitoring/regression_prevention.py +4 -3
- crackerjack/monitoring/websocket_server.py +4 -241
- crackerjack/orchestration/README.md +340 -0
- crackerjack/orchestration/__init__.py +43 -0
- crackerjack/orchestration/advanced_orchestrator.py +20 -67
- crackerjack/orchestration/cache/README.md +312 -0
- crackerjack/orchestration/cache/__init__.py +37 -0
- crackerjack/orchestration/cache/memory_cache.py +338 -0
- crackerjack/orchestration/cache/tool_proxy_cache.py +340 -0
- crackerjack/orchestration/config.py +297 -0
- crackerjack/orchestration/coverage_improvement.py +13 -6
- crackerjack/orchestration/execution_strategies.py +6 -6
- crackerjack/orchestration/hook_orchestrator.py +1398 -0
- crackerjack/orchestration/strategies/README.md +401 -0
- crackerjack/orchestration/strategies/__init__.py +39 -0
- crackerjack/orchestration/strategies/adaptive_strategy.py +630 -0
- crackerjack/orchestration/strategies/parallel_strategy.py +237 -0
- crackerjack/orchestration/strategies/sequential_strategy.py +299 -0
- crackerjack/orchestration/test_progress_streamer.py +1 -1
- crackerjack/plugins/README.md +11 -0
- crackerjack/plugins/hooks.py +3 -2
- crackerjack/plugins/loader.py +3 -3
- crackerjack/plugins/managers.py +1 -1
- crackerjack/py313.py +191 -0
- crackerjack/security/README.md +11 -0
- crackerjack/services/README.md +374 -0
- crackerjack/services/__init__.py +8 -21
- crackerjack/services/ai/README.md +295 -0
- crackerjack/services/ai/__init__.py +7 -0
- crackerjack/services/ai/advanced_optimizer.py +878 -0
- crackerjack/services/{contextual_ai_assistant.py → ai/contextual_ai_assistant.py} +5 -3
- crackerjack/services/ai/embeddings.py +444 -0
- crackerjack/services/ai/intelligent_commit.py +328 -0
- crackerjack/services/ai/predictive_analytics.py +510 -0
- crackerjack/services/api_extractor.py +5 -3
- crackerjack/services/bounded_status_operations.py +45 -5
- crackerjack/services/cache.py +249 -318
- crackerjack/services/changelog_automation.py +7 -3
- crackerjack/services/command_execution_service.py +305 -0
- crackerjack/services/config_integrity.py +83 -39
- crackerjack/services/config_merge.py +9 -6
- crackerjack/services/config_service.py +198 -0
- crackerjack/services/config_template.py +13 -26
- crackerjack/services/coverage_badge_service.py +6 -4
- crackerjack/services/coverage_ratchet.py +53 -27
- crackerjack/services/debug.py +18 -7
- crackerjack/services/dependency_analyzer.py +4 -4
- crackerjack/services/dependency_monitor.py +13 -13
- crackerjack/services/documentation_generator.py +4 -2
- crackerjack/services/documentation_service.py +62 -33
- crackerjack/services/enhanced_filesystem.py +81 -27
- crackerjack/services/enterprise_optimizer.py +1 -1
- crackerjack/services/error_pattern_analyzer.py +10 -10
- crackerjack/services/file_filter.py +221 -0
- crackerjack/services/file_hasher.py +5 -7
- crackerjack/services/file_io_service.py +361 -0
- crackerjack/services/file_modifier.py +615 -0
- crackerjack/services/filesystem.py +80 -109
- crackerjack/services/git.py +99 -5
- crackerjack/services/health_metrics.py +4 -6
- crackerjack/services/heatmap_generator.py +12 -3
- crackerjack/services/incremental_executor.py +380 -0
- crackerjack/services/initialization.py +101 -49
- crackerjack/services/log_manager.py +2 -2
- crackerjack/services/logging.py +120 -68
- crackerjack/services/lsp_client.py +12 -12
- crackerjack/services/memory_optimizer.py +27 -22
- crackerjack/services/monitoring/README.md +30 -0
- crackerjack/services/monitoring/__init__.py +9 -0
- crackerjack/services/monitoring/dependency_monitor.py +678 -0
- crackerjack/services/monitoring/error_pattern_analyzer.py +676 -0
- crackerjack/services/monitoring/health_metrics.py +716 -0
- crackerjack/services/monitoring/metrics.py +587 -0
- crackerjack/services/{performance_benchmarks.py → monitoring/performance_benchmarks.py} +100 -14
- crackerjack/services/{performance_cache.py → monitoring/performance_cache.py} +21 -15
- crackerjack/services/{performance_monitor.py → monitoring/performance_monitor.py} +10 -6
- crackerjack/services/parallel_executor.py +166 -55
- crackerjack/services/patterns/__init__.py +142 -0
- crackerjack/services/patterns/agents.py +107 -0
- crackerjack/services/patterns/code/__init__.py +15 -0
- crackerjack/services/patterns/code/detection.py +118 -0
- crackerjack/services/patterns/code/imports.py +107 -0
- crackerjack/services/patterns/code/paths.py +159 -0
- crackerjack/services/patterns/code/performance.py +119 -0
- crackerjack/services/patterns/code/replacement.py +36 -0
- crackerjack/services/patterns/core.py +212 -0
- crackerjack/services/patterns/documentation/__init__.py +14 -0
- crackerjack/services/patterns/documentation/badges_markdown.py +96 -0
- crackerjack/services/patterns/documentation/comments_blocks.py +83 -0
- crackerjack/services/patterns/documentation/docstrings.py +89 -0
- crackerjack/services/patterns/formatting.py +226 -0
- crackerjack/services/patterns/operations.py +339 -0
- crackerjack/services/patterns/security/__init__.py +23 -0
- crackerjack/services/patterns/security/code_injection.py +122 -0
- crackerjack/services/patterns/security/credentials.py +190 -0
- crackerjack/services/patterns/security/path_traversal.py +221 -0
- crackerjack/services/patterns/security/unsafe_operations.py +216 -0
- crackerjack/services/patterns/templates.py +62 -0
- crackerjack/services/patterns/testing/__init__.py +18 -0
- crackerjack/services/patterns/testing/error_patterns.py +107 -0
- crackerjack/services/patterns/testing/pytest_output.py +126 -0
- crackerjack/services/patterns/tool_output/__init__.py +16 -0
- crackerjack/services/patterns/tool_output/bandit.py +72 -0
- crackerjack/services/patterns/tool_output/other.py +97 -0
- crackerjack/services/patterns/tool_output/pyright.py +67 -0
- crackerjack/services/patterns/tool_output/ruff.py +44 -0
- crackerjack/services/patterns/url_sanitization.py +114 -0
- crackerjack/services/patterns/utilities.py +42 -0
- crackerjack/services/patterns/utils.py +339 -0
- crackerjack/services/patterns/validation.py +46 -0
- crackerjack/services/patterns/versioning.py +62 -0
- crackerjack/services/predictive_analytics.py +21 -8
- crackerjack/services/profiler.py +280 -0
- crackerjack/services/quality/README.md +415 -0
- crackerjack/services/quality/__init__.py +11 -0
- crackerjack/services/quality/anomaly_detector.py +392 -0
- crackerjack/services/quality/pattern_cache.py +333 -0
- crackerjack/services/quality/pattern_detector.py +479 -0
- crackerjack/services/quality/qa_orchestrator.py +491 -0
- crackerjack/services/{quality_baseline.py → quality/quality_baseline.py} +163 -2
- crackerjack/services/{quality_baseline_enhanced.py → quality/quality_baseline_enhanced.py} +4 -1
- crackerjack/services/{quality_intelligence.py → quality/quality_intelligence.py} +180 -16
- crackerjack/services/regex_patterns.py +58 -2987
- crackerjack/services/regex_utils.py +55 -29
- crackerjack/services/secure_status_formatter.py +42 -15
- crackerjack/services/secure_subprocess.py +35 -2
- crackerjack/services/security.py +16 -8
- crackerjack/services/server_manager.py +40 -51
- crackerjack/services/smart_scheduling.py +46 -6
- crackerjack/services/status_authentication.py +3 -3
- crackerjack/services/thread_safe_status_collector.py +1 -0
- crackerjack/services/tool_filter.py +368 -0
- crackerjack/services/tool_version_service.py +9 -5
- crackerjack/services/unified_config.py +43 -351
- crackerjack/services/vector_store.py +689 -0
- crackerjack/services/version_analyzer.py +6 -4
- crackerjack/services/version_checker.py +14 -8
- crackerjack/services/zuban_lsp_service.py +5 -4
- crackerjack/slash_commands/README.md +11 -0
- crackerjack/slash_commands/init.md +2 -12
- crackerjack/slash_commands/run.md +84 -50
- crackerjack/tools/README.md +11 -0
- crackerjack/tools/__init__.py +30 -0
- crackerjack/tools/_git_utils.py +105 -0
- crackerjack/tools/check_added_large_files.py +139 -0
- crackerjack/tools/check_ast.py +105 -0
- crackerjack/tools/check_json.py +103 -0
- crackerjack/tools/check_jsonschema.py +297 -0
- crackerjack/tools/check_toml.py +103 -0
- crackerjack/tools/check_yaml.py +110 -0
- crackerjack/tools/codespell_wrapper.py +72 -0
- crackerjack/tools/end_of_file_fixer.py +202 -0
- crackerjack/tools/format_json.py +128 -0
- crackerjack/tools/mdformat_wrapper.py +114 -0
- crackerjack/tools/trailing_whitespace.py +198 -0
- crackerjack/tools/validate_regex_patterns.py +7 -3
- crackerjack/ui/README.md +11 -0
- crackerjack/ui/dashboard_renderer.py +28 -0
- crackerjack/ui/templates/README.md +11 -0
- crackerjack/utils/console_utils.py +13 -0
- crackerjack/utils/dependency_guard.py +230 -0
- crackerjack/utils/retry_utils.py +275 -0
- crackerjack/workflows/README.md +590 -0
- crackerjack/workflows/__init__.py +46 -0
- crackerjack/workflows/actions.py +811 -0
- crackerjack/workflows/auto_fix.py +444 -0
- crackerjack/workflows/container_builder.py +499 -0
- crackerjack/workflows/definitions.py +443 -0
- crackerjack/workflows/engine.py +177 -0
- crackerjack/workflows/event_bridge.py +242 -0
- {crackerjack-0.37.9.dist-info → crackerjack-0.45.2.dist-info}/METADATA +678 -98
- crackerjack-0.45.2.dist-info/RECORD +478 -0
- {crackerjack-0.37.9.dist-info → crackerjack-0.45.2.dist-info}/WHEEL +1 -1
- crackerjack/managers/test_manager_backup.py +0 -1075
- crackerjack/mcp/tools/execution_tools_backup.py +0 -1011
- crackerjack/mixins/__init__.py +0 -3
- crackerjack/mixins/error_handling.py +0 -145
- crackerjack/services/config.py +0 -358
- crackerjack/ui/server_panels.py +0 -125
- crackerjack-0.37.9.dist-info/RECORD +0 -231
- /crackerjack/adapters/{rust_tool_adapter.py → lsp/_base.py} +0 -0
- /crackerjack/adapters/{lsp_client.py → lsp/_client.py} +0 -0
- {crackerjack-0.37.9.dist-info → crackerjack-0.45.2.dist-info}/entry_points.txt +0 -0
- {crackerjack-0.37.9.dist-info → crackerjack-0.45.2.dist-info}/licenses/LICENSE +0 -0
crackerjack/agents/performance_agent.py

```diff
@@ -1,12 +1,7 @@
-import ast
-import operator
 import time
 import typing as t
-from contextlib import suppress
 from pathlib import Path
 
-from ..services.regex_patterns import SAFE_PATTERNS
-from . import performance_helpers
 from .base import (
     AgentContext,
     FixResult,
@@ -15,20 +10,33 @@ from .base import (
     SubAgent,
     agent_registry,
 )
-from .
+from .helpers.performance.performance_ast_analyzer import PerformanceASTAnalyzer
+from .helpers.performance.performance_pattern_detector import PerformancePatternDetector
+from .helpers.performance.performance_recommender import PerformanceRecommender
+from .semantic_helpers import (
+    SemanticInsight,
+    create_semantic_enhancer,
+    get_session_enhanced_recommendations,
+)
 
 
 class PerformanceAgent(SubAgent):
+    """Agent for detecting and fixing performance issues.
+
+    Enhanced with semantic context to detect performance patterns across
+    the codebase and find similar bottlenecks that may not be immediately visible.
+    """
+
     def __init__(self, context: AgentContext) -> None:
         super().__init__(context)
+        self.semantic_enhancer = create_semantic_enhancer(context.project_path)
+        self.semantic_insights: dict[str, SemanticInsight] = {}
         self.performance_metrics: dict[str, t.Any] = {}
-
-
-
-
-
-            "comprehensions_applied": 0,
-        }
+
+        # Initialize helper modules
+        self._pattern_detector = PerformancePatternDetector(context)
+        self._ast_analyzer = PerformanceASTAnalyzer(context)
+        self._recommender = PerformanceRecommender(context)
 
     def get_supported_types(self) -> set[IssueType]:
         return {IssueType.PERFORMANCE}
@@ -118,7 +126,16 @@ class PerformanceAgent(SubAgent):
                 remaining_issues=[f"Could not read file: {file_path}"],
             )
 
-
+        # Detect traditional performance issues using helper
+        performance_issues = self._pattern_detector.detect_performance_issues(
+            content, file_path
+        )
+
+        # Enhance with semantic performance pattern detection
+        semantic_issues = await self._detect_semantic_performance_issues(
+            content, file_path
+        )
+        performance_issues.extend(semantic_issues)
 
         if not performance_issues:
             return FixResult(
@@ -127,19 +144,22 @@ class PerformanceAgent(SubAgent):
                 recommendations=["No performance issues detected"],
             )
 
-        return self._apply_and_save_optimizations(
+        return await self._apply_and_save_optimizations(
             file_path,
             content,
             performance_issues,
         )
 
-    def _apply_and_save_optimizations(
+    async def _apply_and_save_optimizations(
         self,
         file_path: Path,
        content: str,
         issues: list[dict[str, t.Any]],
     ) -> FixResult:
-
+        # Delegate to recommender helper
+        optimized_content = self._recommender.apply_performance_optimizations(
+            content, issues
+        )
 
         if optimized_content == content:
             return self._create_no_optimization_result()
@@ -152,15 +172,19 @@ class PerformanceAgent(SubAgent):
                 remaining_issues=[f"Failed to write optimized file: {file_path}"],
             )
 
+        # Get summary from recommender
+        stats_summary = self._recommender.generate_optimization_summary()
+
         return FixResult(
             success=True,
             confidence=0.8,
             fixes_applied=[
                 f"Optimized {len(issues)} performance issues",
                 "Applied algorithmic improvements",
+                stats_summary,
             ],
             files_modified=[str(file_path)],
-            recommendations=
+            recommendations=await self._generate_enhanced_recommendations(issues),
         )
 
     @staticmethod
@@ -185,1172 +209,117 @@ class PerformanceAgent(SubAgent):
                 remaining_issues=[f"Error processing file: {error}"],
             )
 
-    def
-        self,
-        content: str,
-        file_path: Path,
-    ) -> list[dict[str, t.Any]]:
-        issues: list[dict[str, t.Any]] = []
-
-        with suppress(SyntaxError):
-            tree = ast.parse(content)
-
-            nested_issues = self._detect_nested_loops_enhanced(tree)
-            issues.extend(nested_issues)
-
-            list_issues = self._detect_inefficient_list_ops_enhanced(content, tree)
-            issues.extend(list_issues)
-
-            repeated_issues = self._detect_repeated_operations_enhanced(content, tree)
-            issues.extend(repeated_issues)
-
-            string_issues = self._detect_string_inefficiencies_enhanced(content)
-            issues.extend(string_issues)
-
-            comprehension_issues = self._detect_list_comprehension_opportunities(tree)
-            issues.extend(comprehension_issues)
-
-            builtin_issues = self._detect_inefficient_builtin_usage(tree, content)
-            issues.extend(builtin_issues)
-
-        return issues
-
-    def _detect_nested_loops_enhanced(self, tree: ast.AST) -> list[dict[str, t.Any]]:
-        analyzer = self._create_nested_loop_analyzer()
-        analyzer.visit(tree)
-        return self._build_nested_loop_issues(analyzer)
-
-    @staticmethod
-    def _create_nested_loop_analyzer() -> (
-        performance_helpers.EnhancedNestedLoopAnalyzer
-    ):
-        return performance_helpers.EnhancedNestedLoopAnalyzer()
-
-    def _build_nested_loop_issues(
-        self, analyzer: performance_helpers.EnhancedNestedLoopAnalyzer
+    async def _detect_semantic_performance_issues(
+        self, content: str, file_path: Path
     ) -> list[dict[str, t.Any]]:
-
-
-
-        return [
-            {
-                "type": "nested_loops_enhanced",
-                "instances": analyzer.nested_loops,
-                "hotspots": analyzer.complexity_hotspots,
-                "total_count": len(analyzer.nested_loops),
-                "high_priority_count": self._count_high_priority_loops(
-                    analyzer.nested_loops
-                ),
-                "suggestion": self._generate_nested_loop_suggestions(
-                    analyzer.nested_loops
-                ),
-            }
-        ]
-
-    @staticmethod
-    def _count_high_priority_loops(nested_loops: list[dict[str, t.Any]]) -> int:
-        return len([n for n in nested_loops if n["priority"] in ("high", "critical")])
-
-    @staticmethod
-    def _generate_nested_loop_suggestions(nested_loops: list[dict[str, t.Any]]) -> str:
-        suggestions = []
-
-        critical_count = len(
-            [n for n in nested_loops if n.get("priority") == "critical"]
-        )
-        high_count = len([n for n in nested_loops if n.get("priority") == "high"])
-
-        if critical_count > 0:
-            suggestions.append(
-                f"CRITICAL: {critical_count} O(n⁴+) loops need immediate algorithmic redesign"
-            )
-        if high_count > 0:
-            suggestions.append(
-                f"HIGH: {high_count} O(n³) loops should use memoization/caching"
-            )
-
-        suggestions.extend(
-            [
-                "Consider: 1) Hash tables for lookups 2) List comprehensions 3) NumPy for numerical operations",
-                "Profile: Use timeit or cProfile to measure actual performance impact",
-            ]
-        )
-
-        return "; ".join(suggestions)
-
-    def _detect_inefficient_list_ops_enhanced(
-        self,
-        content: str,
-        tree: ast.AST,
-    ) -> list[dict[str, t.Any]]:
-        analyzer = self._create_enhanced_list_op_analyzer()
-        analyzer.visit(tree)
-
-        if not analyzer.list_ops:
-            return []
-
-        return self._build_list_ops_issues(analyzer)
-
-    @staticmethod
-    def _create_enhanced_list_op_analyzer() -> t.Any:
-        return performance_helpers.EnhancedListOpAnalyzer()
-
-    def _build_list_ops_issues(self, analyzer: t.Any) -> list[dict[str, t.Any]]:
-        total_impact = sum(int(op["impact_factor"]) for op in analyzer.list_ops)
-        high_impact_ops = [
-            op for op in analyzer.list_ops if int(op["impact_factor"]) >= 10
-        ]
-
-        return [
-            {
-                "type": "inefficient_list_operations_enhanced",
-                "instances": analyzer.list_ops,
-                "total_impact": total_impact,
-                "high_impact_count": len(high_impact_ops),
-                "suggestion": self._generate_list_op_suggestions(analyzer.list_ops),
-            }
-        ]
-
-    @staticmethod
-    def _generate_list_op_suggestions(list_ops: list[dict[str, t.Any]]) -> str:
-        suggestions = []
-
-        high_impact_count = len(
-            [op for op in list_ops if int(op["impact_factor"]) >= 10]
-        )
-        if high_impact_count > 0:
-            suggestions.append(
-                f"HIGH IMPACT: {high_impact_count} list[t.Any] operations in hot loops"
-            )
-
-        append_count = len([op for op in list_ops if op["optimization"] == "append"])
-        extend_count = len([op for op in list_ops if op["optimization"] == "extend"])
-
-        if append_count > 0:
-            suggestions.append(f"Replace {append_count} += [item] with .append(item)")
-        if extend_count > 0:
-            suggestions.append(
-                f"Replace {extend_count} += multiple_items with .extend()"
-            )
-
-        suggestions.append(
-            "Expected performance gains: 2-50x depending on loop context"
-        )
-
-        return "; ".join(suggestions)
-
-    def _detect_repeated_operations_enhanced(
-        self,
-        content: str,
-        tree: ast.AST,
-    ) -> list[dict[str, t.Any]]:
-        lines = content.split("\n")
-        repeated_calls = self._find_expensive_operations_in_loops(lines)
-
-        return self._create_repeated_operations_issues(repeated_calls)
-
-    def _find_expensive_operations_in_loops(
-        self,
-        lines: list[str],
-    ) -> list[dict[str, t.Any]]:
-        repeated_calls: list[dict[str, t.Any]] = []
-        expensive_patterns = self._get_expensive_operation_patterns()
-
-        for i, line in enumerate(lines):
-            stripped = line.strip()
-            if self._contains_expensive_operation(stripped, expensive_patterns):
-                if self._is_in_loop_context(lines, i):
-                    repeated_calls.append(self._create_operation_record(i, stripped))
-
-        return repeated_calls
-
-    @staticmethod
-    def _get_expensive_operation_patterns() -> tuple[str, ...]:
-        return (
-            ".exists()",
-            ".read_text()",
-            ".glob(",
-            ".rglob(",
-            "Path(",
-            "len(",
-            ".get(",
-        )
+        """Detect performance issues using semantic analysis of similar code patterns."""
+        issues = []
 
-    @staticmethod
-    def _contains_expensive_operation(
-        line: str,
-        patterns: tuple[str, ...],
-    ) -> bool:
-        return any(pattern in line for pattern in patterns)
-
-    @staticmethod
-    def _is_in_loop_context(lines: list[str], line_index: int) -> bool:
-        context_start = max(0, line_index - 5)
-        context_lines = lines[context_start : line_index + 1]
-
-        loop_keywords = ("for ", "while ")
-        return any(
-            any(keyword in ctx_line for keyword in loop_keywords)
-            for ctx_line in context_lines
-        )
-
-    @staticmethod
-    def _create_operation_record(
-        line_index: int,
-        content: str,
-    ) -> dict[str, t.Any]:
-        return {
-            "line_number": line_index + 1,
-            "content": content,
-            "type": "expensive_operation_in_loop",
-        }
-
-    @staticmethod
-    def _create_repeated_operations_issues(
-        repeated_calls: list[dict[str, t.Any]],
-    ) -> list[dict[str, t.Any]]:
-        if len(repeated_calls) >= 2:
-            return [
-                {
-                    "type": "repeated_expensive_operations",
-                    "instances": repeated_calls,
-                    "suggestion": "Cache expensive operations outside loops",
-                },
-            ]
-        return []
-
-    @staticmethod
-    def _detect_string_inefficiencies(content: str) -> list[dict[str, t.Any]]:
-        issues: list[dict[str, t.Any]] = []
-        lines = content.split("\n")
-
-        string_concat_in_loop: list[dict[str, t.Any]] = []
-
-        for i, line in enumerate(lines):
-            stripped = line.strip()
-            if "+=" in stripped and any(quote in stripped for quote in ('"', "'")):
-                context_start = max(0, i - 5)
-                context_lines = lines[context_start : i + 1]
-
-                loop_keywords = ("for ", "while ")
-                if any(
-                    any(keyword in ctx_line for keyword in loop_keywords)
-                    for ctx_line in context_lines
-                ):
-                    string_concat_in_loop.append(
-                        {
-                            "line_number": i + 1,
-                            "content": stripped,
-                        },
-                    )
-
-        if len(string_concat_in_loop) >= 2:
-            issues.append(
-                {
-                    "type": "string_concatenation_in_loop",
-                    "instances": string_concat_in_loop,
-                    "suggestion": 'Use list[t.Any].append() and "".join() for string building',
-                },
-            )
-
-        return issues
-
-    def _detect_string_inefficiencies_enhanced(
-        self, content: str
-    ) -> list[dict[str, t.Any]]:
-        issues: list[dict[str, t.Any]] = []
-        lines = content.split("\n")
-
-        string_concat_patterns = []
-        inefficient_joins = []
-        repeated_format_calls = []
-
-        for i, line in enumerate(lines):
-            stripped = line.strip()
-
-            if "+=" in stripped and any(quote in stripped for quote in ('"', "'")):
-                if self._is_in_loop_context_enhanced(lines, i):
-                    context_info = self._analyze_string_context(lines, i)
-                    string_concat_patterns.append(
-                        {
-                            "line_number": i + 1,
-                            "content": stripped,
-                            "context": context_info,
-                            "estimated_impact": int(
-                                context_info.get("impact_factor", "1")
-                            ),
-                        }
-                    )
-
-            if ".join([])" in stripped:
-                inefficient_joins.append(
-                    {
-                        "line_number": i + 1,
-                        "content": stripped,
-                        "optimization": "Use empty string literal instead",
-                        "performance_gain": "2x",
-                    }
-                )
-
-            if any(pattern in stripped for pattern in ('f"', ".format(", "% ")):
-                if self._is_in_loop_context_enhanced(lines, i):
-                    repeated_format_calls.append(
-                        {
-                            "line_number": i + 1,
-                            "content": stripped,
-                            "optimization": "Move formatting outside loop if static",
-                        }
-                    )
-
-        total_issues = (
-            len(string_concat_patterns)
-            + len(inefficient_joins)
-            + len(repeated_format_calls)
-        )
-
-        if total_issues > 0:
-            issues.append(
-                {
-                    "type": "string_inefficiencies_enhanced",
-                    "string_concat_patterns": string_concat_patterns,
-                    "inefficient_joins": inefficient_joins,
-                    "repeated_formatting": repeated_format_calls,
-                    "total_count": total_issues,
-                    "suggestion": self._generate_string_suggestions(
-                        string_concat_patterns, inefficient_joins, repeated_format_calls
-                    ),
-                }
-            )
-
-        return issues
-
-    def _analyze_string_context(
-        self, lines: list[str], line_idx: int
-    ) -> dict[str, t.Any]:
-        context = self._create_default_string_context()
-        loop_context = self._find_loop_context_in_lines(lines, line_idx)
-
-        if loop_context:
-            context.update(loop_context)
-
-        return context
-
-    @staticmethod
-    def _create_default_string_context() -> dict[str, t.Any]:
-        return {
-            "loop_type": "unknown",
-            "loop_depth": 1,
-            "impact_factor": "1",
-        }
-
-    def _find_loop_context_in_lines(
-        self, lines: list[str], line_idx: int
-    ) -> dict[str, t.Any] | None:
-        for i in range(max(0, line_idx - 10), line_idx):
-            line = lines[i].strip()
-            loop_context = self._analyze_single_line_for_loop_context(line)
-            if loop_context:
-                return loop_context
-        return None
-
-    def _analyze_single_line_for_loop_context(
-        self, line: str
-    ) -> dict[str, t.Any] | None:
-        if "for " in line and " in " in line:
-            return self._analyze_for_loop_context(line)
-        elif "while " in line:
-            return self._analyze_while_loop_context()
-        return None
-
-    def _analyze_for_loop_context(self, line: str) -> dict[str, t.Any]:
-        context = {"loop_type": "for"}
-
-        if "range(" in line:
-            impact_factor = self._estimate_range_impact_factor(line)
-            context["impact_factor"] = str(impact_factor)
-        else:
-            context["impact_factor"] = "2"
-
-        return context
-
-    @staticmethod
-    def _analyze_while_loop_context() -> dict[str, t.Any]:
-        return {
-            "loop_type": "while",
-            "impact_factor": "3",
-        }
-
-    def _estimate_range_impact_factor(self, line: str) -> int:
         try:
-
-
-
-
-            range_str = pattern_obj.apply(line)
-            range_size = self._extract_range_size_from_string(range_str)
-
-            return self._calculate_impact_from_range_size(range_size)
-        except (ValueError, AttributeError):
-            return 2
-
-    @staticmethod
-    def _extract_range_size_from_string(range_str: str) -> int:
-        import re
-
-        number_match = re.search(r"\d+", range_str)
-        if number_match:
-            return int(number_match.group())
-        return 0
-
-    @staticmethod
-    def _calculate_impact_from_range_size(range_size: int) -> int:
-        if range_size > 1000:
-            return 10
-        elif range_size > 100:
-            return 5
-        return 2
-
-    @staticmethod
-    def _is_in_loop_context_enhanced(lines: list[str], line_index: int) -> bool:
-        context_start = max(0, line_index - 8)
-        context_lines = lines[context_start : line_index + 1]
-
-        for ctx_line in context_lines:
-            pattern_obj = SAFE_PATTERNS["match_loop_patterns"]
-            if pattern_obj.test(ctx_line):
-                return True
-
-        return False
-
-    @staticmethod
-    def _generate_string_suggestions(
-        concat_patterns: list[dict[str, t.Any]],
-        inefficient_joins: list[dict[str, t.Any]],
-        repeated_formatting: list[dict[str, t.Any]],
-    ) -> str:
-        suggestions = []
-
-        if concat_patterns:
-            high_impact = len(
-                [p for p in concat_patterns if p.get("estimated_impact", 1) >= 5]
-            )
-            suggestions.append(
-                f"String concatenation: {len(concat_patterns)} instances "
-                f"({high_impact} high-impact) - use list[t.Any].append + join"
-            )
-
-        if inefficient_joins:
-            suggestions.append(
-                f"Empty joins: {len(inefficient_joins)} - use empty string literal"
+            # Delegate to AST analyzer helper
+            critical_functions = (
+                self._ast_analyzer.extract_performance_critical_functions(content)
             )
 
-
-            suggestions.append(
-                f"Repeated formatting: {len(repeated_formatting)} - cache format strings"
-            )
-
-        suggestions.append("Expected gains: 3-50x for string building in loops")
-        return "; ".join(suggestions)
-
-    def _detect_list_comprehension_opportunities(
-        self, tree: ast.AST
-    ) -> list[dict[str, t.Any]]:
-        issues: list[dict[str, t.Any]] = []
-
-        class ComprehensionAnalyzer(ast.NodeVisitor):
-            def __init__(self) -> None:
-                self.opportunities: list[dict[str, t.Any]] = []
-
-            def visit_For(self, node: ast.For) -> None:
+            for func in critical_functions:
                 if (
-
-
-
-
-
-
-
-
-                        ast.Attribute,
+                    func["estimated_complexity"] > 2
+                ):  # Focus on potentially complex functions
+                    # Search for similar performance patterns
+                    insight = await self.semantic_enhancer.find_similar_patterns(
+                        f"performance {func['signature']} {func['body_sample']}",
+                        current_file=file_path,
+                        min_similarity=0.6,
+                        max_results=8,
                     )
-                    and node.body[0].value.func.attr == "append"
-                ):
-                    self.opportunities.append(
-                        {
-                            "line_number": node.lineno,
-                            "type": "append_loop_to_comprehension",
-                            "optimization": "list_comprehension",
-                            "performance_gain": "20-30% faster",
-                            "readability": "improved",
-                        }
-                    )
-
-                self.generic_visit(node)
 
-
-
-
-
-
-
-
-                    "instances": analyzer.opportunities,
-                    "total_count": len(analyzer.opportunities),
-                    "suggestion": f"Convert {len(analyzer.opportunities)} append loops"
-                    f" to list[t.Any] comprehensions for better performance "
-                    f"and readability",
-                }
-            )
-
-        return issues
-
-    def _detect_inefficient_builtin_usage(
-        self, tree: ast.AST, content: str
-    ) -> list[dict[str, t.Any]]:
-        issues: list[dict[str, t.Any]] = []
-
-        class BuiltinAnalyzer(ast.NodeVisitor):
-            def __init__(self) -> None:
-                self.inefficient_calls: list[dict[str, t.Any]] = []
-                self.in_loop = False
-
-            def visit_For(self, node: ast.For) -> None:
-                old_in_loop = self.in_loop
-                self.in_loop = True
-                self.generic_visit(node)
-                self.in_loop = old_in_loop
-
-            def visit_While(self, node: ast.While) -> None:
-                old_in_loop = self.in_loop
-                self.in_loop = True
-                self.generic_visit(node)
-                self.in_loop = old_in_loop
-
-            def visit_Call(self, node: ast.Call) -> None:
-                if self.in_loop and isinstance(node.func, ast.Name):
-                    func_name = node.func.id
-
-                    if func_name in ("len", "sum", "max", "min", "sorted"):
-                        if node.args and isinstance(node.args[0], ast.Name):
-                            self.inefficient_calls.append(
+                    if insight.total_matches > 1:
+                        # Delegate analysis to AST analyzer helper
+                        analysis = self._ast_analyzer.analyze_performance_patterns(
+                            insight, func
+                        )
+                        if analysis["issues_found"]:
+                            issues.append(
                                 {
-                                    "
-                                    "function":
-                                    "
-                                    "
-
-
+                                    "type": "semantic_performance_pattern",
+                                    "function": func,
+                                    "similar_patterns": insight.related_patterns,
+                                    "performance_analysis": analysis,
+                                    "confidence_score": insight.high_confidence_matches
+                                    / max(insight.total_matches, 1),
+                                    "suggestion": analysis["optimization_suggestion"],
                                 }
                             )
 
-
-
-        analyzer = BuiltinAnalyzer()
-        analyzer.visit(tree)
+                        # Store insight for recommendation enhancement
+                        self.semantic_insights[func["name"]] = insight
 
-
-
-            {
-                "type": "inefficient_builtin_usage",
-                "instances": analyzer.inefficient_calls,
-                "total_count": len(analyzer.inefficient_calls),
-                "suggestion": f"Cache {len(analyzer.inefficient_calls)} "
-                f"repeated builtin calls outside loops",
-            }
-        )
+        except Exception as e:
+            self.log(f"Warning: Semantic performance analysis failed: {e}")
 
         return issues
 
     def _generate_optimization_summary(self) -> str:
-
-
-
-
-
-            f"{opt_type}: {count}"
-            for opt_type, count in self.optimization_stats.items()
-            if count > 0
-        ]
-
-        return (
-            f"Optimization Summary - {', '.join(summary_parts)} "
-            f"(Total: {total_optimizations})"
+        """Generate a summary of optimization results."""
+        total_files = len(self.performance_metrics)
+        total_optimizations = sum(
+            metrics.get("optimizations_applied", 0)
+            for metrics in self.performance_metrics.values()
         )
 
-
-
-
-        issues: list[dict[str, t.Any]],
-    ) -> str:
-        lines = content.split("\n")
-        modified = False
-        optimizations_applied = []
-
-        for issue in issues:
-            result = self._process_single_issue(lines, issue)
-            if result.modified:
-                lines = result.lines
-                modified = True
-                if result.optimization_description:
-                    optimizations_applied.append(result.optimization_description)
-
-        if optimizations_applied:
-            self.log(f"Applied optimizations: {', '.join(optimizations_applied)}")
-
-        return "\n".join(lines) if modified else content
-
-    def _process_single_issue(
-        self, lines: list[str], issue: dict[str, t.Any]
-    ) -> OptimizationResult:
-        issue_type = issue["type"]
-
-        if issue_type in (
-            "inefficient_list_operations",
-            "inefficient_list_operations_enhanced",
-        ):
-            return self._handle_list_operations_issue(lines, issue)
-        elif issue_type in (
-            "string_concatenation_in_loop",
-            "string_inefficiencies_enhanced",
-        ):
-            return self._handle_string_operations_issue(lines, issue)
-        elif issue_type == "repeated_expensive_operations":
-            return self._handle_repeated_operations_issue(lines, issue)
-        elif issue_type in ("nested_loops", "nested_loops_enhanced"):
-            return self._handle_nested_loops_issue(lines, issue)
-        elif issue_type == "list_comprehension_opportunities":
-            return self._handle_comprehension_opportunities_issue(lines, issue)
-        elif issue_type == "inefficient_builtin_usage":
-            return self._handle_builtin_usage_issue(lines, issue)
-        return self._create_no_change_result(lines)
-
-    def _handle_list_operations_issue(
-        self, lines: list[str], issue: dict[str, t.Any]
-    ) -> OptimizationResult:
-        new_lines, changed = self._fix_list_operations_enhanced(lines, issue)
-        description = None
-
-        if changed:
-            instance_count = len(issue.get("instances", []))
-            self.optimization_stats["list_ops_optimized"] += instance_count
-            description = f"List operations: {instance_count}"
-
-        return self._create_optimization_result(new_lines, changed, description)
-
-    def _handle_string_operations_issue(
-        self, lines: list[str], issue: dict[str, t.Any]
-    ) -> OptimizationResult:
-        new_lines, changed = self._fix_string_operations_enhanced(lines, issue)
-        description = None
-
-        if changed:
-            total_string_fixes = (
-                len(issue.get("string_concat_patterns", []))
-                + len(issue.get("inefficient_joins", []))
-                + len(issue.get("repeated_formatting", []))
-            )
-            self.optimization_stats["string_concat_optimized"] += total_string_fixes
-            description = f"String operations: {total_string_fixes}"
-
-        return self._create_optimization_result(new_lines, changed, description)
-
-    def _handle_repeated_operations_issue(
-        self, lines: list[str], issue: dict[str, t.Any]
-    ) -> OptimizationResult:
-        new_lines, changed = self._fix_repeated_operations(lines, issue)
-
-        if changed:
-            self.optimization_stats["repeated_ops_cached"] += len(
-                issue.get("instances", [])
-            )
-
-        return self._create_optimization_result(new_lines, changed)
-
-    def _handle_nested_loops_issue(
-        self, lines: list[str], issue: dict[str, t.Any]
-    ) -> OptimizationResult:
-        new_lines, changed = self._add_nested_loop_comments(lines, issue)
-
-        if changed:
-            self.optimization_stats["nested_loops_optimized"] += len(
-                issue.get("instances", [])
-            )
-
-        return self._create_optimization_result(new_lines, changed)
-
-    def _handle_comprehension_opportunities_issue(
-        self, lines: list[str], issue: dict[str, t.Any]
-    ) -> OptimizationResult:
-        new_lines, changed = self._apply_list_comprehension_optimizations(lines, issue)
-
-        if changed:
-            self.optimization_stats["comprehensions_applied"] += len(
-                issue.get("instances", [])
-            )
-
-        return self._create_optimization_result(new_lines, changed)
-
-    def _handle_builtin_usage_issue(
-        self, lines: list[str], issue: dict[str, t.Any]
-    ) -> OptimizationResult:
-        new_lines, changed = self._add_builtin_caching_comments(lines, issue)
-        return self._create_optimization_result(new_lines, changed)
-
-    @staticmethod
-    def _create_optimization_result(
-        lines: list[str], modified: bool, description: str | None = None
-    ) -> OptimizationResult:
-        return OptimizationResult(
-            lines=lines, modified=modified, optimization_description=description
+        total_time = sum(
+            metrics.get("analysis_duration", 0)
+            for metrics in self.performance_metrics.values()
         )
 
-
-
-
-
-        )
-
-    @staticmethod
-    def _fix_list_operations_enhanced(
-        lines: list[str],
-        issue: dict[str, t.Any],
-    ) -> tuple[list[str], bool]:
-        modified = False
-
-        instances = sorted(
-            issue["instances"],
-            key=operator.itemgetter("line_number"),
-            reverse=True,
+        return (
+            f"Performance optimization summary: "
+            f"{total_optimizations} optimizations applied across {total_files} files "
+            f"in {total_time:.2f}s total"
         )
 
-
-
-
-
-
-            optimization_type = instance.get(
-                "optimization",
-                "append",
-            )
-
-            if optimization_type == "append":
-                list_pattern = SAFE_PATTERNS["list_append_inefficiency_pattern"]
-                if list_pattern.test(original_line):
-                    optimized_line = list_pattern.apply(original_line)
-                    lines[line_idx] = optimized_line
-                    modified = True
+    async def _generate_enhanced_recommendations(
+        self, issues: list[dict[str, t.Any]]
+    ) -> list[str]:
+        """Generate enhanced recommendations including semantic insights."""
+        recommendations = ["Test performance improvements with benchmarks"]
 
-
-
-
-
-
-
-
-
-                    lines.insert(line_idx, comment)
-
-            elif optimization_type == "extend":
-                extend_pattern = SAFE_PATTERNS["list_extend_optimization_pattern"]
-                if extend_pattern.test(original_line):
-                    optimized_line = extend_pattern.apply(original_line)
-                    lines[line_idx] = optimized_line
-                    modified = True
-
-                    indent = original_line[
-                        : len(original_line) - len(original_line.lstrip())
-                    ]
-                    performance_gain = instance.get("performance_gain", "x")
-                    impact_factor = int(instance.get("impact_factor", "1"))
-                    comment = (
-                        f"{indent}# Performance: {performance_gain} "
-                        f"improvement, impact factor: {impact_factor}"
-                    )
-                    lines.insert(line_idx, comment)
-
-        return lines, modified
-
-    def _fix_string_operations_enhanced(
-        self,
-        lines: list[str],
-        issue: dict[str, t.Any],
-    ) -> tuple[list[str], bool]:
-        modified = False
-
-        concat_patterns = issue.get("string_concat_patterns", [])
-        if concat_patterns:
-            lines, concat_modified = self._fix_string_concatenation(
-                lines, {"instances": concat_patterns}
+        # Add semantic insights
+        semantic_issues = [
+            issue for issue in issues if issue["type"] == "semantic_performance_pattern"
+        ]
+        if semantic_issues:
+            recommendations.append(
+                f"Semantic analysis found {len(semantic_issues)} similar performance patterns "
+                "across codebase - consider applying optimizations consistently"
             )
-            modified = modified or concat_modified
-
-        inefficient_joins = issue.get("inefficient_joins", [])
-        for join_issue in inefficient_joins:
-            line_idx = join_issue["line_number"] - 1
-            if line_idx < len(lines):
-                original_line = lines[line_idx]
-                join_pattern = SAFE_PATTERNS["inefficient_string_join_pattern"]
-                if join_pattern.test(original_line):
-                    lines[line_idx] = join_pattern.apply(original_line)
-                    modified = True
-
-        repeated_formatting = issue.get("repeated_formatting", [])
-        for format_issue in repeated_formatting:
-            line_idx = format_issue["line_number"] - 1
-            if line_idx < len(lines):
-                original_line = lines[line_idx]
-                indent = original_line[
-                    : len(original_line) - len(original_line.lstrip())
-                ]
-                comment = (
-                    f"{indent}# Performance: Consider caching format string "
-                    f"outside loop"
-                )
-                lines.insert(line_idx, comment)
-                modified = True
-
-        return lines, modified
-
-    @staticmethod
-    def _add_nested_loop_comments(
-        lines: list[str],
-        issue: dict[str, t.Any],
-    ) -> tuple[list[str], bool]:
-        modified = False
-
-        instances = issue.get("instances", [])
-        for instance in sorted(
-            instances, key=operator.itemgetter("line_number"), reverse=True
-        ):
-            line_idx = instance["line_number"] - 1
-            if line_idx < len(lines):
-                original_line = lines[line_idx]
-                indent = original_line[
-                    : len(original_line) - len(original_line.lstrip())
-                ]
-
-                complexity = instance.get("complexity", "O(n²)")
-                priority = instance.get("priority", "medium")
```
1034
|
-
|
|
1035
|
-
comment_lines = [
|
|
1036
|
-
f"{indent}# Performance: {complexity} nested loop detected -"
|
|
1037
|
-
f" {priority} priority",
|
|
1038
|
-
]
|
|
1039
296
|
|
|
1040
|
-
|
|
1041
|
-
|
|
1042
|
-
|
|
1043
|
-
|
|
1044
|
-
|
|
1045
|
-
)
|
|
1046
|
-
else:
|
|
1047
|
-
comment_lines.append(
|
|
1048
|
-
f"{indent}# Suggestion: Consider memoization, caching, "
|
|
1049
|
-
f" or hash tables"
|
|
1050
|
-
)
|
|
1051
|
-
|
|
1052
|
-
for i, comment in enumerate(comment_lines):
|
|
1053
|
-
lines.insert(line_idx + i, comment)
|
|
1054
|
-
|
|
1055
|
-
modified = True
|
|
1056
|
-
|
|
1057
|
-
return lines, modified
|
|
1058
|
-
|
|
1059
|
-
@staticmethod
|
|
1060
|
-
def _apply_list_comprehension_optimizations(
|
|
1061
|
-
lines: list[str],
|
|
1062
|
-
issue: dict[str, t.Any],
|
|
1063
|
-
) -> tuple[list[str], bool]:
|
|
1064
|
-
modified = False
|
|
1065
|
-
|
|
1066
|
-
instances = issue.get("instances", [])
|
|
1067
|
-
for instance in sorted(
|
|
1068
|
-
instances, key=operator.itemgetter("line_number"), reverse=True
|
|
1069
|
-
):
|
|
1070
|
-
line_idx = instance["line_number"] - 1
|
|
1071
|
-
if line_idx < len(lines):
|
|
1072
|
-
original_line = lines[line_idx]
|
|
1073
|
-
indent = original_line[
|
|
1074
|
-
: len(original_line) - len(original_line.lstrip())
|
|
1075
|
-
]
|
|
1076
|
-
|
|
1077
|
-
comment = (
|
|
1078
|
-
f"{indent}# Performance: Consider list[t.Any] comprehension for "
|
|
1079
|
-
f"20-30% improvement"
|
|
1080
|
-
)
|
|
1081
|
-
lines.insert(line_idx, comment)
|
|
1082
|
-
modified = True
|
|
1083
|
-
|
|
1084
|
-
return lines, modified
|
|
1085
|
-
|
|
1086
|
-
@staticmethod
|
|
1087
|
-
def _add_builtin_caching_comments(
|
|
1088
|
-
lines: list[str],
|
|
1089
|
-
issue: dict[str, t.Any],
|
|
1090
|
-
) -> tuple[list[str], bool]:
|
|
1091
|
-
modified = False
|
|
1092
|
-
|
|
1093
|
-
instances = issue.get("instances", [])
|
|
1094
|
-
for instance in sorted(
|
|
1095
|
-
instances, key=operator.itemgetter("line_number"), reverse=True
|
|
1096
|
-
):
|
|
1097
|
-
line_idx = instance["line_number"] - 1
|
|
1098
|
-
if line_idx < len(lines):
|
|
1099
|
-
original_line = lines[line_idx]
|
|
1100
|
-
indent = original_line[
|
|
1101
|
-
: len(original_line) - len(original_line.lstrip())
|
|
1102
|
-
]
|
|
1103
|
-
|
|
1104
|
-
func_name = instance.get("function", "builtin")
|
|
1105
|
-
performance_gain = instance.get(
|
|
1106
|
-
"performance_gain",
|
|
1107
|
-
"2-10x",
|
|
1108
|
-
)
|
|
1109
|
-
|
|
1110
|
-
comment = (
|
|
1111
|
-
f"{indent}# Performance: Cache {func_name}() result outside"
|
|
1112
|
-
f" loop for {performance_gain} improvement"
|
|
1113
|
-
)
|
|
1114
|
-
lines.insert(line_idx, comment)
|
|
1115
|
-
modified = True
|
|
1116
|
-
|
|
1117
|
-
return lines, modified
|
|
1118
|
-
|
|
1119
|
-
@staticmethod
|
|
1120
|
-
def _fix_list_operations(
|
|
1121
|
-
lines: list[str],
|
|
1122
|
-
issue: dict[str, t.Any],
|
|
1123
|
-
) -> tuple[list[str], bool]:
|
|
1124
|
-
modified = False
|
|
1125
|
-
|
|
1126
|
-
instances = sorted(
|
|
1127
|
-
issue["instances"],
|
|
1128
|
-
key=operator.itemgetter("line_number"),
|
|
1129
|
-
reverse=True,
|
|
1130
|
-
)
|
|
1131
|
-
|
|
1132
|
-
list_pattern = SAFE_PATTERNS["list_append_inefficiency_pattern"]
|
|
1133
|
-
|
|
1134
|
-
for instance in instances:
|
|
1135
|
-
line_idx = instance["line_number"] - 1
|
|
1136
|
-
if line_idx < len(lines):
|
|
1137
|
-
original_line = lines[line_idx]
|
|
1138
|
-
|
|
1139
|
-
if list_pattern.test(original_line):
|
|
1140
|
-
optimized_line = list_pattern.apply(original_line)
|
|
1141
|
-
lines[line_idx] = optimized_line
|
|
1142
|
-
modified = True
|
|
1143
|
-
|
|
1144
|
-
indent = original_line[
|
|
1145
|
-
: len(original_line) - len(original_line.lstrip())
|
|
1146
|
-
]
|
|
1147
|
-
comment = (
|
|
1148
|
-
f"{indent}# Performance: Changed += [item] to .append(item)"
|
|
1149
|
-
)
|
|
1150
|
-
lines.insert(line_idx, comment)
|
|
1151
|
-
|
|
1152
|
-
return lines, modified
|
|
1153
|
-
|
|
1154
|
-
def _fix_string_concatenation(
|
|
1155
|
-
self,
|
|
1156
|
-
lines: list[str],
|
|
1157
|
-
issue: dict[str, t.Any],
|
|
1158
|
-
) -> tuple[list[str], bool]:
|
|
1159
|
-
var_groups = self._group_concatenation_instances(lines, issue["instances"])
|
|
1160
|
-
return self._apply_concatenation_optimizations(lines, var_groups)
|
|
1161
|
-
|
|
1162
|
-
def _group_concatenation_instances(
|
|
1163
|
-
self,
|
|
1164
|
-
lines: list[str],
|
|
1165
|
-
instances: list[dict[str, t.Any]],
|
|
1166
|
-
) -> dict[str, list[dict[str, t.Any]]]:
|
|
1167
|
-
var_groups: dict[str, list[dict[str, t.Any]]] = {}
|
|
1168
|
-
|
|
1169
|
-
for instance in instances:
|
|
1170
|
-
line_info = self._parse_concatenation_line(lines, instance)
|
|
1171
|
-
if line_info:
|
|
1172
|
-
var_name = line_info["var_name"]
|
|
1173
|
-
if var_name not in var_groups:
|
|
1174
|
-
var_groups[var_name] = []
|
|
1175
|
-
var_groups[var_name].append(line_info)
|
|
1176
|
-
|
|
1177
|
-
return var_groups
|
|
1178
|
-
|
|
1179
|
-
@staticmethod
|
|
1180
|
-
def _parse_concatenation_line(
|
|
1181
|
-
lines: list[str],
|
|
1182
|
-
instance: dict[str, t.Any],
|
|
1183
|
-
) -> dict[str, t.Any] | None:
|
|
1184
|
-
line_idx = instance["line_number"] - 1
|
|
1185
|
-
if line_idx >= len(lines):
|
|
1186
|
-
return None
|
|
1187
|
-
|
|
1188
|
-
original_line = lines[line_idx]
|
|
1189
|
-
|
|
1190
|
-
concat_pattern = SAFE_PATTERNS["string_concatenation_pattern"]
|
|
1191
|
-
if concat_pattern.test(original_line):
|
|
1192
|
-
compiled = concat_pattern._get_compiled_pattern()
|
|
1193
|
-
match = compiled.match(original_line)
|
|
1194
|
-
if match:
|
|
1195
|
-
indent, var_name, expr = match.groups()
|
|
1196
|
-
return {
|
|
1197
|
-
"line_idx": line_idx,
|
|
1198
|
-
"indent": indent,
|
|
1199
|
-
"var_name": var_name,
|
|
1200
|
-
"expr": expr.strip(),
|
|
1201
|
-
"original_line": original_line,
|
|
1202
|
-
}
|
|
1203
|
-
return None
|
|
1204
|
-
|
|
1205
|
-
def _apply_concatenation_optimizations(
|
|
1206
|
-
self,
|
|
1207
|
-
lines: list[str],
|
|
1208
|
-
var_groups: dict[str, list[dict[str, t.Any]]],
|
|
1209
|
-
) -> tuple[list[str], bool]:
|
|
1210
|
-
modified = False
|
|
1211
|
-
|
|
1212
|
-
for var_name, instances in var_groups.items():
|
|
1213
|
-
if instances:
|
|
1214
|
-
first_instance = instances[0]
|
|
1215
|
-
loop_start = self._find_loop_start(lines, first_instance["line_idx"])
|
|
1216
|
-
|
|
1217
|
-
if loop_start is not None:
|
|
1218
|
-
optimization_applied = self._apply_string_building_optimization(
|
|
1219
|
-
lines, var_name, instances, loop_start
|
|
297
|
+
# Store insights for session continuity
|
|
298
|
+
for issue in semantic_issues:
|
|
299
|
+
if "semantic_insight" in issue:
|
|
300
|
+
await self.semantic_enhancer.store_insight_to_session(
|
|
301
|
+
issue["semantic_insight"], "PerformanceAgent"
|
|
1220
302
|
)
|
|
1221
|
-
modified = modified or optimization_applied
|
|
1222
|
-
|
|
1223
|
-
return lines, modified
|
|
1224
|
-
|
|
1225
|
-
@staticmethod
|
|
1226
|
-
def _find_loop_start(lines: list[str], start_idx: int) -> int | None:
|
|
1227
|
-
for i in range(start_idx, -1, -1):
|
|
1228
|
-
line = lines[i].strip()
|
|
1229
|
-
if line.startswith(("for ", "while ")):
|
|
1230
|
-
return i
|
|
1231
303
|
|
|
1232
|
-
|
|
1233
|
-
|
|
1234
|
-
|
|
1235
|
-
|
|
1236
|
-
def _apply_string_building_optimization(
|
|
1237
|
-
self,
|
|
1238
|
-
lines: list[str],
|
|
1239
|
-
var_name: str,
|
|
1240
|
-
instances: list[dict[str, t.Any]],
|
|
1241
|
-
loop_start: int,
|
|
1242
|
-
) -> bool:
|
|
1243
|
-
if not instances:
|
|
1244
|
-
return False
|
|
1245
|
-
|
|
1246
|
-
first_instance = instances[0]
|
|
1247
|
-
indent = first_instance["indent"]
|
|
1248
|
-
|
|
1249
|
-
init_line_idx = self._find_variable_initialization(lines, var_name, loop_start)
|
|
1250
|
-
if init_line_idx is not None:
|
|
1251
|
-
self._transform_string_initialization(
|
|
1252
|
-
lines, init_line_idx, var_name, indent
|
|
1253
|
-
)
|
|
1254
|
-
self._replace_concatenations_with_appends(
|
|
1255
|
-
lines, instances, var_name, indent
|
|
1256
|
-
)
|
|
1257
|
-
self._add_join_after_loop(lines, var_name, indent, loop_start)
|
|
1258
|
-
return True
|
|
1259
|
-
|
|
1260
|
-
return False
|
|
1261
|
-
|
|
1262
|
-
@staticmethod
|
|
1263
|
-
def _find_variable_initialization(
|
|
1264
|
-
lines: list[str],
|
|
1265
|
-
var_name: str,
|
|
1266
|
-
loop_start: int,
|
|
1267
|
-
) -> int | None:
|
|
1268
|
-
search_start = max(0, loop_start - 10)
|
|
1269
|
-
|
|
1270
|
-
for i in range(loop_start - 1, search_start - 1, -1):
|
|
1271
|
-
line = lines[i].strip()
|
|
1272
|
-
if f"{var_name} =" in line and '""' in line:
|
|
1273
|
-
return i
|
|
1274
|
-
return None
|
|
1275
|
-
|
|
1276
|
-
@staticmethod
|
|
1277
|
-
def _transform_string_initialization(
|
|
1278
|
-
lines: list[str],
|
|
1279
|
-
init_line_idx: int,
|
|
1280
|
-
var_name: str,
|
|
1281
|
-
indent: str,
|
|
1282
|
-
) -> None:
|
|
1283
|
-
lines[init_line_idx] = (
|
|
1284
|
-
f"{indent}{var_name}_parts = [] # Performance: Use list[t.Any] for string building"
|
|
304
|
+
# Enhance with session-stored insights
|
|
305
|
+
recommendations = await get_session_enhanced_recommendations(
|
|
306
|
+
recommendations, "PerformanceAgent", self.context.project_path
|
|
1285
307
|
)
|
|
1286
308
|
|
|
1287
|
-
|
|
1288
|
-
|
|
1289
|
-
|
|
1290
|
-
|
|
1291
|
-
|
|
1292
|
-
|
|
1293
|
-
) -> None:
|
|
1294
|
-
for instance in instances:
|
|
1295
|
-
line_idx = instance["line_idx"]
|
|
1296
|
-
expr = instance["expr"]
|
|
1297
|
-
lines[line_idx] = f"{indent}{var_name}_parts.append({expr})"
|
|
1298
|
-
|
|
1299
|
-
def _add_join_after_loop(
|
|
1300
|
-
self,
|
|
1301
|
-
lines: list[str],
|
|
1302
|
-
var_name: str,
|
|
1303
|
-
indent: str,
|
|
1304
|
-
loop_start: int,
|
|
1305
|
-
) -> None:
|
|
1306
|
-
loop_end = self._find_loop_end(lines, loop_start)
|
|
1307
|
-
if loop_end is not None:
|
|
1308
|
-
join_line = (
|
|
1309
|
-
f"{indent}{var_name} = ''.join({var_name}_parts) # Performance: "
|
|
1310
|
-
f" Join string parts"
|
|
1311
|
-
)
|
|
1312
|
-
lines.insert(loop_end + 1, join_line)
|
|
1313
|
-
|
|
1314
|
-
@staticmethod
|
|
1315
|
-
def _find_loop_end(lines: list[str], loop_start: int) -> int | None:
|
|
1316
|
-
if loop_start >= len(lines):
|
|
1317
|
-
return None
|
|
1318
|
-
|
|
1319
|
-
loop_indent = len(lines[loop_start]) - len(lines[loop_start].lstrip())
|
|
1320
|
-
|
|
1321
|
-
for i in range(loop_start + 1, len(lines)):
|
|
1322
|
-
line = lines[i]
|
|
1323
|
-
if line.strip() == "":
|
|
1324
|
-
continue
|
|
1325
|
-
|
|
1326
|
-
current_indent = len(line) - len(line.lstrip())
|
|
1327
|
-
if current_indent <= loop_indent:
|
|
1328
|
-
return i - 1
|
|
1329
|
-
|
|
1330
|
-
return len(lines) - 1
|
|
1331
|
-
|
|
1332
|
-
@staticmethod
|
|
1333
|
-
def _fix_repeated_operations(
|
|
1334
|
-
lines: list[str],
|
|
1335
|
-
issue: dict[str, t.Any],
|
|
1336
|
-
) -> tuple[list[str], bool]:
|
|
1337
|
-
modified = False
|
|
1338
|
-
|
|
1339
|
-
for instance in issue["instances"]:
|
|
1340
|
-
line_idx = instance["line_number"] - 1
|
|
1341
|
-
if line_idx < len(lines):
|
|
1342
|
-
original_line = lines[line_idx]
|
|
1343
|
-
indent_level = len(original_line) - len(original_line.lstrip())
|
|
1344
|
-
indent_str = " " * indent_level
|
|
1345
|
-
|
|
1346
|
-
comment = (
|
|
1347
|
-
f"{indent_str}# Performance: Consider caching this expensive"
|
|
1348
|
-
f" operation outside the loop"
|
|
309
|
+
# Add insights from stored semantic analysis
|
|
310
|
+
for func_name, insight in self.semantic_insights.items():
|
|
311
|
+
if insight.high_confidence_matches > 0:
|
|
312
|
+
enhanced_recs = self.semantic_enhancer.enhance_recommendations(
|
|
313
|
+
[], # Start with empty list to get just semantic recommendations
|
|
314
|
+
insight,
|
|
1349
315
|
)
|
|
1350
|
-
|
|
1351
|
-
|
|
316
|
+
recommendations.extend(enhanced_recs)
|
|
317
|
+
|
|
318
|
+
# Log semantic context for debugging
|
|
319
|
+
summary = self.semantic_enhancer.get_semantic_context_summary(insight)
|
|
320
|
+
self.log(f"Performance semantic context for {func_name}: {summary}")
|
|
1352
321
|
|
|
1353
|
-
return
|
|
322
|
+
return recommendations
|
|
1354
323
|
|
|
1355
324
|
|
|
1356
325
|
agent_registry.register(PerformanceAgent)
|
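
For readers skimming the reconstructed hunk: the `-` side removes the PerformanceAgent's in-place line-patching helpers (comment insertion, `+= [item]` to `.append(item)` rewrites, string-concatenation-to-join transforms), while the `+` side replaces them with an async recommendation flow, `_generate_enhanced_recommendations`, that folds semantic matches and session-stored insights into a list of human-readable suggestions. Below is a minimal, self-contained sketch of that flow's core shape, not crackerjack's actual API: `build_recommendations` and `demo_issues` are hypothetical names, the `semantic_enhancer` and session-store calls are omitted because they depend on internals not visible in this diff, and only the issue-dict shape that appears in the hunk is assumed.

```python
import asyncio
import typing as t


async def build_recommendations(issues: list[dict[str, t.Any]]) -> list[str]:
    """Toy version of the recommendation flow on the + side above."""
    # Baseline recommendation, as seeded at new line 285.
    recommendations = ["Test performance improvements with benchmarks"]

    # Aggregate semantic matches into one suggestion, as at new lines 288-295.
    semantic = [i for i in issues if i.get("type") == "semantic_performance_pattern"]
    if semantic:
        recommendations.append(
            f"Semantic analysis found {len(semantic)} similar performance patterns "
            "across codebase - consider applying optimizations consistently"
        )
    return recommendations


if __name__ == "__main__":
    demo_issues = [
        {"type": "semantic_performance_pattern"},
        {"type": "nested_loops"},
        {"type": "semantic_performance_pattern"},
    ]
    print(asyncio.run(build_recommendations(demo_issues)))
```

Running the sketch prints the baseline recommendation plus one aggregated semantic suggestion, which mirrors how the real method builds `recommendations` before the enhancer and session layers extend it.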