attune-ai 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- attune/__init__.py +358 -0
- attune/adaptive/__init__.py +13 -0
- attune/adaptive/task_complexity.py +127 -0
- attune/agent_monitoring.py +414 -0
- attune/cache/__init__.py +117 -0
- attune/cache/base.py +166 -0
- attune/cache/dependency_manager.py +256 -0
- attune/cache/hash_only.py +251 -0
- attune/cache/hybrid.py +457 -0
- attune/cache/storage.py +285 -0
- attune/cache_monitor.py +356 -0
- attune/cache_stats.py +298 -0
- attune/cli/__init__.py +152 -0
- attune/cli/__main__.py +12 -0
- attune/cli/commands/__init__.py +1 -0
- attune/cli/commands/batch.py +264 -0
- attune/cli/commands/cache.py +248 -0
- attune/cli/commands/help.py +331 -0
- attune/cli/commands/info.py +140 -0
- attune/cli/commands/inspect.py +436 -0
- attune/cli/commands/inspection.py +57 -0
- attune/cli/commands/memory.py +48 -0
- attune/cli/commands/metrics.py +92 -0
- attune/cli/commands/orchestrate.py +184 -0
- attune/cli/commands/patterns.py +207 -0
- attune/cli/commands/profiling.py +202 -0
- attune/cli/commands/provider.py +98 -0
- attune/cli/commands/routing.py +285 -0
- attune/cli/commands/setup.py +96 -0
- attune/cli/commands/status.py +235 -0
- attune/cli/commands/sync.py +166 -0
- attune/cli/commands/tier.py +121 -0
- attune/cli/commands/utilities.py +114 -0
- attune/cli/commands/workflow.py +579 -0
- attune/cli/core.py +32 -0
- attune/cli/parsers/__init__.py +68 -0
- attune/cli/parsers/batch.py +118 -0
- attune/cli/parsers/cache.py +65 -0
- attune/cli/parsers/help.py +41 -0
- attune/cli/parsers/info.py +26 -0
- attune/cli/parsers/inspect.py +66 -0
- attune/cli/parsers/metrics.py +42 -0
- attune/cli/parsers/orchestrate.py +61 -0
- attune/cli/parsers/patterns.py +54 -0
- attune/cli/parsers/provider.py +40 -0
- attune/cli/parsers/routing.py +110 -0
- attune/cli/parsers/setup.py +42 -0
- attune/cli/parsers/status.py +47 -0
- attune/cli/parsers/sync.py +31 -0
- attune/cli/parsers/tier.py +33 -0
- attune/cli/parsers/workflow.py +77 -0
- attune/cli/utils/__init__.py +1 -0
- attune/cli/utils/data.py +242 -0
- attune/cli/utils/helpers.py +68 -0
- attune/cli_legacy.py +3957 -0
- attune/cli_minimal.py +1159 -0
- attune/cli_router.py +437 -0
- attune/cli_unified.py +814 -0
- attune/config/__init__.py +66 -0
- attune/config/xml_config.py +286 -0
- attune/config.py +545 -0
- attune/coordination.py +870 -0
- attune/core.py +1511 -0
- attune/core_modules/__init__.py +15 -0
- attune/cost_tracker.py +626 -0
- attune/dashboard/__init__.py +41 -0
- attune/dashboard/app.py +512 -0
- attune/dashboard/simple_server.py +435 -0
- attune/dashboard/standalone_server.py +547 -0
- attune/discovery.py +306 -0
- attune/emergence.py +306 -0
- attune/exceptions.py +123 -0
- attune/feedback_loops.py +373 -0
- attune/hot_reload/README.md +473 -0
- attune/hot_reload/__init__.py +62 -0
- attune/hot_reload/config.py +83 -0
- attune/hot_reload/integration.py +229 -0
- attune/hot_reload/reloader.py +298 -0
- attune/hot_reload/watcher.py +183 -0
- attune/hot_reload/websocket.py +177 -0
- attune/levels.py +577 -0
- attune/leverage_points.py +441 -0
- attune/logging_config.py +261 -0
- attune/mcp/__init__.py +10 -0
- attune/mcp/server.py +506 -0
- attune/memory/__init__.py +237 -0
- attune/memory/claude_memory.py +469 -0
- attune/memory/config.py +224 -0
- attune/memory/control_panel.py +1290 -0
- attune/memory/control_panel_support.py +145 -0
- attune/memory/cross_session.py +845 -0
- attune/memory/edges.py +179 -0
- attune/memory/encryption.py +159 -0
- attune/memory/file_session.py +770 -0
- attune/memory/graph.py +570 -0
- attune/memory/long_term.py +913 -0
- attune/memory/long_term_types.py +99 -0
- attune/memory/mixins/__init__.py +25 -0
- attune/memory/mixins/backend_init_mixin.py +249 -0
- attune/memory/mixins/capabilities_mixin.py +208 -0
- attune/memory/mixins/handoff_mixin.py +208 -0
- attune/memory/mixins/lifecycle_mixin.py +49 -0
- attune/memory/mixins/long_term_mixin.py +352 -0
- attune/memory/mixins/promotion_mixin.py +109 -0
- attune/memory/mixins/short_term_mixin.py +182 -0
- attune/memory/nodes.py +179 -0
- attune/memory/redis_bootstrap.py +540 -0
- attune/memory/security/__init__.py +31 -0
- attune/memory/security/audit_logger.py +932 -0
- attune/memory/security/pii_scrubber.py +640 -0
- attune/memory/security/secrets_detector.py +678 -0
- attune/memory/short_term.py +2192 -0
- attune/memory/simple_storage.py +302 -0
- attune/memory/storage/__init__.py +15 -0
- attune/memory/storage_backend.py +167 -0
- attune/memory/summary_index.py +583 -0
- attune/memory/types.py +446 -0
- attune/memory/unified.py +182 -0
- attune/meta_workflows/__init__.py +74 -0
- attune/meta_workflows/agent_creator.py +248 -0
- attune/meta_workflows/builtin_templates.py +567 -0
- attune/meta_workflows/cli_commands/__init__.py +56 -0
- attune/meta_workflows/cli_commands/agent_commands.py +321 -0
- attune/meta_workflows/cli_commands/analytics_commands.py +442 -0
- attune/meta_workflows/cli_commands/config_commands.py +232 -0
- attune/meta_workflows/cli_commands/memory_commands.py +182 -0
- attune/meta_workflows/cli_commands/template_commands.py +354 -0
- attune/meta_workflows/cli_commands/workflow_commands.py +382 -0
- attune/meta_workflows/cli_meta_workflows.py +59 -0
- attune/meta_workflows/form_engine.py +292 -0
- attune/meta_workflows/intent_detector.py +409 -0
- attune/meta_workflows/models.py +569 -0
- attune/meta_workflows/pattern_learner.py +738 -0
- attune/meta_workflows/plan_generator.py +384 -0
- attune/meta_workflows/session_context.py +397 -0
- attune/meta_workflows/template_registry.py +229 -0
- attune/meta_workflows/workflow.py +984 -0
- attune/metrics/__init__.py +12 -0
- attune/metrics/collector.py +31 -0
- attune/metrics/prompt_metrics.py +194 -0
- attune/models/__init__.py +172 -0
- attune/models/__main__.py +13 -0
- attune/models/adaptive_routing.py +437 -0
- attune/models/auth_cli.py +444 -0
- attune/models/auth_strategy.py +450 -0
- attune/models/cli.py +655 -0
- attune/models/empathy_executor.py +354 -0
- attune/models/executor.py +257 -0
- attune/models/fallback.py +762 -0
- attune/models/provider_config.py +282 -0
- attune/models/registry.py +472 -0
- attune/models/tasks.py +359 -0
- attune/models/telemetry/__init__.py +71 -0
- attune/models/telemetry/analytics.py +594 -0
- attune/models/telemetry/backend.py +196 -0
- attune/models/telemetry/data_models.py +431 -0
- attune/models/telemetry/storage.py +489 -0
- attune/models/token_estimator.py +420 -0
- attune/models/validation.py +280 -0
- attune/monitoring/__init__.py +52 -0
- attune/monitoring/alerts.py +946 -0
- attune/monitoring/alerts_cli.py +448 -0
- attune/monitoring/multi_backend.py +271 -0
- attune/monitoring/otel_backend.py +362 -0
- attune/optimization/__init__.py +19 -0
- attune/optimization/context_optimizer.py +272 -0
- attune/orchestration/__init__.py +67 -0
- attune/orchestration/agent_templates.py +707 -0
- attune/orchestration/config_store.py +499 -0
- attune/orchestration/execution_strategies.py +2111 -0
- attune/orchestration/meta_orchestrator.py +1168 -0
- attune/orchestration/pattern_learner.py +696 -0
- attune/orchestration/real_tools.py +931 -0
- attune/pattern_cache.py +187 -0
- attune/pattern_library.py +542 -0
- attune/patterns/debugging/all_patterns.json +81 -0
- attune/patterns/debugging/workflow_20260107_1770825e.json +77 -0
- attune/patterns/refactoring_memory.json +89 -0
- attune/persistence.py +564 -0
- attune/platform_utils.py +265 -0
- attune/plugins/__init__.py +28 -0
- attune/plugins/base.py +361 -0
- attune/plugins/registry.py +268 -0
- attune/project_index/__init__.py +32 -0
- attune/project_index/cli.py +335 -0
- attune/project_index/index.py +667 -0
- attune/project_index/models.py +504 -0
- attune/project_index/reports.py +474 -0
- attune/project_index/scanner.py +777 -0
- attune/project_index/scanner_parallel.py +291 -0
- attune/prompts/__init__.py +61 -0
- attune/prompts/config.py +77 -0
- attune/prompts/context.py +177 -0
- attune/prompts/parser.py +285 -0
- attune/prompts/registry.py +313 -0
- attune/prompts/templates.py +208 -0
- attune/redis_config.py +302 -0
- attune/redis_memory.py +799 -0
- attune/resilience/__init__.py +56 -0
- attune/resilience/circuit_breaker.py +256 -0
- attune/resilience/fallback.py +179 -0
- attune/resilience/health.py +300 -0
- attune/resilience/retry.py +209 -0
- attune/resilience/timeout.py +135 -0
- attune/routing/__init__.py +43 -0
- attune/routing/chain_executor.py +433 -0
- attune/routing/classifier.py +217 -0
- attune/routing/smart_router.py +234 -0
- attune/routing/workflow_registry.py +343 -0
- attune/scaffolding/README.md +589 -0
- attune/scaffolding/__init__.py +35 -0
- attune/scaffolding/__main__.py +14 -0
- attune/scaffolding/cli.py +240 -0
- attune/scaffolding/templates/base_wizard.py.jinja2 +121 -0
- attune/scaffolding/templates/coach_wizard.py.jinja2 +321 -0
- attune/scaffolding/templates/domain_wizard.py.jinja2 +408 -0
- attune/scaffolding/templates/linear_flow_wizard.py.jinja2 +203 -0
- attune/socratic/__init__.py +256 -0
- attune/socratic/ab_testing.py +958 -0
- attune/socratic/blueprint.py +533 -0
- attune/socratic/cli.py +703 -0
- attune/socratic/collaboration.py +1114 -0
- attune/socratic/domain_templates.py +924 -0
- attune/socratic/embeddings.py +738 -0
- attune/socratic/engine.py +794 -0
- attune/socratic/explainer.py +682 -0
- attune/socratic/feedback.py +772 -0
- attune/socratic/forms.py +629 -0
- attune/socratic/generator.py +732 -0
- attune/socratic/llm_analyzer.py +637 -0
- attune/socratic/mcp_server.py +702 -0
- attune/socratic/session.py +312 -0
- attune/socratic/storage.py +667 -0
- attune/socratic/success.py +730 -0
- attune/socratic/visual_editor.py +860 -0
- attune/socratic/web_ui.py +958 -0
- attune/telemetry/__init__.py +39 -0
- attune/telemetry/agent_coordination.py +475 -0
- attune/telemetry/agent_tracking.py +367 -0
- attune/telemetry/approval_gates.py +545 -0
- attune/telemetry/cli.py +1231 -0
- attune/telemetry/commands/__init__.py +14 -0
- attune/telemetry/commands/dashboard_commands.py +696 -0
- attune/telemetry/event_streaming.py +409 -0
- attune/telemetry/feedback_loop.py +567 -0
- attune/telemetry/usage_tracker.py +591 -0
- attune/templates.py +754 -0
- attune/test_generator/__init__.py +38 -0
- attune/test_generator/__main__.py +14 -0
- attune/test_generator/cli.py +234 -0
- attune/test_generator/generator.py +355 -0
- attune/test_generator/risk_analyzer.py +216 -0
- attune/test_generator/templates/unit_test.py.jinja2 +272 -0
- attune/tier_recommender.py +384 -0
- attune/tools.py +183 -0
- attune/trust/__init__.py +28 -0
- attune/trust/circuit_breaker.py +579 -0
- attune/trust_building.py +527 -0
- attune/validation/__init__.py +19 -0
- attune/validation/xml_validator.py +281 -0
- attune/vscode_bridge.py +173 -0
- attune/workflow_commands.py +780 -0
- attune/workflow_patterns/__init__.py +33 -0
- attune/workflow_patterns/behavior.py +249 -0
- attune/workflow_patterns/core.py +76 -0
- attune/workflow_patterns/output.py +99 -0
- attune/workflow_patterns/registry.py +255 -0
- attune/workflow_patterns/structural.py +288 -0
- attune/workflows/__init__.py +539 -0
- attune/workflows/autonomous_test_gen.py +1268 -0
- attune/workflows/base.py +2667 -0
- attune/workflows/batch_processing.py +342 -0
- attune/workflows/bug_predict.py +1084 -0
- attune/workflows/builder.py +273 -0
- attune/workflows/caching.py +253 -0
- attune/workflows/code_review.py +1048 -0
- attune/workflows/code_review_adapters.py +312 -0
- attune/workflows/code_review_pipeline.py +722 -0
- attune/workflows/config.py +645 -0
- attune/workflows/dependency_check.py +644 -0
- attune/workflows/document_gen/__init__.py +25 -0
- attune/workflows/document_gen/config.py +30 -0
- attune/workflows/document_gen/report_formatter.py +162 -0
- attune/workflows/document_gen/workflow.py +1426 -0
- attune/workflows/document_manager.py +216 -0
- attune/workflows/document_manager_README.md +134 -0
- attune/workflows/documentation_orchestrator.py +1205 -0
- attune/workflows/history.py +510 -0
- attune/workflows/keyboard_shortcuts/__init__.py +39 -0
- attune/workflows/keyboard_shortcuts/generators.py +391 -0
- attune/workflows/keyboard_shortcuts/parsers.py +416 -0
- attune/workflows/keyboard_shortcuts/prompts.py +295 -0
- attune/workflows/keyboard_shortcuts/schema.py +193 -0
- attune/workflows/keyboard_shortcuts/workflow.py +509 -0
- attune/workflows/llm_base.py +363 -0
- attune/workflows/manage_docs.py +87 -0
- attune/workflows/manage_docs_README.md +134 -0
- attune/workflows/manage_documentation.py +821 -0
- attune/workflows/new_sample_workflow1.py +149 -0
- attune/workflows/new_sample_workflow1_README.md +150 -0
- attune/workflows/orchestrated_health_check.py +849 -0
- attune/workflows/orchestrated_release_prep.py +600 -0
- attune/workflows/output.py +413 -0
- attune/workflows/perf_audit.py +863 -0
- attune/workflows/pr_review.py +762 -0
- attune/workflows/progress.py +785 -0
- attune/workflows/progress_server.py +322 -0
- attune/workflows/progressive/README 2.md +454 -0
- attune/workflows/progressive/README.md +454 -0
- attune/workflows/progressive/__init__.py +82 -0
- attune/workflows/progressive/cli.py +219 -0
- attune/workflows/progressive/core.py +488 -0
- attune/workflows/progressive/orchestrator.py +723 -0
- attune/workflows/progressive/reports.py +520 -0
- attune/workflows/progressive/telemetry.py +274 -0
- attune/workflows/progressive/test_gen.py +495 -0
- attune/workflows/progressive/workflow.py +589 -0
- attune/workflows/refactor_plan.py +694 -0
- attune/workflows/release_prep.py +895 -0
- attune/workflows/release_prep_crew.py +969 -0
- attune/workflows/research_synthesis.py +404 -0
- attune/workflows/routing.py +168 -0
- attune/workflows/secure_release.py +593 -0
- attune/workflows/security_adapters.py +297 -0
- attune/workflows/security_audit.py +1329 -0
- attune/workflows/security_audit_phase3.py +355 -0
- attune/workflows/seo_optimization.py +633 -0
- attune/workflows/step_config.py +234 -0
- attune/workflows/telemetry_mixin.py +269 -0
- attune/workflows/test5.py +125 -0
- attune/workflows/test5_README.md +158 -0
- attune/workflows/test_coverage_boost_crew.py +849 -0
- attune/workflows/test_gen/__init__.py +52 -0
- attune/workflows/test_gen/ast_analyzer.py +249 -0
- attune/workflows/test_gen/config.py +88 -0
- attune/workflows/test_gen/data_models.py +38 -0
- attune/workflows/test_gen/report_formatter.py +289 -0
- attune/workflows/test_gen/test_templates.py +381 -0
- attune/workflows/test_gen/workflow.py +655 -0
- attune/workflows/test_gen.py +54 -0
- attune/workflows/test_gen_behavioral.py +477 -0
- attune/workflows/test_gen_parallel.py +341 -0
- attune/workflows/test_lifecycle.py +526 -0
- attune/workflows/test_maintenance.py +627 -0
- attune/workflows/test_maintenance_cli.py +590 -0
- attune/workflows/test_maintenance_crew.py +840 -0
- attune/workflows/test_runner.py +622 -0
- attune/workflows/tier_tracking.py +531 -0
- attune/workflows/xml_enhanced_crew.py +285 -0
- attune_ai-2.0.0.dist-info/METADATA +1026 -0
- attune_ai-2.0.0.dist-info/RECORD +457 -0
- attune_ai-2.0.0.dist-info/WHEEL +5 -0
- attune_ai-2.0.0.dist-info/entry_points.txt +26 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE +201 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +101 -0
- attune_ai-2.0.0.dist-info/top_level.txt +5 -0
- attune_healthcare/__init__.py +13 -0
- attune_healthcare/monitors/__init__.py +9 -0
- attune_healthcare/monitors/clinical_protocol_monitor.py +315 -0
- attune_healthcare/monitors/monitoring/__init__.py +44 -0
- attune_healthcare/monitors/monitoring/protocol_checker.py +300 -0
- attune_healthcare/monitors/monitoring/protocol_loader.py +214 -0
- attune_healthcare/monitors/monitoring/sensor_parsers.py +306 -0
- attune_healthcare/monitors/monitoring/trajectory_analyzer.py +389 -0
- attune_llm/README.md +553 -0
- attune_llm/__init__.py +28 -0
- attune_llm/agent_factory/__init__.py +53 -0
- attune_llm/agent_factory/adapters/__init__.py +85 -0
- attune_llm/agent_factory/adapters/autogen_adapter.py +312 -0
- attune_llm/agent_factory/adapters/crewai_adapter.py +483 -0
- attune_llm/agent_factory/adapters/haystack_adapter.py +298 -0
- attune_llm/agent_factory/adapters/langchain_adapter.py +362 -0
- attune_llm/agent_factory/adapters/langgraph_adapter.py +333 -0
- attune_llm/agent_factory/adapters/native.py +228 -0
- attune_llm/agent_factory/adapters/wizard_adapter.py +423 -0
- attune_llm/agent_factory/base.py +305 -0
- attune_llm/agent_factory/crews/__init__.py +67 -0
- attune_llm/agent_factory/crews/code_review.py +1113 -0
- attune_llm/agent_factory/crews/health_check.py +1262 -0
- attune_llm/agent_factory/crews/refactoring.py +1128 -0
- attune_llm/agent_factory/crews/security_audit.py +1018 -0
- attune_llm/agent_factory/decorators.py +287 -0
- attune_llm/agent_factory/factory.py +558 -0
- attune_llm/agent_factory/framework.py +193 -0
- attune_llm/agent_factory/memory_integration.py +328 -0
- attune_llm/agent_factory/resilient.py +320 -0
- attune_llm/agents_md/__init__.py +22 -0
- attune_llm/agents_md/loader.py +218 -0
- attune_llm/agents_md/parser.py +271 -0
- attune_llm/agents_md/registry.py +307 -0
- attune_llm/claude_memory.py +466 -0
- attune_llm/cli/__init__.py +8 -0
- attune_llm/cli/sync_claude.py +487 -0
- attune_llm/code_health.py +1313 -0
- attune_llm/commands/__init__.py +51 -0
- attune_llm/commands/context.py +375 -0
- attune_llm/commands/loader.py +301 -0
- attune_llm/commands/models.py +231 -0
- attune_llm/commands/parser.py +371 -0
- attune_llm/commands/registry.py +429 -0
- attune_llm/config/__init__.py +29 -0
- attune_llm/config/unified.py +291 -0
- attune_llm/context/__init__.py +22 -0
- attune_llm/context/compaction.py +455 -0
- attune_llm/context/manager.py +434 -0
- attune_llm/contextual_patterns.py +361 -0
- attune_llm/core.py +907 -0
- attune_llm/git_pattern_extractor.py +435 -0
- attune_llm/hooks/__init__.py +24 -0
- attune_llm/hooks/config.py +306 -0
- attune_llm/hooks/executor.py +289 -0
- attune_llm/hooks/registry.py +302 -0
- attune_llm/hooks/scripts/__init__.py +39 -0
- attune_llm/hooks/scripts/evaluate_session.py +201 -0
- attune_llm/hooks/scripts/first_time_init.py +285 -0
- attune_llm/hooks/scripts/pre_compact.py +207 -0
- attune_llm/hooks/scripts/session_end.py +183 -0
- attune_llm/hooks/scripts/session_start.py +163 -0
- attune_llm/hooks/scripts/suggest_compact.py +225 -0
- attune_llm/learning/__init__.py +30 -0
- attune_llm/learning/evaluator.py +438 -0
- attune_llm/learning/extractor.py +514 -0
- attune_llm/learning/storage.py +560 -0
- attune_llm/levels.py +227 -0
- attune_llm/pattern_confidence.py +414 -0
- attune_llm/pattern_resolver.py +272 -0
- attune_llm/pattern_summary.py +350 -0
- attune_llm/providers.py +967 -0
- attune_llm/routing/__init__.py +32 -0
- attune_llm/routing/model_router.py +362 -0
- attune_llm/security/IMPLEMENTATION_SUMMARY.md +413 -0
- attune_llm/security/PHASE2_COMPLETE.md +384 -0
- attune_llm/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
- attune_llm/security/QUICK_REFERENCE.md +316 -0
- attune_llm/security/README.md +262 -0
- attune_llm/security/__init__.py +62 -0
- attune_llm/security/audit_logger.py +929 -0
- attune_llm/security/audit_logger_example.py +152 -0
- attune_llm/security/pii_scrubber.py +640 -0
- attune_llm/security/secrets_detector.py +678 -0
- attune_llm/security/secrets_detector_example.py +304 -0
- attune_llm/security/secure_memdocs.py +1192 -0
- attune_llm/security/secure_memdocs_example.py +278 -0
- attune_llm/session_status.py +745 -0
- attune_llm/state.py +246 -0
- attune_llm/utils/__init__.py +5 -0
- attune_llm/utils/tokens.py +349 -0
- attune_software/SOFTWARE_PLUGIN_README.md +57 -0
- attune_software/__init__.py +13 -0
- attune_software/cli/__init__.py +120 -0
- attune_software/cli/inspect.py +362 -0
- attune_software/cli.py +574 -0
- attune_software/plugin.py +188 -0
- workflow_scaffolding/__init__.py +11 -0
- workflow_scaffolding/__main__.py +12 -0
- workflow_scaffolding/cli.py +206 -0
- workflow_scaffolding/generator.py +265 -0
|
@@ -0,0 +1,863 @@
|
|
|
1
|
+
"""Performance Audit Workflow
|
|
2
|
+
|
|
3
|
+
Identifies performance bottlenecks and optimization opportunities
|
|
4
|
+
through static analysis.
|
|
5
|
+
|
|
6
|
+
Stages:
|
|
7
|
+
1. profile (CHEAP) - Static analysis for common perf anti-patterns
|
|
8
|
+
2. analyze (CAPABLE) - Deep analysis of algorithmic complexity
|
|
9
|
+
3. hotspots (CAPABLE) - Identify performance hotspots
|
|
10
|
+
4. optimize (PREMIUM) - Generate optimization recommendations (conditional)
|
|
11
|
+
|
|
12
|
+
Copyright 2025 Smart-AI-Memory
|
|
13
|
+
Licensed under Fair Source License 0.9
|
|
14
|
+
"""
|
|
15
|
+
|
|
16
|
+
import heapq
|
|
17
|
+
import json
|
|
18
|
+
import re
|
|
19
|
+
from pathlib import Path
|
|
20
|
+
from typing import Any
|
|
21
|
+
|
|
22
|
+
from .base import BaseWorkflow, ModelTier
|
|
23
|
+
from .output import Finding, WorkflowReport, get_console
|
|
24
|
+
from .step_config import WorkflowStepConfig
|
|
25
|
+
|
|
26
|
+
# Define step configurations for executor-based execution.
# Only the "optimize" stage is routed through an executor step here; the
# other stages ("profile", "analyze", "hotspots") run as plain methods.
PERF_AUDIT_STEPS = {
    "optimize": WorkflowStepConfig(
        name="optimize",
        task_type="final_review",  # Premium tier task
        tier_hint="premium",  # may be downgraded at runtime by should_skip_stage
        description="Generate performance optimization recommendations",
        max_tokens=3000,  # cap on generated recommendation text
    ),
}
|
|
36
|
+
|
|
37
|
+
# Performance anti-patterns to detect.
#
# Each entry maps a pattern name to:
#   "patterns":    regexes run with re.MULTILINE by the profile stage
#   "description": human-readable finding text
#   "impact":      "high" | "medium" | "low" severity bucket
#
# FIX: the "n_plus_one" and "sync_in_async" patterns previously used `.*?`,
# which never matches a newline without re.DOTALL — so a loop or async body
# on the *following* line (the realistic case) could never match and those
# detectors were effectively dead. They now use a bounded `[\s\S]{0,N}?`
# span so matches may cross lines while staying close to the anchor,
# limiting false positives from distant unrelated code.
PERF_PATTERNS = {
    "n_plus_one": {
        "patterns": [
            # Loop header followed (within ~200 chars, across lines) by a lookup call.
            r"for\s+\w+\s+in\s+\w+:[\s\S]{0,200}?\.get\(",
            r"for\s+\w+\s+in\s+\w+:[\s\S]{0,200}?\.query\(",
            r"for\s+\w+\s+in\s+\w+:[\s\S]{0,200}?\.fetch\(",
        ],
        "description": "Potential N+1 query pattern",
        "impact": "high",
    },
    "sync_in_async": {
        "patterns": [
            # async def followed (within ~400 chars, across lines) by a blocking call.
            r"async\s+def[\s\S]{0,400}?time\.sleep\(",
            r"async\s+def[\s\S]{0,400}?requests\.get\(",
            r"async\s+def[\s\S]{0,400}?open\([^)]+\)\.read\(",
        ],
        "description": "Synchronous operation in async context",
        "impact": "high",
    },
    "list_comprehension_in_loop": {
        "patterns": [
            r"for\s+\w+\s+in\s+\[.*for.*\]:",
        ],
        "description": "List comprehension recreated in loop",
        "impact": "medium",
    },
    "string_concat_loop": {
        "patterns": [
            # Match: for x in y: \n str += "..." (actual loop, not generator expression)
            # Exclude: any(... for x in ...) by requiring standalone for statement
            r'^[ \t]*for\s+\w+\s+in\s+[^:]+:\s*\n[ \t]+\w+\s*\+=\s*["\']',
        ],
        "description": "String concatenation in loop (use join)",
        "impact": "medium",
    },
    "global_import": {
        "patterns": [
            r"^from\s+\w+\s+import\s+\*",
        ],
        "description": "Wildcard import may slow startup",
        "impact": "low",
    },
    "large_list_copy": {
        "patterns": [
            r"list\(\w+\)",
            r"\w+\[:\]",
        ],
        "description": "Full list copy (may be inefficient for large lists)",
        "impact": "low",
    },
    "repeated_regex": {
        "patterns": [
            r're\.(search|match|findall)\s*\(["\'][^"\']+["\']',
        ],
        "description": "Regex pattern not pre-compiled",
        "impact": "medium",
    },
    "nested_loops": {
        "patterns": [
            r"for\s+\w+\s+in\s+\w+:\s*\n\s+for\s+\w+\s+in\s+\w+:\s*\n\s+for",
        ],
        "description": "Triple nested loop (O(n³) complexity)",
        "impact": "high",
    },
}
|
|
103
|
+
|
|
104
|
+
# Known false positives - patterns that match but aren't performance issues
|
|
105
|
+
# These are documented for transparency; the regex-based detection has limitations.
|
|
106
|
+
#
|
|
107
|
+
# IMPROVED: string_concat_loop
|
|
108
|
+
# - Pattern now requires line to START with 'for' (excludes generator expressions)
|
|
109
|
+
# - Previously matched: any(x for x in y) followed by += on next line
|
|
110
|
+
# - Now correctly excludes: generator expressions inside any(), all(), etc.
|
|
111
|
+
# - Sequential string building (code += "line1"; code += "line2") correctly ignored
|
|
112
|
+
#
|
|
113
|
+
# FALSE POSITIVE: large_list_copy
|
|
114
|
+
# - list(x) or x[:] used for defensive copying or type conversion
|
|
115
|
+
# - Often intentional to avoid mutating original data
|
|
116
|
+
# - Verdict: OK - usually intentional, low impact
|
|
117
|
+
#
|
|
118
|
+
# FALSE POSITIVE: repeated_regex (edge cases)
|
|
119
|
+
# - Single-use regex in rarely-called functions
|
|
120
|
+
# - Verdict: OK - pre-compilation only matters for hot paths
|
|
121
|
+
|
|
122
|
+
|
|
123
|
+
class PerformanceAuditWorkflow(BaseWorkflow):
    """Identify performance bottlenecks and optimization opportunities.

    Uses static analysis to find common performance anti-patterns
    and algorithmic complexity issues.
    """

    # Workflow identifier used for registration/CLI lookup.
    name = "perf-audit"
    description = "Identify performance bottlenecks and optimization opportunities"
    # Stage execution order; "optimize" is conditionally tier-downgraded
    # by should_skip_stage when few hotspots were found.
    stages = ["profile", "analyze", "hotspots", "optimize"]
    # Default model tier per stage: cheap static scan first, premium
    # reserved for the final optimization recommendations.
    # NOTE(review): this is a class-level dict shared across instances;
    # should_skip_stage writes to it at runtime — confirm that is intended.
    tier_map = {
        "profile": ModelTier.CHEAP,
        "analyze": ModelTier.CAPABLE,
        "hotspots": ModelTier.CAPABLE,
        "optimize": ModelTier.PREMIUM,
    }
|
|
139
|
+
|
|
140
|
+
def __init__(
|
|
141
|
+
self,
|
|
142
|
+
min_hotspots_for_premium: int = 3,
|
|
143
|
+
enable_auth_strategy: bool = True,
|
|
144
|
+
**kwargs: Any,
|
|
145
|
+
):
|
|
146
|
+
"""Initialize performance audit workflow.
|
|
147
|
+
|
|
148
|
+
Args:
|
|
149
|
+
min_hotspots_for_premium: Minimum hotspots to trigger premium optimization
|
|
150
|
+
enable_auth_strategy: Enable intelligent auth routing (default: True)
|
|
151
|
+
**kwargs: Additional arguments passed to BaseWorkflow
|
|
152
|
+
|
|
153
|
+
"""
|
|
154
|
+
super().__init__(**kwargs)
|
|
155
|
+
self.min_hotspots_for_premium = min_hotspots_for_premium
|
|
156
|
+
self.enable_auth_strategy = enable_auth_strategy
|
|
157
|
+
self._hotspot_count: int = 0
|
|
158
|
+
self._auth_mode_used: str | None = None
|
|
159
|
+
|
|
160
|
+
def should_skip_stage(self, stage_name: str, input_data: Any) -> tuple[bool, str | None]:
|
|
161
|
+
"""Downgrade optimize stage if few hotspots.
|
|
162
|
+
|
|
163
|
+
Args:
|
|
164
|
+
stage_name: Name of the stage to check
|
|
165
|
+
input_data: Current workflow data
|
|
166
|
+
|
|
167
|
+
Returns:
|
|
168
|
+
Tuple of (should_skip, reason)
|
|
169
|
+
|
|
170
|
+
"""
|
|
171
|
+
if stage_name == "optimize":
|
|
172
|
+
if self._hotspot_count < self.min_hotspots_for_premium:
|
|
173
|
+
self.tier_map["optimize"] = ModelTier.CAPABLE
|
|
174
|
+
return False, None
|
|
175
|
+
return False, None
|
|
176
|
+
|
|
177
|
+
async def run_stage(
|
|
178
|
+
self,
|
|
179
|
+
stage_name: str,
|
|
180
|
+
tier: ModelTier,
|
|
181
|
+
input_data: Any,
|
|
182
|
+
) -> tuple[Any, int, int]:
|
|
183
|
+
"""Route to specific stage implementation."""
|
|
184
|
+
if stage_name == "profile":
|
|
185
|
+
return await self._profile(input_data, tier)
|
|
186
|
+
if stage_name == "analyze":
|
|
187
|
+
return await self._analyze(input_data, tier)
|
|
188
|
+
if stage_name == "hotspots":
|
|
189
|
+
return await self._hotspots(input_data, tier)
|
|
190
|
+
if stage_name == "optimize":
|
|
191
|
+
return await self._optimize(input_data, tier)
|
|
192
|
+
raise ValueError(f"Unknown stage: {stage_name}")
|
|
193
|
+
|
|
194
|
+
async def _profile(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
|
|
195
|
+
"""Static analysis for common performance anti-patterns.
|
|
196
|
+
|
|
197
|
+
Scans code for known performance issues.
|
|
198
|
+
"""
|
|
199
|
+
target_path = input_data.get("path", ".")
|
|
200
|
+
file_types = input_data.get("file_types", [".py"])
|
|
201
|
+
|
|
202
|
+
findings: list[dict] = []
|
|
203
|
+
files_scanned = 0
|
|
204
|
+
|
|
205
|
+
target = Path(target_path)
|
|
206
|
+
|
|
207
|
+
# === AUTH STRATEGY INTEGRATION ===
|
|
208
|
+
if self.enable_auth_strategy:
|
|
209
|
+
try:
|
|
210
|
+
import logging
|
|
211
|
+
|
|
212
|
+
from attune.models import (
|
|
213
|
+
count_lines_of_code,
|
|
214
|
+
get_auth_strategy,
|
|
215
|
+
get_module_size_category,
|
|
216
|
+
)
|
|
217
|
+
|
|
218
|
+
logger = logging.getLogger(__name__)
|
|
219
|
+
|
|
220
|
+
# Calculate total LOC for the project/path
|
|
221
|
+
total_lines = 0
|
|
222
|
+
if target.is_file():
|
|
223
|
+
total_lines = count_lines_of_code(target)
|
|
224
|
+
elif target.is_dir():
|
|
225
|
+
# Estimate total lines for directory
|
|
226
|
+
for ext in file_types:
|
|
227
|
+
for file_path in target.rglob(f"*{ext}"):
|
|
228
|
+
if any(
|
|
229
|
+
skip in str(file_path)
|
|
230
|
+
for skip in [".git", "node_modules", "__pycache__", "venv", "test"]
|
|
231
|
+
):
|
|
232
|
+
continue
|
|
233
|
+
try:
|
|
234
|
+
total_lines += count_lines_of_code(file_path)
|
|
235
|
+
except Exception:
|
|
236
|
+
pass
|
|
237
|
+
|
|
238
|
+
if total_lines > 0:
|
|
239
|
+
strategy = get_auth_strategy()
|
|
240
|
+
recommended_mode = strategy.get_recommended_mode(total_lines)
|
|
241
|
+
self._auth_mode_used = recommended_mode.value
|
|
242
|
+
|
|
243
|
+
size_category = get_module_size_category(total_lines)
|
|
244
|
+
logger.info(
|
|
245
|
+
f"Performance audit target: {target_path} "
|
|
246
|
+
f"({total_lines:,} LOC, {size_category})"
|
|
247
|
+
)
|
|
248
|
+
logger.info(f"Recommended auth mode: {recommended_mode.value}")
|
|
249
|
+
|
|
250
|
+
cost_estimate = strategy.estimate_cost(total_lines, recommended_mode)
|
|
251
|
+
if recommended_mode.value == "subscription":
|
|
252
|
+
logger.info(
|
|
253
|
+
f"Cost estimate: ~${cost_estimate:.4f} "
|
|
254
|
+
"(significantly cheaper with subscription)"
|
|
255
|
+
)
|
|
256
|
+
else:
|
|
257
|
+
logger.info(f"Cost estimate: ~${cost_estimate:.4f} (API-based)")
|
|
258
|
+
|
|
259
|
+
except ImportError as e:
|
|
260
|
+
import logging
|
|
261
|
+
|
|
262
|
+
logger = logging.getLogger(__name__)
|
|
263
|
+
logger.debug(f"Auth strategy not available: {e}")
|
|
264
|
+
except Exception as e:
|
|
265
|
+
import logging
|
|
266
|
+
|
|
267
|
+
logger = logging.getLogger(__name__)
|
|
268
|
+
logger.warning(f"Auth strategy detection failed: {e}")
|
|
269
|
+
# === END AUTH STRATEGY INTEGRATION ===
|
|
270
|
+
if target.exists():
|
|
271
|
+
for ext in file_types:
|
|
272
|
+
for file_path in target.rglob(f"*{ext}"):
|
|
273
|
+
if any(
|
|
274
|
+
skip in str(file_path)
|
|
275
|
+
for skip in [".git", "node_modules", "__pycache__", "venv", "test"]
|
|
276
|
+
):
|
|
277
|
+
continue
|
|
278
|
+
|
|
279
|
+
try:
|
|
280
|
+
content = file_path.read_text(errors="ignore")
|
|
281
|
+
files_scanned += 1
|
|
282
|
+
|
|
283
|
+
for pattern_name, pattern_info in PERF_PATTERNS.items():
|
|
284
|
+
for pattern in pattern_info["patterns"]:
|
|
285
|
+
matches = list(re.finditer(pattern, content, re.MULTILINE))
|
|
286
|
+
for match in matches:
|
|
287
|
+
line_num = content[: match.start()].count("\n") + 1
|
|
288
|
+
findings.append(
|
|
289
|
+
{
|
|
290
|
+
"type": pattern_name,
|
|
291
|
+
"file": str(file_path),
|
|
292
|
+
"line": line_num,
|
|
293
|
+
"description": pattern_info["description"],
|
|
294
|
+
"impact": pattern_info["impact"],
|
|
295
|
+
"match": match.group()[:80],
|
|
296
|
+
},
|
|
297
|
+
)
|
|
298
|
+
except OSError:
|
|
299
|
+
continue
|
|
300
|
+
|
|
301
|
+
# Group by impact
|
|
302
|
+
by_impact: dict[str, list] = {"high": [], "medium": [], "low": []}
|
|
303
|
+
for f in findings:
|
|
304
|
+
impact = f.get("impact", "low")
|
|
305
|
+
by_impact[impact].append(f)
|
|
306
|
+
|
|
307
|
+
input_tokens = len(str(input_data)) // 4
|
|
308
|
+
output_tokens = len(str(findings)) // 4
|
|
309
|
+
|
|
310
|
+
return (
|
|
311
|
+
{
|
|
312
|
+
"findings": findings,
|
|
313
|
+
"finding_count": len(findings),
|
|
314
|
+
"files_scanned": files_scanned,
|
|
315
|
+
"by_impact": {k: len(v) for k, v in by_impact.items()},
|
|
316
|
+
**input_data,
|
|
317
|
+
},
|
|
318
|
+
input_tokens,
|
|
319
|
+
output_tokens,
|
|
320
|
+
)
|
|
321
|
+
|
|
322
|
+
async def _analyze(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
|
|
323
|
+
"""Deep analysis of algorithmic complexity.
|
|
324
|
+
|
|
325
|
+
Examines code structure for complexity issues.
|
|
326
|
+
"""
|
|
327
|
+
findings = input_data.get("findings", [])
|
|
328
|
+
|
|
329
|
+
# Group findings by file
|
|
330
|
+
by_file: dict[str, list] = {}
|
|
331
|
+
for f in findings:
|
|
332
|
+
file_path = f.get("file", "")
|
|
333
|
+
if file_path not in by_file:
|
|
334
|
+
by_file[file_path] = []
|
|
335
|
+
by_file[file_path].append(f)
|
|
336
|
+
|
|
337
|
+
# Analyze each file
|
|
338
|
+
analysis: list[dict] = []
|
|
339
|
+
for file_path, file_findings in by_file.items():
|
|
340
|
+
# Calculate file complexity score (generator expressions for memory efficiency)
|
|
341
|
+
high_count = sum(1 for f in file_findings if f["impact"] == "high")
|
|
342
|
+
medium_count = sum(1 for f in file_findings if f["impact"] == "medium")
|
|
343
|
+
low_count = sum(1 for f in file_findings if f["impact"] == "low")
|
|
344
|
+
|
|
345
|
+
complexity_score = high_count * 10 + medium_count * 5 + low_count * 1
|
|
346
|
+
|
|
347
|
+
# Identify primary concerns
|
|
348
|
+
concerns = list({f["type"] for f in file_findings})
|
|
349
|
+
|
|
350
|
+
analysis.append(
|
|
351
|
+
{
|
|
352
|
+
"file": file_path,
|
|
353
|
+
"complexity_score": complexity_score,
|
|
354
|
+
"finding_count": len(file_findings),
|
|
355
|
+
"high_impact": high_count,
|
|
356
|
+
"concerns": concerns[:5],
|
|
357
|
+
},
|
|
358
|
+
)
|
|
359
|
+
|
|
360
|
+
# Sort by complexity score
|
|
361
|
+
analysis.sort(key=lambda x: -x["complexity_score"])
|
|
362
|
+
|
|
363
|
+
input_tokens = len(str(input_data)) // 4
|
|
364
|
+
output_tokens = len(str(analysis)) // 4
|
|
365
|
+
|
|
366
|
+
return (
|
|
367
|
+
{
|
|
368
|
+
"analysis": analysis,
|
|
369
|
+
"analyzed_files": len(analysis),
|
|
370
|
+
**input_data,
|
|
371
|
+
},
|
|
372
|
+
input_tokens,
|
|
373
|
+
output_tokens,
|
|
374
|
+
)
|
|
375
|
+
|
|
376
|
+
async def _hotspots(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
|
|
377
|
+
"""Identify performance hotspots.
|
|
378
|
+
|
|
379
|
+
Pinpoints files and areas requiring immediate attention.
|
|
380
|
+
"""
|
|
381
|
+
analysis = input_data.get("analysis", [])
|
|
382
|
+
|
|
383
|
+
# Top hotspots (highest complexity scores)
|
|
384
|
+
hotspots = [a for a in analysis if a["complexity_score"] >= 10 or a["high_impact"] >= 2]
|
|
385
|
+
|
|
386
|
+
self._hotspot_count = len(hotspots)
|
|
387
|
+
|
|
388
|
+
# Categorize hotspots
|
|
389
|
+
critical = [h for h in hotspots if h["complexity_score"] >= 20]
|
|
390
|
+
moderate = [h for h in hotspots if 10 <= h["complexity_score"] < 20]
|
|
391
|
+
|
|
392
|
+
# Calculate overall perf score (inverse of problems)
|
|
393
|
+
total_score = sum(a["complexity_score"] for a in analysis)
|
|
394
|
+
max_score = len(analysis) * 30 # Max possible score
|
|
395
|
+
perf_score = max(0, 100 - int((total_score / max(max_score, 1)) * 100))
|
|
396
|
+
|
|
397
|
+
hotspot_result = {
|
|
398
|
+
"hotspots": hotspots[:15], # Top 15
|
|
399
|
+
"hotspot_count": self._hotspot_count,
|
|
400
|
+
"critical_count": len(critical),
|
|
401
|
+
"moderate_count": len(moderate),
|
|
402
|
+
"perf_score": perf_score,
|
|
403
|
+
"perf_level": (
|
|
404
|
+
"critical" if perf_score < 50 else "warning" if perf_score < 75 else "good"
|
|
405
|
+
),
|
|
406
|
+
}
|
|
407
|
+
|
|
408
|
+
input_tokens = len(str(input_data)) // 4
|
|
409
|
+
output_tokens = len(str(hotspot_result)) // 4
|
|
410
|
+
|
|
411
|
+
return (
|
|
412
|
+
{
|
|
413
|
+
"hotspot_result": hotspot_result,
|
|
414
|
+
**input_data,
|
|
415
|
+
},
|
|
416
|
+
input_tokens,
|
|
417
|
+
output_tokens,
|
|
418
|
+
)
|
|
419
|
+
|
|
420
|
+
async def _optimize(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
    """Generate optimization recommendations using LLM.

    Creates actionable recommendations for performance improvements.

    Supports XML-enhanced prompts when enabled in workflow config.

    Args:
        input_data: Accumulated pipeline state; reads "hotspot_result",
            "findings", and "target".
        tier: Model tier used on the legacy LLM call path; its ``.value``
            is recorded in the result.

    Returns:
        Tuple of (result dict, input token count, output token count) as
        reported by whichever LLM path was taken.
    """
    hotspot_result = input_data.get("hotspot_result", {})
    hotspots = hotspot_result.get("hotspots", [])
    findings = input_data.get("findings", [])
    target = input_data.get("target", "")

    # Build hotspots summary for LLM (only the top 10 to bound prompt size)
    hotspots_summary = []
    for h in hotspots[:10]:
        hotspots_summary.append(
            f"- {h.get('file')}: score={h.get('complexity_score', 0)}, "
            f"concerns={', '.join(h.get('concerns', []))}",
        )

    # Summary of most common issues (top 5 by occurrence count)
    issue_counts: dict[str, int] = {}
    for f in findings:
        t = f.get("type", "unknown")
        issue_counts[t] = issue_counts.get(t, 0) + 1
    top_issues = heapq.nlargest(5, issue_counts.items(), key=lambda x: x[1])

    # Build input payload for prompt (chr(10) is "\n": newlines cannot
    # appear directly inside f-string expressions before Python 3.12)
    input_payload = f"""Target: {target or "codebase"}

Performance Score: {hotspot_result.get("perf_score", 0)}/100
Performance Level: {hotspot_result.get("perf_level", "unknown")}

Hotspots:
{chr(10).join(hotspots_summary) if hotspots_summary else "No hotspots identified"}

Top Issues:
{json.dumps([{"type": t, "count": c} for t, c in top_issues], indent=2)}"""

    # Check if XML prompts are enabled
    if self._is_xml_enabled():
        # Use XML-enhanced prompt
        user_message = self._render_xml_prompt(
            role="performance engineer specializing in optimization",
            goal="Generate comprehensive optimization recommendations for performance issues",
            instructions=[
                "Analyze each performance hotspot and its concerns",
                "Provide specific optimization strategies with code examples",
                "Estimate the impact of each optimization (high/medium/low)",
                "Prioritize recommendations by potential performance gain",
                "Include before/after code patterns where helpful",
            ],
            constraints=[
                "Be specific about which files and patterns to optimize",
                "Include actionable code changes",
                "Focus on high-impact optimizations first",
            ],
            input_type="performance_hotspots",
            input_payload=input_payload,
            extra={
                "perf_score": hotspot_result.get("perf_score", 0),
                "hotspot_count": len(hotspots),
            },
        )
        system = None  # XML prompt includes all context
    else:
        # Use legacy plain text prompts
        system = """You are a performance engineer specializing in code optimization.
Analyze the identified performance hotspots and generate actionable recommendations.

For each hotspot:
1. Explain why the pattern causes performance issues
2. Provide specific optimization strategies with code examples
3. Estimate the impact of the optimization

Prioritize by potential performance gain."""

        user_message = f"""Generate optimization recommendations for these performance issues:

{input_payload}

Provide detailed optimization strategies."""

    # Try executor-based execution first (Phase 3 pattern)
    if self._executor is not None or self._api_key:
        try:
            step = PERF_AUDIT_STEPS["optimize"]
            # NOTE(review): `cost` is not propagated from this stage —
            # presumably tracked by the executor itself; confirm.
            response, input_tokens, output_tokens, cost = await self.run_step_with_executor(
                step=step,
                prompt=user_message,
                system=system,
            )
        except Exception:
            # Fall back to legacy _call_llm if executor fails
            # (broad catch is deliberate: any executor error downgrades
            # to the legacy path rather than failing the stage)
            response, input_tokens, output_tokens = await self._call_llm(
                tier,
                system or "",
                user_message,
                max_tokens=3000,
            )
    else:
        # Legacy path for backward compatibility
        response, input_tokens, output_tokens = await self._call_llm(
            tier,
            system or "",
            user_message,
            max_tokens=3000,
        )

    # Parse XML response if enforcement is enabled
    parsed_data = self._parse_xml_response(response)

    result = {
        "optimization_plan": response,
        "recommendation_count": len(hotspots),
        "top_issues": [{"type": t, "count": c} for t, c in top_issues],
        "perf_score": hotspot_result.get("perf_score", 0),
        "perf_level": hotspot_result.get("perf_level", "unknown"),
        "model_tier_used": tier.value,
        "auth_mode_used": self._auth_mode_used,
    }

    # Merge parsed XML data if available (overwrites "findings" in the
    # result with the parsed structured findings, not the raw scan list)
    if parsed_data.get("xml_parsed"):
        result.update(
            {
                "xml_parsed": True,
                "summary": parsed_data.get("summary"),
                "findings": parsed_data.get("findings", []),
                "checklist": parsed_data.get("checklist", []),
            },
        )

    # Add formatted report for human readability
    result["formatted_report"] = format_perf_audit_report(result, input_data)

    # Add structured WorkflowReport for Rich rendering
    result["workflow_report"] = create_perf_audit_workflow_report(result, input_data)

    return (result, input_tokens, output_tokens)
|
|
560
|
+
|
|
561
|
+
def _get_optimization_action(self, concern: str) -> dict | None:
|
|
562
|
+
"""Generate specific optimization action for a concern type."""
|
|
563
|
+
actions = {
|
|
564
|
+
"n_plus_one": {
|
|
565
|
+
"action": "Batch database queries",
|
|
566
|
+
"description": "Use prefetch_related/select_related or batch queries",
|
|
567
|
+
"estimated_impact": "high",
|
|
568
|
+
},
|
|
569
|
+
"sync_in_async": {
|
|
570
|
+
"action": "Use async alternatives",
|
|
571
|
+
"description": "Replace sync operations with async versions",
|
|
572
|
+
"estimated_impact": "high",
|
|
573
|
+
},
|
|
574
|
+
"string_concat_loop": {
|
|
575
|
+
"action": "Use str.join()",
|
|
576
|
+
"description": "Build list of strings and join at the end instead of concatenating",
|
|
577
|
+
"estimated_impact": "medium",
|
|
578
|
+
},
|
|
579
|
+
"repeated_regex": {
|
|
580
|
+
"action": "Pre-compile regex",
|
|
581
|
+
"description": "Use re.compile() and reuse the compiled pattern",
|
|
582
|
+
"estimated_impact": "medium",
|
|
583
|
+
},
|
|
584
|
+
"nested_loops": {
|
|
585
|
+
"action": "Optimize algorithm",
|
|
586
|
+
"description": "Consider using sets, dicts, or itertools to reduce complexity",
|
|
587
|
+
"estimated_impact": "high",
|
|
588
|
+
},
|
|
589
|
+
"list_comprehension_in_loop": {
|
|
590
|
+
"action": "Move comprehension outside loop",
|
|
591
|
+
"description": "Create the list once before the loop",
|
|
592
|
+
"estimated_impact": "medium",
|
|
593
|
+
},
|
|
594
|
+
"large_list_copy": {
|
|
595
|
+
"action": "Use iterators",
|
|
596
|
+
"description": "Consider using iterators instead of copying entire lists",
|
|
597
|
+
"estimated_impact": "low",
|
|
598
|
+
},
|
|
599
|
+
"global_import": {
|
|
600
|
+
"action": "Use specific imports",
|
|
601
|
+
"description": "Import only needed names to reduce memory and startup time",
|
|
602
|
+
"estimated_impact": "low",
|
|
603
|
+
},
|
|
604
|
+
}
|
|
605
|
+
return actions.get(concern)
|
|
606
|
+
|
|
607
|
+
|
|
608
|
+
def create_perf_audit_workflow_report(result: dict, input_data: dict) -> WorkflowReport:
    """Create a WorkflowReport from performance audit results.

    Args:
        result: The optimize stage result
        input_data: Input data from previous stages

    Returns:
        WorkflowReport instance for Rich or plain text rendering
    """
    score = result.get("perf_score", 0)

    # Map the numeric score onto a report level.
    level = "success" if score >= 85 else ("warning" if score >= 50 else "error")

    scanned = input_data.get("files_scanned", 0)
    total_findings = input_data.get("finding_count", 0)
    impact_counts = input_data.get("by_impact", {})

    headline = (
        f"Scanned {scanned} files, found {total_findings} issues. "
        f"High: {impact_counts.get('high', 0)}, Medium: {impact_counts.get('medium', 0)}, "
        f"Low: {impact_counts.get('low', 0)}"
    )

    report = WorkflowReport(
        title="Performance Audit Report",
        summary=headline,
        score=score,
        level=level,
        metadata={
            "perf_level": result.get("perf_level", "unknown"),
            "files_scanned": scanned,
            "finding_count": total_findings,
        },
    )

    # Section: most frequent issue types.
    top_issues = result.get("top_issues", [])
    if top_issues:
        report.add_section(
            "Top Performance Issues",
            {
                issue.get("type", "unknown").replace("_", " ").title(): f"{issue.get('count', 0)} occurrences"
                for issue in top_issues
            },
        )

    # Section: hotspot counts by severity.
    hotspot_result = input_data.get("hotspot_result", {})
    if hotspot_result.get("hotspots", []):
        report.add_section(
            "Hotspot Summary",
            {
                "Critical Hotspots": hotspot_result.get("critical_count", 0),
                "Moderate Hotspots": hotspot_result.get("moderate_count", 0),
            },
        )

    # Section: up to ten high-impact findings as structured Finding objects.
    severe = [f for f in input_data.get("findings", []) if f.get("impact") == "high"]
    if severe:
        report.add_section(
            "High Impact Findings",
            [
                Finding(
                    severity="high",
                    file=f.get("file", "unknown"),
                    line=f.get("line"),
                    message=f.get("description", ""),
                )
                for f in severe[:10]
            ],
            style="error",
        )

    # Section: the LLM-generated optimization plan, verbatim.
    plan = result.get("optimization_plan", "")
    if plan:
        report.add_section("Optimization Recommendations", plan)

    return report
|
|
692
|
+
|
|
693
|
+
|
|
694
|
+
def format_perf_audit_report(result: dict, input_data: dict) -> str:
    """Format performance audit output as a human-readable report.

    Args:
        result: The optimize stage result
        input_data: Input data from previous stages

    Returns:
        Formatted report string
    """
    heavy_rule = "=" * 60
    light_rule = "-" * 60
    out: list[str] = []

    # Header: overall score with a traffic-light icon.
    perf_score = result.get("perf_score", 0)
    perf_level = result.get("perf_level", "unknown").upper()

    if perf_score >= 85:
        perf_icon, perf_text = "🟢", "EXCELLENT"
    elif perf_score >= 75:
        perf_icon, perf_text = "🟡", "GOOD"
    elif perf_score >= 50:
        perf_icon, perf_text = "🟠", "NEEDS OPTIMIZATION"
    else:
        perf_icon, perf_text = "🔴", "CRITICAL"

    out += [
        heavy_rule,
        "PERFORMANCE AUDIT REPORT",
        heavy_rule,
        "",
        f"Performance Score: {perf_icon} {perf_score}/100 ({perf_text})",
        f"Performance Level: {perf_level}",
        "",
    ]

    # Scan summary.
    by_impact = input_data.get("by_impact", {})
    out += [
        light_rule,
        "SCAN SUMMARY",
        light_rule,
        f"Files Scanned: {input_data.get('files_scanned', 0)}",
        f"Issues Found: {input_data.get('finding_count', 0)}",
        "",
        "Issues by Impact:",
        f"  🔴 High: {by_impact.get('high', 0)}",
        f"  🟡 Medium: {by_impact.get('medium', 0)}",
        f"  🟢 Low: {by_impact.get('low', 0)}",
        "",
    ]

    # Most frequent issue types.
    top_issues = result.get("top_issues", [])
    if top_issues:
        out += [light_rule, "TOP PERFORMANCE ISSUES", light_rule]
        for issue in top_issues:
            label = issue.get("type", "unknown").replace("_", " ").title()
            out.append(f"  • {label}: {issue.get('count', 0)} occurrences")
        out.append("")

    # Hotspot breakdown (top 8 shown).
    hotspot_result = input_data.get("hotspot_result", {})
    hotspots = hotspot_result.get("hotspots", [])
    if hotspots:
        out += [
            light_rule,
            "PERFORMANCE HOTSPOTS",
            light_rule,
            f"Critical Hotspots: {hotspot_result.get('critical_count', 0)}",
            f"Moderate Hotspots: {hotspot_result.get('moderate_count', 0)}",
            "",
        ]
        for h in hotspots[:8]:
            score = h.get("complexity_score", 0)
            icon = "🔴" if score >= 20 else "🟠" if score >= 10 else "🟡"
            out.append(f"  {icon} {h.get('file', 'unknown')}")
            out.append(f"     Score: {score} | Concerns: {', '.join(h.get('concerns', [])[:3])}")
        out.append("")

    # High-impact findings (capped at ten).
    severe = [f for f in input_data.get("findings", []) if f.get("impact") == "high"]
    if severe:
        out += [light_rule, "HIGH IMPACT FINDINGS", light_rule]
        for f in severe[:10]:
            out.append(f"  🔴 {f.get('file', 'unknown')}:{f.get('line', '?')}")
            out.append(f"     {f.get('description', 'Unknown issue')}")
        out.append("")

    # LLM optimization plan, verbatim.
    plan = result.get("optimization_plan", "")
    if plan:
        out += [light_rule, "OPTIMIZATION RECOMMENDATIONS", light_rule, plan, ""]

    # Footer.
    out += [
        heavy_rule,
        f"Analyzed {result.get('recommendation_count', 0)} hotspots using {result.get('model_tier_used', 'unknown')} tier model",
        heavy_rule,
    ]

    return "\n".join(out)
|
|
812
|
+
|
|
813
|
+
|
|
814
|
+
def main():
    """CLI entry point for performance audit workflow.

    Runs the workflow over the current directory (Python files only) and
    prints the results — via Rich when a console and structured report are
    available, otherwise as plain text.
    """
    # Local import keeps module import side-effect free.
    import asyncio

    async def run():
        # Audit the current directory, scanning only .py files.
        workflow = PerformanceAuditWorkflow()
        result = await workflow.execute(path=".", file_types=[".py"])

        output = result.final_output

        # Try Rich output first
        # NOTE(review): presumably get_console() returns None when Rich is
        # unavailable, which triggers the plain-text fallback — confirm.
        console = get_console()
        workflow_report = output.get("workflow_report")

        if console and workflow_report:
            # Render with Rich
            workflow_report.render(console, use_rich=True)
            console.print()
            console.print(f"[dim]Provider: {result.provider}[/dim]")
            console.print(f"[dim]Cost: ${result.cost_report.total_cost:.4f}[/dim]")
            savings = result.cost_report.savings
            pct = result.cost_report.savings_percent
            console.print(f"[dim]Savings: ${savings:.4f} ({pct:.1f}%)[/dim]")
        else:
            # Fallback to plain text
            print("\nPerformance Audit Results")
            print("=" * 50)
            print(f"Provider: {result.provider}")
            print(f"Success: {result.success}")

            print(f"Performance Level: {output.get('perf_level', 'N/A')}")
            print(f"Performance Score: {output.get('perf_score', 0)}/100")
            print(f"Recommendations: {output.get('recommendation_count', 0)}")

            if output.get("top_issues"):
                print("\nTop Issues:")
                for issue in output["top_issues"]:
                    print(f"  - {issue['type']}: {issue['count']} occurrences")

            print("\nCost Report:")
            print(f"  Total Cost: ${result.cost_report.total_cost:.4f}")
            savings = result.cost_report.savings
            pct = result.cost_report.savings_percent
            print(f"  Savings: ${savings:.4f} ({pct:.1f}%)")

    asyncio.run(run())
|
|
860
|
+
|
|
861
|
+
|
|
862
|
+
# Allow direct execution of this module as a standalone CLI.
if __name__ == "__main__":
    main()
|