attune_ai-2.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- attune/__init__.py +358 -0
- attune/adaptive/__init__.py +13 -0
- attune/adaptive/task_complexity.py +127 -0
- attune/agent_monitoring.py +414 -0
- attune/cache/__init__.py +117 -0
- attune/cache/base.py +166 -0
- attune/cache/dependency_manager.py +256 -0
- attune/cache/hash_only.py +251 -0
- attune/cache/hybrid.py +457 -0
- attune/cache/storage.py +285 -0
- attune/cache_monitor.py +356 -0
- attune/cache_stats.py +298 -0
- attune/cli/__init__.py +152 -0
- attune/cli/__main__.py +12 -0
- attune/cli/commands/__init__.py +1 -0
- attune/cli/commands/batch.py +264 -0
- attune/cli/commands/cache.py +248 -0
- attune/cli/commands/help.py +331 -0
- attune/cli/commands/info.py +140 -0
- attune/cli/commands/inspect.py +436 -0
- attune/cli/commands/inspection.py +57 -0
- attune/cli/commands/memory.py +48 -0
- attune/cli/commands/metrics.py +92 -0
- attune/cli/commands/orchestrate.py +184 -0
- attune/cli/commands/patterns.py +207 -0
- attune/cli/commands/profiling.py +202 -0
- attune/cli/commands/provider.py +98 -0
- attune/cli/commands/routing.py +285 -0
- attune/cli/commands/setup.py +96 -0
- attune/cli/commands/status.py +235 -0
- attune/cli/commands/sync.py +166 -0
- attune/cli/commands/tier.py +121 -0
- attune/cli/commands/utilities.py +114 -0
- attune/cli/commands/workflow.py +579 -0
- attune/cli/core.py +32 -0
- attune/cli/parsers/__init__.py +68 -0
- attune/cli/parsers/batch.py +118 -0
- attune/cli/parsers/cache.py +65 -0
- attune/cli/parsers/help.py +41 -0
- attune/cli/parsers/info.py +26 -0
- attune/cli/parsers/inspect.py +66 -0
- attune/cli/parsers/metrics.py +42 -0
- attune/cli/parsers/orchestrate.py +61 -0
- attune/cli/parsers/patterns.py +54 -0
- attune/cli/parsers/provider.py +40 -0
- attune/cli/parsers/routing.py +110 -0
- attune/cli/parsers/setup.py +42 -0
- attune/cli/parsers/status.py +47 -0
- attune/cli/parsers/sync.py +31 -0
- attune/cli/parsers/tier.py +33 -0
- attune/cli/parsers/workflow.py +77 -0
- attune/cli/utils/__init__.py +1 -0
- attune/cli/utils/data.py +242 -0
- attune/cli/utils/helpers.py +68 -0
- attune/cli_legacy.py +3957 -0
- attune/cli_minimal.py +1159 -0
- attune/cli_router.py +437 -0
- attune/cli_unified.py +814 -0
- attune/config/__init__.py +66 -0
- attune/config/xml_config.py +286 -0
- attune/config.py +545 -0
- attune/coordination.py +870 -0
- attune/core.py +1511 -0
- attune/core_modules/__init__.py +15 -0
- attune/cost_tracker.py +626 -0
- attune/dashboard/__init__.py +41 -0
- attune/dashboard/app.py +512 -0
- attune/dashboard/simple_server.py +435 -0
- attune/dashboard/standalone_server.py +547 -0
- attune/discovery.py +306 -0
- attune/emergence.py +306 -0
- attune/exceptions.py +123 -0
- attune/feedback_loops.py +373 -0
- attune/hot_reload/README.md +473 -0
- attune/hot_reload/__init__.py +62 -0
- attune/hot_reload/config.py +83 -0
- attune/hot_reload/integration.py +229 -0
- attune/hot_reload/reloader.py +298 -0
- attune/hot_reload/watcher.py +183 -0
- attune/hot_reload/websocket.py +177 -0
- attune/levels.py +577 -0
- attune/leverage_points.py +441 -0
- attune/logging_config.py +261 -0
- attune/mcp/__init__.py +10 -0
- attune/mcp/server.py +506 -0
- attune/memory/__init__.py +237 -0
- attune/memory/claude_memory.py +469 -0
- attune/memory/config.py +224 -0
- attune/memory/control_panel.py +1290 -0
- attune/memory/control_panel_support.py +145 -0
- attune/memory/cross_session.py +845 -0
- attune/memory/edges.py +179 -0
- attune/memory/encryption.py +159 -0
- attune/memory/file_session.py +770 -0
- attune/memory/graph.py +570 -0
- attune/memory/long_term.py +913 -0
- attune/memory/long_term_types.py +99 -0
- attune/memory/mixins/__init__.py +25 -0
- attune/memory/mixins/backend_init_mixin.py +249 -0
- attune/memory/mixins/capabilities_mixin.py +208 -0
- attune/memory/mixins/handoff_mixin.py +208 -0
- attune/memory/mixins/lifecycle_mixin.py +49 -0
- attune/memory/mixins/long_term_mixin.py +352 -0
- attune/memory/mixins/promotion_mixin.py +109 -0
- attune/memory/mixins/short_term_mixin.py +182 -0
- attune/memory/nodes.py +179 -0
- attune/memory/redis_bootstrap.py +540 -0
- attune/memory/security/__init__.py +31 -0
- attune/memory/security/audit_logger.py +932 -0
- attune/memory/security/pii_scrubber.py +640 -0
- attune/memory/security/secrets_detector.py +678 -0
- attune/memory/short_term.py +2192 -0
- attune/memory/simple_storage.py +302 -0
- attune/memory/storage/__init__.py +15 -0
- attune/memory/storage_backend.py +167 -0
- attune/memory/summary_index.py +583 -0
- attune/memory/types.py +446 -0
- attune/memory/unified.py +182 -0
- attune/meta_workflows/__init__.py +74 -0
- attune/meta_workflows/agent_creator.py +248 -0
- attune/meta_workflows/builtin_templates.py +567 -0
- attune/meta_workflows/cli_commands/__init__.py +56 -0
- attune/meta_workflows/cli_commands/agent_commands.py +321 -0
- attune/meta_workflows/cli_commands/analytics_commands.py +442 -0
- attune/meta_workflows/cli_commands/config_commands.py +232 -0
- attune/meta_workflows/cli_commands/memory_commands.py +182 -0
- attune/meta_workflows/cli_commands/template_commands.py +354 -0
- attune/meta_workflows/cli_commands/workflow_commands.py +382 -0
- attune/meta_workflows/cli_meta_workflows.py +59 -0
- attune/meta_workflows/form_engine.py +292 -0
- attune/meta_workflows/intent_detector.py +409 -0
- attune/meta_workflows/models.py +569 -0
- attune/meta_workflows/pattern_learner.py +738 -0
- attune/meta_workflows/plan_generator.py +384 -0
- attune/meta_workflows/session_context.py +397 -0
- attune/meta_workflows/template_registry.py +229 -0
- attune/meta_workflows/workflow.py +984 -0
- attune/metrics/__init__.py +12 -0
- attune/metrics/collector.py +31 -0
- attune/metrics/prompt_metrics.py +194 -0
- attune/models/__init__.py +172 -0
- attune/models/__main__.py +13 -0
- attune/models/adaptive_routing.py +437 -0
- attune/models/auth_cli.py +444 -0
- attune/models/auth_strategy.py +450 -0
- attune/models/cli.py +655 -0
- attune/models/empathy_executor.py +354 -0
- attune/models/executor.py +257 -0
- attune/models/fallback.py +762 -0
- attune/models/provider_config.py +282 -0
- attune/models/registry.py +472 -0
- attune/models/tasks.py +359 -0
- attune/models/telemetry/__init__.py +71 -0
- attune/models/telemetry/analytics.py +594 -0
- attune/models/telemetry/backend.py +196 -0
- attune/models/telemetry/data_models.py +431 -0
- attune/models/telemetry/storage.py +489 -0
- attune/models/token_estimator.py +420 -0
- attune/models/validation.py +280 -0
- attune/monitoring/__init__.py +52 -0
- attune/monitoring/alerts.py +946 -0
- attune/monitoring/alerts_cli.py +448 -0
- attune/monitoring/multi_backend.py +271 -0
- attune/monitoring/otel_backend.py +362 -0
- attune/optimization/__init__.py +19 -0
- attune/optimization/context_optimizer.py +272 -0
- attune/orchestration/__init__.py +67 -0
- attune/orchestration/agent_templates.py +707 -0
- attune/orchestration/config_store.py +499 -0
- attune/orchestration/execution_strategies.py +2111 -0
- attune/orchestration/meta_orchestrator.py +1168 -0
- attune/orchestration/pattern_learner.py +696 -0
- attune/orchestration/real_tools.py +931 -0
- attune/pattern_cache.py +187 -0
- attune/pattern_library.py +542 -0
- attune/patterns/debugging/all_patterns.json +81 -0
- attune/patterns/debugging/workflow_20260107_1770825e.json +77 -0
- attune/patterns/refactoring_memory.json +89 -0
- attune/persistence.py +564 -0
- attune/platform_utils.py +265 -0
- attune/plugins/__init__.py +28 -0
- attune/plugins/base.py +361 -0
- attune/plugins/registry.py +268 -0
- attune/project_index/__init__.py +32 -0
- attune/project_index/cli.py +335 -0
- attune/project_index/index.py +667 -0
- attune/project_index/models.py +504 -0
- attune/project_index/reports.py +474 -0
- attune/project_index/scanner.py +777 -0
- attune/project_index/scanner_parallel.py +291 -0
- attune/prompts/__init__.py +61 -0
- attune/prompts/config.py +77 -0
- attune/prompts/context.py +177 -0
- attune/prompts/parser.py +285 -0
- attune/prompts/registry.py +313 -0
- attune/prompts/templates.py +208 -0
- attune/redis_config.py +302 -0
- attune/redis_memory.py +799 -0
- attune/resilience/__init__.py +56 -0
- attune/resilience/circuit_breaker.py +256 -0
- attune/resilience/fallback.py +179 -0
- attune/resilience/health.py +300 -0
- attune/resilience/retry.py +209 -0
- attune/resilience/timeout.py +135 -0
- attune/routing/__init__.py +43 -0
- attune/routing/chain_executor.py +433 -0
- attune/routing/classifier.py +217 -0
- attune/routing/smart_router.py +234 -0
- attune/routing/workflow_registry.py +343 -0
- attune/scaffolding/README.md +589 -0
- attune/scaffolding/__init__.py +35 -0
- attune/scaffolding/__main__.py +14 -0
- attune/scaffolding/cli.py +240 -0
- attune/scaffolding/templates/base_wizard.py.jinja2 +121 -0
- attune/scaffolding/templates/coach_wizard.py.jinja2 +321 -0
- attune/scaffolding/templates/domain_wizard.py.jinja2 +408 -0
- attune/scaffolding/templates/linear_flow_wizard.py.jinja2 +203 -0
- attune/socratic/__init__.py +256 -0
- attune/socratic/ab_testing.py +958 -0
- attune/socratic/blueprint.py +533 -0
- attune/socratic/cli.py +703 -0
- attune/socratic/collaboration.py +1114 -0
- attune/socratic/domain_templates.py +924 -0
- attune/socratic/embeddings.py +738 -0
- attune/socratic/engine.py +794 -0
- attune/socratic/explainer.py +682 -0
- attune/socratic/feedback.py +772 -0
- attune/socratic/forms.py +629 -0
- attune/socratic/generator.py +732 -0
- attune/socratic/llm_analyzer.py +637 -0
- attune/socratic/mcp_server.py +702 -0
- attune/socratic/session.py +312 -0
- attune/socratic/storage.py +667 -0
- attune/socratic/success.py +730 -0
- attune/socratic/visual_editor.py +860 -0
- attune/socratic/web_ui.py +958 -0
- attune/telemetry/__init__.py +39 -0
- attune/telemetry/agent_coordination.py +475 -0
- attune/telemetry/agent_tracking.py +367 -0
- attune/telemetry/approval_gates.py +545 -0
- attune/telemetry/cli.py +1231 -0
- attune/telemetry/commands/__init__.py +14 -0
- attune/telemetry/commands/dashboard_commands.py +696 -0
- attune/telemetry/event_streaming.py +409 -0
- attune/telemetry/feedback_loop.py +567 -0
- attune/telemetry/usage_tracker.py +591 -0
- attune/templates.py +754 -0
- attune/test_generator/__init__.py +38 -0
- attune/test_generator/__main__.py +14 -0
- attune/test_generator/cli.py +234 -0
- attune/test_generator/generator.py +355 -0
- attune/test_generator/risk_analyzer.py +216 -0
- attune/test_generator/templates/unit_test.py.jinja2 +272 -0
- attune/tier_recommender.py +384 -0
- attune/tools.py +183 -0
- attune/trust/__init__.py +28 -0
- attune/trust/circuit_breaker.py +579 -0
- attune/trust_building.py +527 -0
- attune/validation/__init__.py +19 -0
- attune/validation/xml_validator.py +281 -0
- attune/vscode_bridge.py +173 -0
- attune/workflow_commands.py +780 -0
- attune/workflow_patterns/__init__.py +33 -0
- attune/workflow_patterns/behavior.py +249 -0
- attune/workflow_patterns/core.py +76 -0
- attune/workflow_patterns/output.py +99 -0
- attune/workflow_patterns/registry.py +255 -0
- attune/workflow_patterns/structural.py +288 -0
- attune/workflows/__init__.py +539 -0
- attune/workflows/autonomous_test_gen.py +1268 -0
- attune/workflows/base.py +2667 -0
- attune/workflows/batch_processing.py +342 -0
- attune/workflows/bug_predict.py +1084 -0
- attune/workflows/builder.py +273 -0
- attune/workflows/caching.py +253 -0
- attune/workflows/code_review.py +1048 -0
- attune/workflows/code_review_adapters.py +312 -0
- attune/workflows/code_review_pipeline.py +722 -0
- attune/workflows/config.py +645 -0
- attune/workflows/dependency_check.py +644 -0
- attune/workflows/document_gen/__init__.py +25 -0
- attune/workflows/document_gen/config.py +30 -0
- attune/workflows/document_gen/report_formatter.py +162 -0
- attune/workflows/document_gen/workflow.py +1426 -0
- attune/workflows/document_manager.py +216 -0
- attune/workflows/document_manager_README.md +134 -0
- attune/workflows/documentation_orchestrator.py +1205 -0
- attune/workflows/history.py +510 -0
- attune/workflows/keyboard_shortcuts/__init__.py +39 -0
- attune/workflows/keyboard_shortcuts/generators.py +391 -0
- attune/workflows/keyboard_shortcuts/parsers.py +416 -0
- attune/workflows/keyboard_shortcuts/prompts.py +295 -0
- attune/workflows/keyboard_shortcuts/schema.py +193 -0
- attune/workflows/keyboard_shortcuts/workflow.py +509 -0
- attune/workflows/llm_base.py +363 -0
- attune/workflows/manage_docs.py +87 -0
- attune/workflows/manage_docs_README.md +134 -0
- attune/workflows/manage_documentation.py +821 -0
- attune/workflows/new_sample_workflow1.py +149 -0
- attune/workflows/new_sample_workflow1_README.md +150 -0
- attune/workflows/orchestrated_health_check.py +849 -0
- attune/workflows/orchestrated_release_prep.py +600 -0
- attune/workflows/output.py +413 -0
- attune/workflows/perf_audit.py +863 -0
- attune/workflows/pr_review.py +762 -0
- attune/workflows/progress.py +785 -0
- attune/workflows/progress_server.py +322 -0
- attune/workflows/progressive/README 2.md +454 -0
- attune/workflows/progressive/README.md +454 -0
- attune/workflows/progressive/__init__.py +82 -0
- attune/workflows/progressive/cli.py +219 -0
- attune/workflows/progressive/core.py +488 -0
- attune/workflows/progressive/orchestrator.py +723 -0
- attune/workflows/progressive/reports.py +520 -0
- attune/workflows/progressive/telemetry.py +274 -0
- attune/workflows/progressive/test_gen.py +495 -0
- attune/workflows/progressive/workflow.py +589 -0
- attune/workflows/refactor_plan.py +694 -0
- attune/workflows/release_prep.py +895 -0
- attune/workflows/release_prep_crew.py +969 -0
- attune/workflows/research_synthesis.py +404 -0
- attune/workflows/routing.py +168 -0
- attune/workflows/secure_release.py +593 -0
- attune/workflows/security_adapters.py +297 -0
- attune/workflows/security_audit.py +1329 -0
- attune/workflows/security_audit_phase3.py +355 -0
- attune/workflows/seo_optimization.py +633 -0
- attune/workflows/step_config.py +234 -0
- attune/workflows/telemetry_mixin.py +269 -0
- attune/workflows/test5.py +125 -0
- attune/workflows/test5_README.md +158 -0
- attune/workflows/test_coverage_boost_crew.py +849 -0
- attune/workflows/test_gen/__init__.py +52 -0
- attune/workflows/test_gen/ast_analyzer.py +249 -0
- attune/workflows/test_gen/config.py +88 -0
- attune/workflows/test_gen/data_models.py +38 -0
- attune/workflows/test_gen/report_formatter.py +289 -0
- attune/workflows/test_gen/test_templates.py +381 -0
- attune/workflows/test_gen/workflow.py +655 -0
- attune/workflows/test_gen.py +54 -0
- attune/workflows/test_gen_behavioral.py +477 -0
- attune/workflows/test_gen_parallel.py +341 -0
- attune/workflows/test_lifecycle.py +526 -0
- attune/workflows/test_maintenance.py +627 -0
- attune/workflows/test_maintenance_cli.py +590 -0
- attune/workflows/test_maintenance_crew.py +840 -0
- attune/workflows/test_runner.py +622 -0
- attune/workflows/tier_tracking.py +531 -0
- attune/workflows/xml_enhanced_crew.py +285 -0
- attune_ai-2.0.0.dist-info/METADATA +1026 -0
- attune_ai-2.0.0.dist-info/RECORD +457 -0
- attune_ai-2.0.0.dist-info/WHEEL +5 -0
- attune_ai-2.0.0.dist-info/entry_points.txt +26 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE +201 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +101 -0
- attune_ai-2.0.0.dist-info/top_level.txt +5 -0
- attune_healthcare/__init__.py +13 -0
- attune_healthcare/monitors/__init__.py +9 -0
- attune_healthcare/monitors/clinical_protocol_monitor.py +315 -0
- attune_healthcare/monitors/monitoring/__init__.py +44 -0
- attune_healthcare/monitors/monitoring/protocol_checker.py +300 -0
- attune_healthcare/monitors/monitoring/protocol_loader.py +214 -0
- attune_healthcare/monitors/monitoring/sensor_parsers.py +306 -0
- attune_healthcare/monitors/monitoring/trajectory_analyzer.py +389 -0
- attune_llm/README.md +553 -0
- attune_llm/__init__.py +28 -0
- attune_llm/agent_factory/__init__.py +53 -0
- attune_llm/agent_factory/adapters/__init__.py +85 -0
- attune_llm/agent_factory/adapters/autogen_adapter.py +312 -0
- attune_llm/agent_factory/adapters/crewai_adapter.py +483 -0
- attune_llm/agent_factory/adapters/haystack_adapter.py +298 -0
- attune_llm/agent_factory/adapters/langchain_adapter.py +362 -0
- attune_llm/agent_factory/adapters/langgraph_adapter.py +333 -0
- attune_llm/agent_factory/adapters/native.py +228 -0
- attune_llm/agent_factory/adapters/wizard_adapter.py +423 -0
- attune_llm/agent_factory/base.py +305 -0
- attune_llm/agent_factory/crews/__init__.py +67 -0
- attune_llm/agent_factory/crews/code_review.py +1113 -0
- attune_llm/agent_factory/crews/health_check.py +1262 -0
- attune_llm/agent_factory/crews/refactoring.py +1128 -0
- attune_llm/agent_factory/crews/security_audit.py +1018 -0
- attune_llm/agent_factory/decorators.py +287 -0
- attune_llm/agent_factory/factory.py +558 -0
- attune_llm/agent_factory/framework.py +193 -0
- attune_llm/agent_factory/memory_integration.py +328 -0
- attune_llm/agent_factory/resilient.py +320 -0
- attune_llm/agents_md/__init__.py +22 -0
- attune_llm/agents_md/loader.py +218 -0
- attune_llm/agents_md/parser.py +271 -0
- attune_llm/agents_md/registry.py +307 -0
- attune_llm/claude_memory.py +466 -0
- attune_llm/cli/__init__.py +8 -0
- attune_llm/cli/sync_claude.py +487 -0
- attune_llm/code_health.py +1313 -0
- attune_llm/commands/__init__.py +51 -0
- attune_llm/commands/context.py +375 -0
- attune_llm/commands/loader.py +301 -0
- attune_llm/commands/models.py +231 -0
- attune_llm/commands/parser.py +371 -0
- attune_llm/commands/registry.py +429 -0
- attune_llm/config/__init__.py +29 -0
- attune_llm/config/unified.py +291 -0
- attune_llm/context/__init__.py +22 -0
- attune_llm/context/compaction.py +455 -0
- attune_llm/context/manager.py +434 -0
- attune_llm/contextual_patterns.py +361 -0
- attune_llm/core.py +907 -0
- attune_llm/git_pattern_extractor.py +435 -0
- attune_llm/hooks/__init__.py +24 -0
- attune_llm/hooks/config.py +306 -0
- attune_llm/hooks/executor.py +289 -0
- attune_llm/hooks/registry.py +302 -0
- attune_llm/hooks/scripts/__init__.py +39 -0
- attune_llm/hooks/scripts/evaluate_session.py +201 -0
- attune_llm/hooks/scripts/first_time_init.py +285 -0
- attune_llm/hooks/scripts/pre_compact.py +207 -0
- attune_llm/hooks/scripts/session_end.py +183 -0
- attune_llm/hooks/scripts/session_start.py +163 -0
- attune_llm/hooks/scripts/suggest_compact.py +225 -0
- attune_llm/learning/__init__.py +30 -0
- attune_llm/learning/evaluator.py +438 -0
- attune_llm/learning/extractor.py +514 -0
- attune_llm/learning/storage.py +560 -0
- attune_llm/levels.py +227 -0
- attune_llm/pattern_confidence.py +414 -0
- attune_llm/pattern_resolver.py +272 -0
- attune_llm/pattern_summary.py +350 -0
- attune_llm/providers.py +967 -0
- attune_llm/routing/__init__.py +32 -0
- attune_llm/routing/model_router.py +362 -0
- attune_llm/security/IMPLEMENTATION_SUMMARY.md +413 -0
- attune_llm/security/PHASE2_COMPLETE.md +384 -0
- attune_llm/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
- attune_llm/security/QUICK_REFERENCE.md +316 -0
- attune_llm/security/README.md +262 -0
- attune_llm/security/__init__.py +62 -0
- attune_llm/security/audit_logger.py +929 -0
- attune_llm/security/audit_logger_example.py +152 -0
- attune_llm/security/pii_scrubber.py +640 -0
- attune_llm/security/secrets_detector.py +678 -0
- attune_llm/security/secrets_detector_example.py +304 -0
- attune_llm/security/secure_memdocs.py +1192 -0
- attune_llm/security/secure_memdocs_example.py +278 -0
- attune_llm/session_status.py +745 -0
- attune_llm/state.py +246 -0
- attune_llm/utils/__init__.py +5 -0
- attune_llm/utils/tokens.py +349 -0
- attune_software/SOFTWARE_PLUGIN_README.md +57 -0
- attune_software/__init__.py +13 -0
- attune_software/cli/__init__.py +120 -0
- attune_software/cli/inspect.py +362 -0
- attune_software/cli.py +574 -0
- attune_software/plugin.py +188 -0
- workflow_scaffolding/__init__.py +11 -0
- workflow_scaffolding/__main__.py +12 -0
- workflow_scaffolding/cli.py +206 -0
- workflow_scaffolding/generator.py +265 -0
|
@@ -0,0 +1,1128 @@
|
|
|
1
|
+
"""Refactoring Crew
|
|
2
|
+
|
|
3
|
+
A 2-agent crew that performs interactive code refactoring analysis.
|
|
4
|
+
Designed for Level 4 Empathy with session memory, rollback capability,
|
|
5
|
+
and learning from user preferences over time.
|
|
6
|
+
|
|
7
|
+
Agents:
|
|
8
|
+
- RefactorAnalyzer: Identifies refactoring opportunities, prioritizes by impact
|
|
9
|
+
- RefactorWriter: Generates concrete code changes with before/after
|
|
10
|
+
|
|
11
|
+
Usage:
|
|
12
|
+
from attune_llm.agent_factory.crews import RefactoringCrew
|
|
13
|
+
|
|
14
|
+
crew = RefactoringCrew(api_key="...")
|
|
15
|
+
report = await crew.analyze(code="...", file_path="src/api.py")
|
|
16
|
+
|
|
17
|
+
for finding in report.findings:
|
|
18
|
+
print(f" - {finding.title} ({finding.category.value})")
|
|
19
|
+
|
|
20
|
+
# Generate the refactored code
|
|
21
|
+
finding = await crew.generate_refactor(finding, code)
|
|
22
|
+
print(f" Before: {finding.before_code[:50]}...")
|
|
23
|
+
print(f" After: {finding.after_code[:50]}...")
|
|
24
|
+
|
|
25
|
+
Copyright 2025 Smart-AI-Memory
|
|
26
|
+
Licensed under Fair Source License 0.9
|
|
27
|
+
"""
|
|
28
|
+
|
|
29
|
+
import json
|
|
30
|
+
import logging
|
|
31
|
+
import uuid
|
|
32
|
+
from dataclasses import dataclass, field
|
|
33
|
+
from datetime import datetime
|
|
34
|
+
from enum import Enum
|
|
35
|
+
from pathlib import Path
|
|
36
|
+
from typing import Any
|
|
37
|
+
|
|
38
|
+
logger = logging.getLogger(__name__)
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
# =============================================================================
|
|
42
|
+
# Enums
|
|
43
|
+
# =============================================================================
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
class RefactoringCategory(Enum):
|
|
47
|
+
"""Categories of refactoring opportunities."""
|
|
48
|
+
|
|
49
|
+
EXTRACT_METHOD = "extract_method"
|
|
50
|
+
EXTRACT_VARIABLE = "extract_variable"
|
|
51
|
+
RENAME = "rename"
|
|
52
|
+
SIMPLIFY = "simplify"
|
|
53
|
+
REMOVE_DUPLICATION = "remove_duplication"
|
|
54
|
+
RESTRUCTURE = "restructure"
|
|
55
|
+
DEAD_CODE = "dead_code"
|
|
56
|
+
TYPE_SAFETY = "type_safety"
|
|
57
|
+
INLINE = "inline"
|
|
58
|
+
CONSOLIDATE_CONDITIONAL = "consolidate_conditional"
|
|
59
|
+
OTHER = "other"
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
class Severity(Enum):
|
|
63
|
+
"""Severity levels for refactoring findings."""
|
|
64
|
+
|
|
65
|
+
CRITICAL = "critical"
|
|
66
|
+
HIGH = "high"
|
|
67
|
+
MEDIUM = "medium"
|
|
68
|
+
LOW = "low"
|
|
69
|
+
INFO = "info"
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
class Impact(Enum):
|
|
73
|
+
"""Estimated impact of applying a refactoring."""
|
|
74
|
+
|
|
75
|
+
HIGH = "high"
|
|
76
|
+
MEDIUM = "medium"
|
|
77
|
+
LOW = "low"
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
# =============================================================================
|
|
81
|
+
# Data Classes
|
|
82
|
+
# =============================================================================
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
@dataclass
|
|
86
|
+
class RefactoringFinding:
|
|
87
|
+
"""A single refactoring opportunity identified by the analyzer."""
|
|
88
|
+
|
|
89
|
+
id: str
|
|
90
|
+
title: str
|
|
91
|
+
description: str
|
|
92
|
+
category: RefactoringCategory
|
|
93
|
+
severity: Severity
|
|
94
|
+
file_path: str
|
|
95
|
+
start_line: int
|
|
96
|
+
end_line: int
|
|
97
|
+
before_code: str = ""
|
|
98
|
+
after_code: str | None = None
|
|
99
|
+
confidence: float = 1.0
|
|
100
|
+
estimated_impact: Impact = Impact.MEDIUM
|
|
101
|
+
rationale: str = ""
|
|
102
|
+
metadata: dict = field(default_factory=dict)
|
|
103
|
+
|
|
104
|
+
def to_dict(self) -> dict:
|
|
105
|
+
"""Convert finding to dictionary for serialization."""
|
|
106
|
+
return {
|
|
107
|
+
"id": self.id,
|
|
108
|
+
"title": self.title,
|
|
109
|
+
"description": self.description,
|
|
110
|
+
"category": self.category.value,
|
|
111
|
+
"severity": self.severity.value,
|
|
112
|
+
"file_path": self.file_path,
|
|
113
|
+
"start_line": self.start_line,
|
|
114
|
+
"end_line": self.end_line,
|
|
115
|
+
"before_code": self.before_code,
|
|
116
|
+
"after_code": self.after_code,
|
|
117
|
+
"confidence": self.confidence,
|
|
118
|
+
"estimated_impact": self.estimated_impact.value,
|
|
119
|
+
"rationale": self.rationale,
|
|
120
|
+
"metadata": self.metadata,
|
|
121
|
+
}
|
|
122
|
+
|
|
123
|
+
@classmethod
|
|
124
|
+
def from_dict(cls, data: dict) -> "RefactoringFinding":
|
|
125
|
+
"""Create finding from dictionary."""
|
|
126
|
+
return cls(
|
|
127
|
+
id=data.get("id", str(uuid.uuid4())),
|
|
128
|
+
title=data.get("title", "Untitled"),
|
|
129
|
+
description=data.get("description", ""),
|
|
130
|
+
category=RefactoringCategory(data.get("category", "other")),
|
|
131
|
+
severity=Severity(data.get("severity", "medium")),
|
|
132
|
+
file_path=data.get("file_path", ""),
|
|
133
|
+
start_line=data.get("start_line", 0),
|
|
134
|
+
end_line=data.get("end_line", 0),
|
|
135
|
+
before_code=data.get("before_code", ""),
|
|
136
|
+
after_code=data.get("after_code"),
|
|
137
|
+
confidence=data.get("confidence", 1.0),
|
|
138
|
+
estimated_impact=Impact(data.get("estimated_impact", "medium")),
|
|
139
|
+
rationale=data.get("rationale", ""),
|
|
140
|
+
metadata=data.get("metadata", {}),
|
|
141
|
+
)
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
@dataclass
|
|
145
|
+
class CodeCheckpoint:
|
|
146
|
+
"""Checkpoint for rollback capability."""
|
|
147
|
+
|
|
148
|
+
id: str
|
|
149
|
+
file_path: str
|
|
150
|
+
original_content: str
|
|
151
|
+
timestamp: str
|
|
152
|
+
finding_id: str
|
|
153
|
+
|
|
154
|
+
def to_dict(self) -> dict:
|
|
155
|
+
"""Convert checkpoint to dictionary."""
|
|
156
|
+
return {
|
|
157
|
+
"id": self.id,
|
|
158
|
+
"file_path": self.file_path,
|
|
159
|
+
"original_content": self.original_content,
|
|
160
|
+
"timestamp": self.timestamp,
|
|
161
|
+
"finding_id": self.finding_id,
|
|
162
|
+
}
|
|
163
|
+
|
|
164
|
+
@classmethod
|
|
165
|
+
def from_dict(cls, data: dict) -> "CodeCheckpoint":
|
|
166
|
+
"""Create checkpoint from dictionary."""
|
|
167
|
+
return cls(
|
|
168
|
+
id=data["id"],
|
|
169
|
+
file_path=data["file_path"],
|
|
170
|
+
original_content=data["original_content"],
|
|
171
|
+
timestamp=data["timestamp"],
|
|
172
|
+
finding_id=data["finding_id"],
|
|
173
|
+
)
|
|
174
|
+
|
|
175
|
+
|
|
176
|
+
@dataclass
|
|
177
|
+
class RefactoringReport:
|
|
178
|
+
"""Complete refactoring analysis report."""
|
|
179
|
+
|
|
180
|
+
target: str
|
|
181
|
+
findings: list[RefactoringFinding]
|
|
182
|
+
summary: str = ""
|
|
183
|
+
duration_seconds: float = 0.0
|
|
184
|
+
agents_used: list[str] = field(default_factory=list)
|
|
185
|
+
checkpoints: list[CodeCheckpoint] = field(default_factory=list)
|
|
186
|
+
memory_graph_hits: int = 0
|
|
187
|
+
metadata: dict = field(default_factory=dict)
|
|
188
|
+
|
|
189
|
+
@property
|
|
190
|
+
def high_impact_findings(self) -> list[RefactoringFinding]:
|
|
191
|
+
"""Get high impact findings."""
|
|
192
|
+
return [f for f in self.findings if f.estimated_impact == Impact.HIGH]
|
|
193
|
+
|
|
194
|
+
@property
|
|
195
|
+
def findings_by_category(self) -> dict[str, list[RefactoringFinding]]:
|
|
196
|
+
"""Group findings by category."""
|
|
197
|
+
result: dict[str, list[RefactoringFinding]] = {}
|
|
198
|
+
for finding in self.findings:
|
|
199
|
+
cat = finding.category.value
|
|
200
|
+
if cat not in result:
|
|
201
|
+
result[cat] = []
|
|
202
|
+
result[cat].append(finding)
|
|
203
|
+
return result
|
|
204
|
+
|
|
205
|
+
@property
|
|
206
|
+
def total_lines_affected(self) -> int:
|
|
207
|
+
"""Calculate total lines that would be affected."""
|
|
208
|
+
return sum(f.end_line - f.start_line + 1 for f in self.findings)
|
|
209
|
+
|
|
210
|
+
def to_dict(self) -> dict:
|
|
211
|
+
"""Convert report to dictionary."""
|
|
212
|
+
return {
|
|
213
|
+
"target": self.target,
|
|
214
|
+
"findings": [f.to_dict() for f in self.findings],
|
|
215
|
+
"summary": self.summary,
|
|
216
|
+
"duration_seconds": self.duration_seconds,
|
|
217
|
+
"agents_used": self.agents_used,
|
|
218
|
+
"checkpoints": [c.to_dict() for c in self.checkpoints],
|
|
219
|
+
"memory_graph_hits": self.memory_graph_hits,
|
|
220
|
+
"high_impact_count": len(self.high_impact_findings),
|
|
221
|
+
"total_lines_affected": self.total_lines_affected,
|
|
222
|
+
"metadata": self.metadata,
|
|
223
|
+
}
|
|
224
|
+
|
|
225
|
+
|
|
226
|
+
@dataclass
|
|
227
|
+
class UserProfile:
|
|
228
|
+
"""User preferences learned over time."""
|
|
229
|
+
|
|
230
|
+
user_id: str = "default"
|
|
231
|
+
updated_at: str = ""
|
|
232
|
+
accepted_categories: dict[str, int] = field(default_factory=dict)
|
|
233
|
+
rejected_categories: dict[str, int] = field(default_factory=dict)
|
|
234
|
+
preferred_complexity: str = "medium"
|
|
235
|
+
history: list[dict] = field(default_factory=list)
|
|
236
|
+
|
|
237
|
+
def get_category_score(self, category: RefactoringCategory) -> float:
|
|
238
|
+
"""Get score for a category based on user history (higher = more preferred)."""
|
|
239
|
+
cat = category.value
|
|
240
|
+
accepted = self.accepted_categories.get(cat, 0)
|
|
241
|
+
rejected = self.rejected_categories.get(cat, 0)
|
|
242
|
+
total = accepted + rejected
|
|
243
|
+
if total == 0:
|
|
244
|
+
return 0.5 # Neutral
|
|
245
|
+
return accepted / total
|
|
246
|
+
|
|
247
|
+
def to_dict(self) -> dict:
|
|
248
|
+
"""Convert to dictionary."""
|
|
249
|
+
return {
|
|
250
|
+
"user_id": self.user_id,
|
|
251
|
+
"updated_at": self.updated_at,
|
|
252
|
+
"preferences": {
|
|
253
|
+
"accepted_categories": self.accepted_categories,
|
|
254
|
+
"rejected_categories": self.rejected_categories,
|
|
255
|
+
"preferred_complexity": self.preferred_complexity,
|
|
256
|
+
},
|
|
257
|
+
"history": self.history,
|
|
258
|
+
}
|
|
259
|
+
|
|
260
|
+
@classmethod
|
|
261
|
+
def from_dict(cls, data: dict) -> "UserProfile":
|
|
262
|
+
"""Create from dictionary."""
|
|
263
|
+
prefs = data.get("preferences", {})
|
|
264
|
+
return cls(
|
|
265
|
+
user_id=data.get("user_id", "default"),
|
|
266
|
+
updated_at=data.get("updated_at", ""),
|
|
267
|
+
accepted_categories=prefs.get("accepted_categories", {}),
|
|
268
|
+
rejected_categories=prefs.get("rejected_categories", {}),
|
|
269
|
+
preferred_complexity=prefs.get("preferred_complexity", "medium"),
|
|
270
|
+
history=data.get("history", []),
|
|
271
|
+
)
|
|
272
|
+
|
|
273
|
+
|
|
274
|
+
@dataclass
|
|
275
|
+
class RefactoringConfig:
|
|
276
|
+
"""Configuration for the refactoring crew."""
|
|
277
|
+
|
|
278
|
+
# API Configuration
|
|
279
|
+
provider: str = "anthropic"
|
|
280
|
+
api_key: str | None = None
|
|
281
|
+
|
|
282
|
+
# Analysis Configuration
|
|
283
|
+
depth: str = "standard" # "quick", "standard", "thorough"
|
|
284
|
+
focus_areas: list[str] = field(
|
|
285
|
+
default_factory=lambda: [
|
|
286
|
+
"extract_method",
|
|
287
|
+
"simplify",
|
|
288
|
+
"remove_duplication",
|
|
289
|
+
"rename",
|
|
290
|
+
"dead_code",
|
|
291
|
+
],
|
|
292
|
+
)
|
|
293
|
+
|
|
294
|
+
# Memory Graph
|
|
295
|
+
memory_graph_enabled: bool = True
|
|
296
|
+
memory_graph_path: str = "patterns/refactoring_memory.json"
|
|
297
|
+
|
|
298
|
+
# User Profile
|
|
299
|
+
user_profile_enabled: bool = True
|
|
300
|
+
user_profile_path: str = ".attune/refactor_profile.json"
|
|
301
|
+
|
|
302
|
+
# Agent Tiers (cost optimization)
|
|
303
|
+
analyzer_tier: str = "capable" # GPT-4o / Claude Sonnet
|
|
304
|
+
writer_tier: str = "capable"
|
|
305
|
+
|
|
306
|
+
# Resilience
|
|
307
|
+
resilience_enabled: bool = True
|
|
308
|
+
timeout_seconds: float = 300.0
|
|
309
|
+
|
|
310
|
+
# XML Prompts
|
|
311
|
+
xml_prompts_enabled: bool = True
|
|
312
|
+
xml_schema_version: str = "1.0"
|
|
313
|
+
|
|
314
|
+
|
|
315
|
+
# =============================================================================
|
|
316
|
+
# XML Prompt Templates
|
|
317
|
+
# =============================================================================
|
|
318
|
+
|
|
319
|
+
XML_PROMPT_TEMPLATES = {
|
|
320
|
+
"refactor_analyzer": """<agent role="refactor_analyzer" version="{schema_version}">
|
|
321
|
+
<identity>
|
|
322
|
+
<role>Refactoring Analyst</role>
|
|
323
|
+
<expertise>Code analysis, refactoring patterns, code smells detection</expertise>
|
|
324
|
+
</identity>
|
|
325
|
+
|
|
326
|
+
<goal>
|
|
327
|
+
Analyze code to identify refactoring opportunities that improve maintainability,
|
|
328
|
+
readability, and performance. Prioritize by impact and confidence.
|
|
329
|
+
</goal>
|
|
330
|
+
|
|
331
|
+
<instructions>
|
|
332
|
+
<step>Analyze the code structure, complexity, and patterns</step>
|
|
333
|
+
<step>Identify code smells: long methods, duplication, poor naming, dead code</step>
|
|
334
|
+
<step>Detect opportunities for extraction, simplification, or restructuring</step>
|
|
335
|
+
<step>Assess the impact and risk of each potential refactoring</step>
|
|
336
|
+
<step>Prioritize findings by impact (high > medium > low) and confidence</step>
|
|
337
|
+
<step>Provide clear rationale for each recommendation</step>
|
|
338
|
+
</instructions>
|
|
339
|
+
|
|
340
|
+
<constraints>
|
|
341
|
+
<rule>Focus on actionable refactorings, not style preferences</rule>
|
|
342
|
+
<rule>Consider the broader codebase context when suggesting changes</rule>
|
|
343
|
+
<rule>Prioritize safety - prefer low-risk refactorings over high-risk ones</rule>
|
|
344
|
+
<rule>Include exact line numbers for each finding</rule>
|
|
345
|
+
<rule>Provide the before_code snippet for context</rule>
|
|
346
|
+
</constraints>
|
|
347
|
+
|
|
348
|
+
<refactoring_patterns>
|
|
349
|
+
<pattern name="extract_method">Long or complex code blocks that can be extracted</pattern>
|
|
350
|
+
<pattern name="extract_variable">Complex expressions that deserve a named variable</pattern>
|
|
351
|
+
<pattern name="rename">Unclear or misleading names for variables, functions, or classes</pattern>
|
|
352
|
+
<pattern name="simplify">Overly complex conditionals or logic that can be simplified</pattern>
|
|
353
|
+
<pattern name="remove_duplication">Repeated code blocks that should be consolidated</pattern>
|
|
354
|
+
<pattern name="dead_code">Unused variables, functions, or imports</pattern>
|
|
355
|
+
<pattern name="inline">Over-abstracted code that should be inlined</pattern>
|
|
356
|
+
<pattern name="consolidate_conditional">Multiple conditionals that can be merged</pattern>
|
|
357
|
+
</refactoring_patterns>
|
|
358
|
+
|
|
359
|
+
<output_format>
|
|
360
|
+
Return a JSON array of findings, each with:
|
|
361
|
+
- id: unique identifier
|
|
362
|
+
- title: brief description
|
|
363
|
+
- description: detailed explanation
|
|
364
|
+
- category: one of the refactoring patterns
|
|
365
|
+
- severity: critical/high/medium/low/info
|
|
366
|
+
- file_path: path to the file
|
|
367
|
+
- start_line: starting line number
|
|
368
|
+
- end_line: ending line number
|
|
369
|
+
- before_code: the current code snippet
|
|
370
|
+
- confidence: 0.0 to 1.0
|
|
371
|
+
- estimated_impact: high/medium/low
|
|
372
|
+
- rationale: why this refactoring is recommended
|
|
373
|
+
</output_format>
|
|
374
|
+
</agent>""",
|
|
375
|
+
"refactor_writer": """<agent role="refactor_writer" version="{schema_version}">
|
|
376
|
+
<identity>
|
|
377
|
+
<role>Refactoring Engineer</role>
|
|
378
|
+
<expertise>Code transformation, refactoring implementation, clean code</expertise>
|
|
379
|
+
</identity>
|
|
380
|
+
|
|
381
|
+
<goal>
|
|
382
|
+
Generate the refactored code for a specific finding. Produce clean, correct,
|
|
383
|
+
and idiomatic code that addresses the identified issue.
|
|
384
|
+
</goal>
|
|
385
|
+
|
|
386
|
+
<instructions>
|
|
387
|
+
<step>Understand the original code and the refactoring goal</step>
|
|
388
|
+
<step>Apply the appropriate refactoring pattern</step>
|
|
389
|
+
<step>Ensure the refactored code is syntactically correct</step>
|
|
390
|
+
<step>Maintain the original functionality - no behavior changes</step>
|
|
391
|
+
<step>Follow the project's coding style and conventions</step>
|
|
392
|
+
<step>Return the complete refactored code snippet</step>
|
|
393
|
+
</instructions>
|
|
394
|
+
|
|
395
|
+
<constraints>
|
|
396
|
+
<rule>The refactored code MUST be syntactically valid</rule>
|
|
397
|
+
<rule>Preserve all functionality - this is refactoring, not feature changes</rule>
|
|
398
|
+
<rule>Match the indentation and style of surrounding code</rule>
|
|
399
|
+
<rule>Include any necessary imports or helper functions</rule>
|
|
400
|
+
<rule>Keep the refactoring minimal - only change what's needed</rule>
|
|
401
|
+
</constraints>
|
|
402
|
+
|
|
403
|
+
<output_format>
|
|
404
|
+
Return a JSON object with:
|
|
405
|
+
- after_code: the complete refactored code snippet
|
|
406
|
+
- explanation: brief explanation of changes made
|
|
407
|
+
- imports_needed: list of any new imports required (if any)
|
|
408
|
+
</output_format>
|
|
409
|
+
</agent>""",
|
|
410
|
+
}
|
|
411
|
+
|
|
412
|
+
|
|
413
|
+
# =============================================================================
|
|
414
|
+
# RefactoringCrew Class
|
|
415
|
+
# =============================================================================
|
|
416
|
+
|
|
417
|
+
|
|
418
|
+
class RefactoringCrew:
|
|
419
|
+
"""2-agent crew for interactive code refactoring.
|
|
420
|
+
|
|
421
|
+
The crew consists of:
|
|
422
|
+
|
|
423
|
+
1. **RefactorAnalyzer** (Analysis Agent)
|
|
424
|
+
- Analyzes code for refactoring opportunities
|
|
425
|
+
- Identifies code smells and improvement areas
|
|
426
|
+
- Prioritizes by impact and confidence
|
|
427
|
+
- Model: Capable tier (cost-effective)
|
|
428
|
+
|
|
429
|
+
2. **RefactorWriter** (Generation Agent)
|
|
430
|
+
- Generates concrete refactored code
|
|
431
|
+
- Produces before/after diffs
|
|
432
|
+
- Ensures syntactic correctness
|
|
433
|
+
- Model: Capable tier
|
|
434
|
+
|
|
435
|
+
Features:
|
|
436
|
+
- Checkpoint/rollback for safe refactoring
|
|
437
|
+
- User profile learning for personalized recommendations
|
|
438
|
+
- Memory Graph integration for cross-session learning
|
|
439
|
+
|
|
440
|
+
Example:
|
|
441
|
+
crew = RefactoringCrew(api_key="...")
|
|
442
|
+
report = await crew.analyze(code="...", file_path="api.py")
|
|
443
|
+
|
|
444
|
+
for finding in report.findings:
|
|
445
|
+
# Generate refactored code on demand
|
|
446
|
+
finding = await crew.generate_refactor(finding, full_code)
|
|
447
|
+
print(f"After: {finding.after_code}")
|
|
448
|
+
|
|
449
|
+
"""
|
|
450
|
+
|
|
451
|
+
def __init__(self, config: RefactoringConfig | None = None, **kwargs: Any):
|
|
452
|
+
"""Initialize the Refactoring Crew.
|
|
453
|
+
|
|
454
|
+
Args:
|
|
455
|
+
config: RefactoringConfig or pass individual params as kwargs
|
|
456
|
+
**kwargs: Individual config parameters (api_key, provider, etc.)
|
|
457
|
+
|
|
458
|
+
"""
|
|
459
|
+
if config:
|
|
460
|
+
self.config = config
|
|
461
|
+
else:
|
|
462
|
+
self.config = RefactoringConfig(**kwargs)
|
|
463
|
+
|
|
464
|
+
self._factory: Any = None
|
|
465
|
+
self._agents: dict[str, Any] = {}
|
|
466
|
+
self._workflow: Any = None
|
|
467
|
+
self._graph: Any = None
|
|
468
|
+
self._user_profile: UserProfile | None = None
|
|
469
|
+
self._initialized = False
|
|
470
|
+
|
|
471
|
+
def _render_xml_prompt(self, template_key: str) -> str:
|
|
472
|
+
"""Render XML prompt template with config values."""
|
|
473
|
+
template = XML_PROMPT_TEMPLATES.get(template_key, "")
|
|
474
|
+
return template.format(schema_version=self.config.xml_schema_version)
|
|
475
|
+
|
|
476
|
+
def _get_system_prompt(self, agent_key: str, fallback: str) -> str:
|
|
477
|
+
"""Get system prompt - XML if enabled, fallback otherwise."""
|
|
478
|
+
if self.config.xml_prompts_enabled:
|
|
479
|
+
return self._render_xml_prompt(agent_key)
|
|
480
|
+
return fallback
|
|
481
|
+
|
|
482
|
+
async def _initialize(self) -> None:
|
|
483
|
+
"""Lazy initialization of agents and workflow."""
|
|
484
|
+
if self._initialized:
|
|
485
|
+
return
|
|
486
|
+
|
|
487
|
+
from attune_llm.agent_factory import AgentFactory, Framework
|
|
488
|
+
|
|
489
|
+
# Check if CrewAI is available
|
|
490
|
+
try:
|
|
491
|
+
from attune_llm.agent_factory.adapters.crewai_adapter import _check_crewai
|
|
492
|
+
|
|
493
|
+
use_crewai = _check_crewai()
|
|
494
|
+
except ImportError:
|
|
495
|
+
use_crewai = False
|
|
496
|
+
|
|
497
|
+
# Use CrewAI if available, otherwise fall back to Native
|
|
498
|
+
framework = Framework.CREWAI if use_crewai else Framework.NATIVE
|
|
499
|
+
|
|
500
|
+
self._factory = AgentFactory(
|
|
501
|
+
framework=framework,
|
|
502
|
+
provider=self.config.provider,
|
|
503
|
+
api_key=self.config.api_key,
|
|
504
|
+
)
|
|
505
|
+
|
|
506
|
+
# Initialize Memory Graph if enabled
|
|
507
|
+
if self.config.memory_graph_enabled:
|
|
508
|
+
try:
|
|
509
|
+
from attune.memory import MemoryGraph
|
|
510
|
+
|
|
511
|
+
self._graph = MemoryGraph(path=self.config.memory_graph_path)
|
|
512
|
+
except ImportError:
|
|
513
|
+
logger.warning("Memory Graph not available, continuing without it")
|
|
514
|
+
|
|
515
|
+
# Load user profile if enabled
|
|
516
|
+
if self.config.user_profile_enabled:
|
|
517
|
+
self._user_profile = self._load_user_profile()
|
|
518
|
+
|
|
519
|
+
# Create the 2 specialized agents
|
|
520
|
+
await self._create_agents()
|
|
521
|
+
|
|
522
|
+
self._initialized = True
|
|
523
|
+
|
|
524
|
+
async def _create_agents(self) -> None:
|
|
525
|
+
"""Create the 2 specialized refactoring agents."""
|
|
526
|
+
# Fallback prompts
|
|
527
|
+
analyzer_fallback = """You are a Refactoring Analyst.
|
|
528
|
+
|
|
529
|
+
Your job is to analyze code and identify refactoring opportunities:
|
|
530
|
+
|
|
531
|
+
1. Look for code smells:
|
|
532
|
+
- Long methods (>20 lines)
|
|
533
|
+
- Duplicate code blocks
|
|
534
|
+
- Poor naming
|
|
535
|
+
- Dead code (unused variables/functions)
|
|
536
|
+
- Complex conditionals
|
|
537
|
+
|
|
538
|
+
2. For each finding, provide:
|
|
539
|
+
- Title and description
|
|
540
|
+
- Category (extract_method, rename, simplify, etc.)
|
|
541
|
+
- Severity (critical/high/medium/low)
|
|
542
|
+
- Line numbers (start_line, end_line)
|
|
543
|
+
- The before_code snippet
|
|
544
|
+
- Confidence score (0-1)
|
|
545
|
+
- Estimated impact (high/medium/low)
|
|
546
|
+
- Rationale for the recommendation
|
|
547
|
+
|
|
548
|
+
Focus on actionable, safe refactorings. Prioritize by impact."""
|
|
549
|
+
|
|
550
|
+
writer_fallback = """You are a Refactoring Engineer.
|
|
551
|
+
|
|
552
|
+
Your job is to generate the refactored code for a specific finding.
|
|
553
|
+
|
|
554
|
+
Requirements:
|
|
555
|
+
1. The code MUST be syntactically valid
|
|
556
|
+
2. Preserve all functionality - no behavior changes
|
|
557
|
+
3. Match the project's coding style
|
|
558
|
+
4. Keep changes minimal - only change what's needed
|
|
559
|
+
|
|
560
|
+
Return the refactored code as after_code."""
|
|
561
|
+
|
|
562
|
+
# 1. RefactorAnalyzer
|
|
563
|
+
self._agents["analyzer"] = self._factory.create_agent(
|
|
564
|
+
name="refactor_analyzer",
|
|
565
|
+
role="analyst",
|
|
566
|
+
description="Analyzes code for refactoring opportunities",
|
|
567
|
+
system_prompt=self._get_system_prompt("refactor_analyzer", analyzer_fallback),
|
|
568
|
+
model_tier=self.config.analyzer_tier,
|
|
569
|
+
memory_graph_enabled=self.config.memory_graph_enabled,
|
|
570
|
+
memory_graph_path=self.config.memory_graph_path,
|
|
571
|
+
resilience_enabled=self.config.resilience_enabled,
|
|
572
|
+
)
|
|
573
|
+
|
|
574
|
+
# 2. RefactorWriter
|
|
575
|
+
self._agents["writer"] = self._factory.create_agent(
|
|
576
|
+
name="refactor_writer",
|
|
577
|
+
role="engineer",
|
|
578
|
+
description="Generates refactored code for specific findings",
|
|
579
|
+
system_prompt=self._get_system_prompt("refactor_writer", writer_fallback),
|
|
580
|
+
model_tier=self.config.writer_tier,
|
|
581
|
+
memory_graph_enabled=self.config.memory_graph_enabled,
|
|
582
|
+
memory_graph_path=self.config.memory_graph_path,
|
|
583
|
+
resilience_enabled=self.config.resilience_enabled,
|
|
584
|
+
)
|
|
585
|
+
|
|
586
|
+
# =========================================================================
|
|
587
|
+
# Public Methods
|
|
588
|
+
# =========================================================================
|
|
589
|
+
|
|
590
|
+
async def analyze(
|
|
591
|
+
self,
|
|
592
|
+
code: str,
|
|
593
|
+
file_path: str,
|
|
594
|
+
context: dict | None = None,
|
|
595
|
+
) -> RefactoringReport:
|
|
596
|
+
"""Analyze code for refactoring opportunities.
|
|
597
|
+
|
|
598
|
+
Args:
|
|
599
|
+
code: The source code to analyze
|
|
600
|
+
file_path: Path to the file being analyzed
|
|
601
|
+
context: Optional context (language, project conventions, etc.)
|
|
602
|
+
|
|
603
|
+
Returns:
|
|
604
|
+
RefactoringReport with prioritized findings
|
|
605
|
+
|
|
606
|
+
"""
|
|
607
|
+
import time
|
|
608
|
+
|
|
609
|
+
start_time = time.time()
|
|
610
|
+
|
|
611
|
+
# Initialize if needed
|
|
612
|
+
await self._initialize()
|
|
613
|
+
|
|
614
|
+
context = context or {}
|
|
615
|
+
findings: list[RefactoringFinding] = []
|
|
616
|
+
memory_hits = 0
|
|
617
|
+
|
|
618
|
+
# Query Memory Graph for similar past refactorings
|
|
619
|
+
if self._graph and self.config.memory_graph_enabled:
|
|
620
|
+
try:
|
|
621
|
+
similar = self._graph.find_similar(
|
|
622
|
+
{"name": f"refactor:{file_path}", "description": file_path},
|
|
623
|
+
threshold=0.4,
|
|
624
|
+
limit=10,
|
|
625
|
+
)
|
|
626
|
+
if similar:
|
|
627
|
+
memory_hits = len(similar)
|
|
628
|
+
context["similar_refactorings"] = [
|
|
629
|
+
{
|
|
630
|
+
"name": node.name,
|
|
631
|
+
"category": node.metadata.get("category", "unknown"),
|
|
632
|
+
}
|
|
633
|
+
for node, score in similar
|
|
634
|
+
]
|
|
635
|
+
logger.info(f"Found {memory_hits} similar past refactorings")
|
|
636
|
+
except (AttributeError, KeyError, ValueError) as e:
|
|
637
|
+
# Memory Graph data structure errors
|
|
638
|
+
logger.warning(f"Memory Graph query error (data issue): {e}")
|
|
639
|
+
except OSError as e:
|
|
640
|
+
# File system errors accessing memory graph
|
|
641
|
+
logger.warning(f"Memory Graph query error (file system): {e}")
|
|
642
|
+
except Exception:
|
|
643
|
+
# INTENTIONAL: Memory Graph is optional - continue without it
|
|
644
|
+
logger.exception("Unexpected error querying Memory Graph")
|
|
645
|
+
|
|
646
|
+
# Build analysis task
|
|
647
|
+
task = self._build_analysis_task(code, file_path, context)
|
|
648
|
+
|
|
649
|
+
# Execute analysis
|
|
650
|
+
try:
|
|
651
|
+
result = await self._agents["analyzer"].invoke(task, context)
|
|
652
|
+
findings = self._parse_findings(result)
|
|
653
|
+
|
|
654
|
+
# Apply user preferences for prioritization
|
|
655
|
+
if self._user_profile:
|
|
656
|
+
findings = self._apply_user_preferences(findings)
|
|
657
|
+
|
|
658
|
+
except KeyError as e:
|
|
659
|
+
# Agent not initialized or missing in agents dict
|
|
660
|
+
logger.error(f"Analysis failed (agent not found): {e}")
|
|
661
|
+
return RefactoringReport(
|
|
662
|
+
target=file_path,
|
|
663
|
+
findings=[],
|
|
664
|
+
summary=f"Analysis failed - agent not initialized: {e}",
|
|
665
|
+
duration_seconds=time.time() - start_time,
|
|
666
|
+
agents_used=["analyzer"],
|
|
667
|
+
memory_graph_hits=memory_hits,
|
|
668
|
+
metadata={"error": str(e)},
|
|
669
|
+
)
|
|
670
|
+
except (ValueError, TypeError, RuntimeError) as e:
|
|
671
|
+
# Agent invocation errors (invalid input, API errors, etc.)
|
|
672
|
+
logger.error(f"Analysis failed (invocation error): {e}")
|
|
673
|
+
return RefactoringReport(
|
|
674
|
+
target=file_path,
|
|
675
|
+
findings=[],
|
|
676
|
+
summary=f"Analysis failed - agent error: {e}",
|
|
677
|
+
duration_seconds=time.time() - start_time,
|
|
678
|
+
agents_used=["analyzer"],
|
|
679
|
+
memory_graph_hits=memory_hits,
|
|
680
|
+
metadata={"error": str(e)},
|
|
681
|
+
)
|
|
682
|
+
except Exception:
|
|
683
|
+
# INTENTIONAL: Graceful degradation - return empty report rather than crashing
|
|
684
|
+
logger.exception("Unexpected error in refactoring analysis")
|
|
685
|
+
return RefactoringReport(
|
|
686
|
+
target=file_path,
|
|
687
|
+
findings=[],
|
|
688
|
+
summary="Analysis failed due to unexpected error",
|
|
689
|
+
duration_seconds=time.time() - start_time,
|
|
690
|
+
agents_used=["analyzer"],
|
|
691
|
+
memory_graph_hits=memory_hits,
|
|
692
|
+
metadata={"error": "unexpected_error"},
|
|
693
|
+
)
|
|
694
|
+
|
|
695
|
+
# Build report
|
|
696
|
+
duration = time.time() - start_time
|
|
697
|
+
report = RefactoringReport(
|
|
698
|
+
target=file_path,
|
|
699
|
+
findings=findings,
|
|
700
|
+
summary=self._generate_summary(findings),
|
|
701
|
+
duration_seconds=duration,
|
|
702
|
+
agents_used=list(self._agents.keys()),
|
|
703
|
+
memory_graph_hits=memory_hits,
|
|
704
|
+
metadata={
|
|
705
|
+
"depth": self.config.depth,
|
|
706
|
+
"framework": str(self._factory.framework.value) if self._factory else "unknown",
|
|
707
|
+
},
|
|
708
|
+
)
|
|
709
|
+
|
|
710
|
+
# Store in Memory Graph
|
|
711
|
+
if self._graph and self.config.memory_graph_enabled:
|
|
712
|
+
try:
|
|
713
|
+
self._graph.add_finding(
|
|
714
|
+
"refactoring_crew",
|
|
715
|
+
{
|
|
716
|
+
"type": "refactoring_analysis",
|
|
717
|
+
"name": f"refactor:{file_path}",
|
|
718
|
+
"description": report.summary,
|
|
719
|
+
"findings_count": len(findings),
|
|
720
|
+
},
|
|
721
|
+
)
|
|
722
|
+
self._graph._save()
|
|
723
|
+
except (AttributeError, KeyError, ValueError) as e:
|
|
724
|
+
# Memory Graph data structure errors
|
|
725
|
+
logger.warning(f"Error storing in Memory Graph (data issue): {e}")
|
|
726
|
+
except (OSError, PermissionError) as e:
|
|
727
|
+
# File system errors saving memory graph
|
|
728
|
+
logger.warning(f"Error storing in Memory Graph (file system): {e}")
|
|
729
|
+
except Exception:
|
|
730
|
+
# INTENTIONAL: Memory Graph storage is optional - continue without it
|
|
731
|
+
logger.exception("Unexpected error storing in Memory Graph")
|
|
732
|
+
|
|
733
|
+
return report
|
|
734
|
+
|
|
735
|
+
async def generate_refactor(
|
|
736
|
+
self,
|
|
737
|
+
finding: RefactoringFinding,
|
|
738
|
+
full_code: str,
|
|
739
|
+
) -> RefactoringFinding:
|
|
740
|
+
"""Generate refactored code for a specific finding.
|
|
741
|
+
|
|
742
|
+
Args:
|
|
743
|
+
finding: The finding to generate refactored code for
|
|
744
|
+
full_code: The complete source file content
|
|
745
|
+
|
|
746
|
+
Returns:
|
|
747
|
+
Updated finding with after_code populated
|
|
748
|
+
|
|
749
|
+
"""
|
|
750
|
+
await self._initialize()
|
|
751
|
+
|
|
752
|
+
task = self._build_refactor_task(finding, full_code)
|
|
753
|
+
|
|
754
|
+
try:
|
|
755
|
+
result = await self._agents["writer"].invoke(task)
|
|
756
|
+
after_code = self._parse_refactor_result(result)
|
|
757
|
+
finding.after_code = after_code
|
|
758
|
+
except KeyError as e:
|
|
759
|
+
# Agent not initialized or missing in agents dict
|
|
760
|
+
logger.error(f"Refactor generation failed (agent not found): {e}")
|
|
761
|
+
finding.metadata["generation_error"] = f"Agent not initialized: {e}"
|
|
762
|
+
except (ValueError, TypeError, RuntimeError) as e:
|
|
763
|
+
# Agent invocation errors (invalid input, API errors, etc.)
|
|
764
|
+
logger.error(f"Refactor generation failed (invocation error): {e}")
|
|
765
|
+
finding.metadata["generation_error"] = f"Agent error: {e}"
|
|
766
|
+
except Exception:
|
|
767
|
+
# INTENTIONAL: Graceful degradation - finding without after_code is still useful
|
|
768
|
+
logger.exception("Unexpected error in refactor generation")
|
|
769
|
+
finding.metadata["generation_error"] = "Unexpected error"
|
|
770
|
+
|
|
771
|
+
return finding
|
|
772
|
+
|
|
773
|
+
# =========================================================================
|
|
774
|
+
# Checkpoint & Rollback
|
|
775
|
+
# =========================================================================
|
|
776
|
+
|
|
777
|
+
def create_checkpoint(
|
|
778
|
+
self,
|
|
779
|
+
file_path: str,
|
|
780
|
+
content: str,
|
|
781
|
+
finding_id: str,
|
|
782
|
+
) -> CodeCheckpoint:
|
|
783
|
+
"""Create a checkpoint before applying a change.
|
|
784
|
+
|
|
785
|
+
Args:
|
|
786
|
+
file_path: Path to the file
|
|
787
|
+
content: Current content of the file
|
|
788
|
+
finding_id: ID of the finding being applied
|
|
789
|
+
|
|
790
|
+
Returns:
|
|
791
|
+
CodeCheckpoint that can be used for rollback
|
|
792
|
+
|
|
793
|
+
"""
|
|
794
|
+
checkpoint = CodeCheckpoint(
|
|
795
|
+
id=str(uuid.uuid4()),
|
|
796
|
+
file_path=file_path,
|
|
797
|
+
original_content=content,
|
|
798
|
+
timestamp=datetime.now().isoformat(),
|
|
799
|
+
finding_id=finding_id,
|
|
800
|
+
)
|
|
801 +        return checkpoint
802 +
803 +    def rollback(self, checkpoint: CodeCheckpoint) -> str:
804 +        """Get the original content from a checkpoint.
805 +
806 +        Args:
807 +            checkpoint: The checkpoint to rollback to
808 +
809 +        Returns:
810 +            The original file content
811 +
812 +        """
813 +        return checkpoint.original_content
814 +
815 +    # =========================================================================
816 +    # User Profile Management
817 +    # =========================================================================
818 +
819 +    def _load_user_profile(self) -> UserProfile:
820 +        """Load user profile from disk."""
821 +        profile_path = Path(self.config.user_profile_path)
822 +        if profile_path.exists():
823 +            try:
824 +                with open(profile_path) as f:
825 +                    data = json.load(f)
826 +                return UserProfile.from_dict(data)
827 +            except (OSError, PermissionError) as e:
828 +                # File system errors reading profile
829 +                logger.warning(f"Failed to load user profile (file system error): {e}")
830 +            except json.JSONDecodeError as e:
831 +                # Invalid JSON in profile file
832 +                logger.warning(f"Failed to load user profile (invalid JSON): {e}")
833 +            except (KeyError, ValueError, TypeError) as e:
834 +                # Profile data validation errors
835 +                logger.warning(f"Failed to load user profile (data error): {e}")
836 +            except Exception:
837 +                # INTENTIONAL: User profile is optional - start with default
838 +                logger.exception("Unexpected error loading user profile")
839 +        return UserProfile()
840 +
841 +    def save_user_profile(self) -> None:
842 +        """Save user profile to disk."""
843 +        if not self._user_profile:
844 +            return
845 +
846 +        profile_path = Path(self.config.user_profile_path)
847 +        profile_path.parent.mkdir(parents=True, exist_ok=True)
848 +
849 +        self._user_profile.updated_at = datetime.now().isoformat()
850 +
851 +        try:
852 +            with open(profile_path, "w") as f:
853 +                json.dump(self._user_profile.to_dict(), f, indent=2)
854 +        except (OSError, PermissionError) as e:
855 +            # File system errors writing profile
856 +            logger.warning(f"Failed to save user profile (file system error): {e}")
857 +        except (TypeError, ValueError) as e:
858 +            # JSON serialization errors
859 +            logger.warning(f"Failed to save user profile (serialization error): {e}")
860 +        except Exception:
861 +            # INTENTIONAL: User profile save is optional - don't crash on failure
862 +            logger.exception("Unexpected error saving user profile")
863 +
864 +    def record_decision(self, finding: RefactoringFinding, accepted: bool) -> None:
865 +        """Record user decision for learning.
866 +
867 +        Args:
868 +            finding: The finding that was accepted or rejected
869 +            accepted: True if user accepted, False if rejected
870 +
871 +        """
872 +        if not self._user_profile:
873 +            return
874 +
875 +        cat = finding.category.value
876 +
877 +        if accepted:
878 +            self._user_profile.accepted_categories[cat] = (
879 +                self._user_profile.accepted_categories.get(cat, 0) + 1
880 +            )
881 +        else:
882 +            self._user_profile.rejected_categories[cat] = (
883 +                self._user_profile.rejected_categories.get(cat, 0) + 1
884 +            )
885 +
886 +        # Add to history
887 +        self._user_profile.history.append(
888 +            {
889 +                "session_id": str(uuid.uuid4())[:8],
890 +                "date": datetime.now().strftime("%Y-%m-%d"),
891 +                "file": finding.file_path,
892 +                "category": cat,
893 +                "accepted": accepted,
894 +            },
895 +        )
896 +
897 +        # Keep history bounded
898 +        if len(self._user_profile.history) > 100:
899 +            self._user_profile.history = self._user_profile.history[-100:]
900 +
901 +        self.save_user_profile()
902 +
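The `record_decision` hunk above bumps a per-category accept/reject counter, appends a capped history entry, and persists the profile. Below is a minimal standalone sketch of that bookkeeping, with plain dicts standing in for the UserProfile fields and illustrative values (the category string and file path are assumptions, not taken from the package):

```python
# Standalone sketch of the bookkeeping record_decision performs above.
# Plain dicts stand in for the UserProfile fields; values are illustrative.
import uuid
from datetime import datetime

accepted_categories: dict[str, int] = {}
rejected_categories: dict[str, int] = {}
history: list[dict] = []

def record(category: str, file_path: str, accepted: bool) -> None:
    bucket = accepted_categories if accepted else rejected_categories
    bucket[category] = bucket.get(category, 0) + 1
    history.append(
        {
            "session_id": str(uuid.uuid4())[:8],
            "date": datetime.now().strftime("%Y-%m-%d"),
            "file": file_path,
            "category": category,
            "accepted": accepted,
        }
    )
    del history[:-100]  # keep only the last 100 entries, as the hunk does

record("other", "attune/example.py", accepted=True)
record("other", "attune/example.py", accepted=False)
print(accepted_categories, rejected_categories)  # {'other': 1} {'other': 1}
```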
903 +    # =========================================================================
904 +    # Private Helper Methods
905 +    # =========================================================================
906 +
907 +    def _build_analysis_task(self, code: str, file_path: str, context: dict) -> str:
908 +        """Build the analysis task for the analyzer agent."""
909 +        depth_instructions = {
910 +            "quick": "Focus on obvious issues only. Skip minor improvements.",
911 +            "standard": "Balance thoroughness with practicality. Cover major patterns.",
912 +            "thorough": "Deep analysis including subtle improvements and edge cases.",
913 +        }
914 +
915 +        focus_list = ", ".join(self.config.focus_areas)
916 +
917 +        task = f"""Analyze the following code for refactoring opportunities.
918 +
919 +File: {file_path}
920 +Analysis Depth: {self.config.depth}
921 +Instructions: {depth_instructions.get(self.config.depth, "standard")}
922 +Focus Areas: {focus_list}
923 +
924 +```
925 +{code[:20000]}
926 +```
927 +
928 +Return a JSON array of findings. Each finding should have:
929 +- id: unique identifier (use UUID format)
930 +- title: brief description
931 +- description: detailed explanation
932 +- category: {focus_list}, or "other"
933 +- severity: critical, high, medium, low, or info
934 +- file_path: "{file_path}"
935 +- start_line: starting line number
936 +- end_line: ending line number
937 +- before_code: the current code snippet
938 +- confidence: 0.0 to 1.0
939 +- estimated_impact: high, medium, or low
940 +- rationale: why this refactoring is recommended
941 +
942 +Prioritize by impact and confidence. Return at most 10 findings.
943 +"""
944 +
945 +        if context.get("similar_refactorings"):
946 +            task += f"\n\nSimilar past refactorings found: {len(context['similar_refactorings'])}"
947 +
948 +        return task
949 +
950 +    def _build_refactor_task(self, finding: RefactoringFinding, full_code: str) -> str:
951 +        """Build the refactoring task for the writer agent."""
952 +        return f"""Generate refactored code for the following finding.
953 +
954 +Finding: {finding.title}
955 +Category: {finding.category.value}
956 +Description: {finding.description}
957 +Lines: {finding.start_line} to {finding.end_line}
958 +Rationale: {finding.rationale}
959 +
960 +Current code (before):
961 +```
962 +{finding.before_code}
963 +```
964 +
965 +Full file context:
966 +```
967 +{full_code[:15000]}
968 +```
969 +
970 +Return a JSON object with:
971 +- after_code: the complete refactored code snippet (to replace before_code)
972 +- explanation: brief explanation of changes made
973 +
974 +The refactored code MUST be syntactically valid and preserve all functionality.
975 +"""
976 +
977 +    def _parse_findings(self, result: dict) -> list[RefactoringFinding]:
978 +        """Parse findings from analyzer result."""
979 +        findings = []
980 +
981 +        output = result.get("output", "")
982 +        metadata = result.get("metadata", {})
983 +
984 +        # Try structured findings first
985 +        if "findings" in metadata:
986 +            for f in metadata["findings"]:
987 +                findings.append(RefactoringFinding.from_dict(f))
988 +            return findings
989 +
990 +        # Try to parse JSON from output
991 +        try:
992 +            # Look for JSON array in output
993 +            import re
994 +
995 +            json_match = re.search(r"\[[\s\S]*\]", output)
996 +            if json_match:
997 +                data = json.loads(json_match.group())
998 +                for f in data:
999 +                    findings.append(RefactoringFinding.from_dict(f))
1000 +                return findings
1001 +        except json.JSONDecodeError:
1002 +            pass
1003 +
1004 +        # Fallback: create a single finding from text
1005 +        if output.strip():
1006 +            findings.append(
1007 +                RefactoringFinding(
1008 +                    id=str(uuid.uuid4()),
1009 +                    title="Analysis Result",
1010 +                    description=output[:500],
1011 +                    category=RefactoringCategory.OTHER,
1012 +                    severity=Severity.INFO,
1013 +                    file_path="",
1014 +                    start_line=0,
1015 +                    end_line=0,
1016 +                    confidence=0.5,
1017 +                ),
1018 +            )
1019 +
1020 +        return findings
1021 +
1022 +    def _parse_refactor_result(self, result: dict) -> str:
1023 +        """Parse refactored code from writer result."""
1024 +        output = result.get("output", "")
1025 +        metadata = result.get("metadata", {})
1026 +
1027 +        # Check metadata first
1028 +        if "after_code" in metadata:
1029 +            return str(metadata["after_code"])
1030 +
1031 +        # Try to parse JSON from output
1032 +        try:
1033 +            import re
1034 +
1035 +            json_match = re.search(r"\{[\s\S]*\}", output)
1036 +            if json_match:
1037 +                data = json.loads(json_match.group())
1038 +                if "after_code" in data:
1039 +                    return str(data["after_code"])
1040 +        except json.JSONDecodeError:
1041 +            pass
1042 +
1043 +        # Look for code blocks
1044 +        import re
1045 +
1046 +        code_match = re.search(r"```(?:\w+)?\n([\s\S]*?)```", output)
1047 +        if code_match:
1048 +            return code_match.group(1).strip()
1049 +
1050 +        # Return raw output as fallback
1051 +        return str(output).strip()
1052 +
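The `_parse_refactor_result` hunk above falls back from structured metadata, to a JSON object in the output, to a fenced code block, to the raw text. A standalone sketch of the fenced-block step, reusing the same regex on made-up model output:

```python
# Illustration of the code-fence fallback above; the sample output is made up.
import re

fence = "`" * 3
output = f"Here is the refactor:\n{fence}python\ndef add(a, b):\n    return a + b\n{fence}"

match = re.search(r"```(?:\w+)?\n([\s\S]*?)```", output)
if match:
    print(match.group(1).strip())
    # prints:
    # def add(a, b):
    #     return a + b
```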
1053 +    def _apply_user_preferences(
1054 +        self,
1055 +        findings: list[RefactoringFinding],
1056 +    ) -> list[RefactoringFinding]:
1057 +        """Apply user preferences to prioritize findings."""
1058 +        if not self._user_profile:
1059 +            return findings
1060 +
1061 +        user_profile = self._user_profile  # Capture for closure with non-None type
1062 +
1063 +        def score(finding: RefactoringFinding) -> float:
1064 +            # Base score from impact
1065 +            impact_scores = {Impact.HIGH: 3.0, Impact.MEDIUM: 2.0, Impact.LOW: 1.0}
1066 +            base = impact_scores.get(finding.estimated_impact, 2.0)
1067 +
1068 +            # Adjust by user preference
1069 +            pref = user_profile.get_category_score(finding.category)
1070 +            adjusted = base * (0.5 + pref)  # Range: 0.5x to 1.5x
1071 +
1072 +            # Adjust by confidence
1073 +            return adjusted * finding.confidence
1074 +
1075 +        return sorted(findings, key=score, reverse=True)
1076 +
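The `score` closure above multiplies an impact base by a learned category-preference factor and the finding's confidence, then sorts descending. A worked example with assumed numbers:

```python
# Worked example of the scoring rule above; all numbers are assumed.
impact_base = 3.0      # Impact.HIGH
category_pref = 0.8    # get_category_score(...) for a category the user tends to accept
confidence = 0.9

score = impact_base * (0.5 + category_pref) * confidence
print(round(score, 2))  # 3.51: a high-impact, favored, confident finding sorts first

# A low-impact finding (base 1.0) in a disliked category (pref 0.2) at the same
# confidence scores 1.0 * (0.5 + 0.2) * 0.9 = 0.63, so it sorts toward the end.
```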
1077 +    def _generate_summary(self, findings: list[RefactoringFinding]) -> str:
1078 +        """Generate summary of analysis."""
1079 +        if not findings:
1080 +            return "No refactoring opportunities identified."
1081 +
1082 +        by_impact = {
1083 +            Impact.HIGH: sum(1 for f in findings if f.estimated_impact == Impact.HIGH),
1084 +            Impact.MEDIUM: sum(1 for f in findings if f.estimated_impact == Impact.MEDIUM),
1085 +            Impact.LOW: sum(1 for f in findings if f.estimated_impact == Impact.LOW),
1086 +        }
1087 +
1088 +        parts = [f"Found {len(findings)} refactoring opportunities:"]
1089 +
1090 +        if by_impact[Impact.HIGH] > 0:
1091 +            parts.append(f" - {by_impact[Impact.HIGH]} high impact")
1092 +        if by_impact[Impact.MEDIUM] > 0:
1093 +            parts.append(f" - {by_impact[Impact.MEDIUM]} medium impact")
1094 +        if by_impact[Impact.LOW] > 0:
1095 +            parts.append(f" - {by_impact[Impact.LOW]} low impact")
1096 +
1097 +        # Top categories
1098 +        by_cat: dict[str, int] = {}
1099 +        for f in findings:
1100 +            cat = f.category.value
1101 +            by_cat[cat] = by_cat.get(cat, 0) + 1
1102 +
1103 +        if by_cat:
1104 +            top = sorted(by_cat.items(), key=lambda x: x[1], reverse=True)[:3]
1105 +            parts.append("\nTop categories:")
1106 +            for cat, count in top:
1107 +                parts.append(f" - {cat}: {count}")
1108 +
1109 +        return "\n".join(parts)
1110 +
1111 +    # =========================================================================
1112 +    # Properties
1113 +    # =========================================================================
1114 +
1115 +    @property
1116 +    def agents(self) -> dict[str, Any]:
1117 +        """Get the crew's agents."""
1118 +        return self._agents
1119 +
1120 +    @property
1121 +    def is_initialized(self) -> bool:
1122 +        """Check if crew is initialized."""
1123 +        return self._initialized
1124 +
1125 +    @property
1126 +    def user_profile(self) -> UserProfile | None:
1127 +        """Get the current user profile."""
1128 +        return self._user_profile