attune_ai-2.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- attune/__init__.py +358 -0
- attune/adaptive/__init__.py +13 -0
- attune/adaptive/task_complexity.py +127 -0
- attune/agent_monitoring.py +414 -0
- attune/cache/__init__.py +117 -0
- attune/cache/base.py +166 -0
- attune/cache/dependency_manager.py +256 -0
- attune/cache/hash_only.py +251 -0
- attune/cache/hybrid.py +457 -0
- attune/cache/storage.py +285 -0
- attune/cache_monitor.py +356 -0
- attune/cache_stats.py +298 -0
- attune/cli/__init__.py +152 -0
- attune/cli/__main__.py +12 -0
- attune/cli/commands/__init__.py +1 -0
- attune/cli/commands/batch.py +264 -0
- attune/cli/commands/cache.py +248 -0
- attune/cli/commands/help.py +331 -0
- attune/cli/commands/info.py +140 -0
- attune/cli/commands/inspect.py +436 -0
- attune/cli/commands/inspection.py +57 -0
- attune/cli/commands/memory.py +48 -0
- attune/cli/commands/metrics.py +92 -0
- attune/cli/commands/orchestrate.py +184 -0
- attune/cli/commands/patterns.py +207 -0
- attune/cli/commands/profiling.py +202 -0
- attune/cli/commands/provider.py +98 -0
- attune/cli/commands/routing.py +285 -0
- attune/cli/commands/setup.py +96 -0
- attune/cli/commands/status.py +235 -0
- attune/cli/commands/sync.py +166 -0
- attune/cli/commands/tier.py +121 -0
- attune/cli/commands/utilities.py +114 -0
- attune/cli/commands/workflow.py +579 -0
- attune/cli/core.py +32 -0
- attune/cli/parsers/__init__.py +68 -0
- attune/cli/parsers/batch.py +118 -0
- attune/cli/parsers/cache.py +65 -0
- attune/cli/parsers/help.py +41 -0
- attune/cli/parsers/info.py +26 -0
- attune/cli/parsers/inspect.py +66 -0
- attune/cli/parsers/metrics.py +42 -0
- attune/cli/parsers/orchestrate.py +61 -0
- attune/cli/parsers/patterns.py +54 -0
- attune/cli/parsers/provider.py +40 -0
- attune/cli/parsers/routing.py +110 -0
- attune/cli/parsers/setup.py +42 -0
- attune/cli/parsers/status.py +47 -0
- attune/cli/parsers/sync.py +31 -0
- attune/cli/parsers/tier.py +33 -0
- attune/cli/parsers/workflow.py +77 -0
- attune/cli/utils/__init__.py +1 -0
- attune/cli/utils/data.py +242 -0
- attune/cli/utils/helpers.py +68 -0
- attune/cli_legacy.py +3957 -0
- attune/cli_minimal.py +1159 -0
- attune/cli_router.py +437 -0
- attune/cli_unified.py +814 -0
- attune/config/__init__.py +66 -0
- attune/config/xml_config.py +286 -0
- attune/config.py +545 -0
- attune/coordination.py +870 -0
- attune/core.py +1511 -0
- attune/core_modules/__init__.py +15 -0
- attune/cost_tracker.py +626 -0
- attune/dashboard/__init__.py +41 -0
- attune/dashboard/app.py +512 -0
- attune/dashboard/simple_server.py +435 -0
- attune/dashboard/standalone_server.py +547 -0
- attune/discovery.py +306 -0
- attune/emergence.py +306 -0
- attune/exceptions.py +123 -0
- attune/feedback_loops.py +373 -0
- attune/hot_reload/README.md +473 -0
- attune/hot_reload/__init__.py +62 -0
- attune/hot_reload/config.py +83 -0
- attune/hot_reload/integration.py +229 -0
- attune/hot_reload/reloader.py +298 -0
- attune/hot_reload/watcher.py +183 -0
- attune/hot_reload/websocket.py +177 -0
- attune/levels.py +577 -0
- attune/leverage_points.py +441 -0
- attune/logging_config.py +261 -0
- attune/mcp/__init__.py +10 -0
- attune/mcp/server.py +506 -0
- attune/memory/__init__.py +237 -0
- attune/memory/claude_memory.py +469 -0
- attune/memory/config.py +224 -0
- attune/memory/control_panel.py +1290 -0
- attune/memory/control_panel_support.py +145 -0
- attune/memory/cross_session.py +845 -0
- attune/memory/edges.py +179 -0
- attune/memory/encryption.py +159 -0
- attune/memory/file_session.py +770 -0
- attune/memory/graph.py +570 -0
- attune/memory/long_term.py +913 -0
- attune/memory/long_term_types.py +99 -0
- attune/memory/mixins/__init__.py +25 -0
- attune/memory/mixins/backend_init_mixin.py +249 -0
- attune/memory/mixins/capabilities_mixin.py +208 -0
- attune/memory/mixins/handoff_mixin.py +208 -0
- attune/memory/mixins/lifecycle_mixin.py +49 -0
- attune/memory/mixins/long_term_mixin.py +352 -0
- attune/memory/mixins/promotion_mixin.py +109 -0
- attune/memory/mixins/short_term_mixin.py +182 -0
- attune/memory/nodes.py +179 -0
- attune/memory/redis_bootstrap.py +540 -0
- attune/memory/security/__init__.py +31 -0
- attune/memory/security/audit_logger.py +932 -0
- attune/memory/security/pii_scrubber.py +640 -0
- attune/memory/security/secrets_detector.py +678 -0
- attune/memory/short_term.py +2192 -0
- attune/memory/simple_storage.py +302 -0
- attune/memory/storage/__init__.py +15 -0
- attune/memory/storage_backend.py +167 -0
- attune/memory/summary_index.py +583 -0
- attune/memory/types.py +446 -0
- attune/memory/unified.py +182 -0
- attune/meta_workflows/__init__.py +74 -0
- attune/meta_workflows/agent_creator.py +248 -0
- attune/meta_workflows/builtin_templates.py +567 -0
- attune/meta_workflows/cli_commands/__init__.py +56 -0
- attune/meta_workflows/cli_commands/agent_commands.py +321 -0
- attune/meta_workflows/cli_commands/analytics_commands.py +442 -0
- attune/meta_workflows/cli_commands/config_commands.py +232 -0
- attune/meta_workflows/cli_commands/memory_commands.py +182 -0
- attune/meta_workflows/cli_commands/template_commands.py +354 -0
- attune/meta_workflows/cli_commands/workflow_commands.py +382 -0
- attune/meta_workflows/cli_meta_workflows.py +59 -0
- attune/meta_workflows/form_engine.py +292 -0
- attune/meta_workflows/intent_detector.py +409 -0
- attune/meta_workflows/models.py +569 -0
- attune/meta_workflows/pattern_learner.py +738 -0
- attune/meta_workflows/plan_generator.py +384 -0
- attune/meta_workflows/session_context.py +397 -0
- attune/meta_workflows/template_registry.py +229 -0
- attune/meta_workflows/workflow.py +984 -0
- attune/metrics/__init__.py +12 -0
- attune/metrics/collector.py +31 -0
- attune/metrics/prompt_metrics.py +194 -0
- attune/models/__init__.py +172 -0
- attune/models/__main__.py +13 -0
- attune/models/adaptive_routing.py +437 -0
- attune/models/auth_cli.py +444 -0
- attune/models/auth_strategy.py +450 -0
- attune/models/cli.py +655 -0
- attune/models/empathy_executor.py +354 -0
- attune/models/executor.py +257 -0
- attune/models/fallback.py +762 -0
- attune/models/provider_config.py +282 -0
- attune/models/registry.py +472 -0
- attune/models/tasks.py +359 -0
- attune/models/telemetry/__init__.py +71 -0
- attune/models/telemetry/analytics.py +594 -0
- attune/models/telemetry/backend.py +196 -0
- attune/models/telemetry/data_models.py +431 -0
- attune/models/telemetry/storage.py +489 -0
- attune/models/token_estimator.py +420 -0
- attune/models/validation.py +280 -0
- attune/monitoring/__init__.py +52 -0
- attune/monitoring/alerts.py +946 -0
- attune/monitoring/alerts_cli.py +448 -0
- attune/monitoring/multi_backend.py +271 -0
- attune/monitoring/otel_backend.py +362 -0
- attune/optimization/__init__.py +19 -0
- attune/optimization/context_optimizer.py +272 -0
- attune/orchestration/__init__.py +67 -0
- attune/orchestration/agent_templates.py +707 -0
- attune/orchestration/config_store.py +499 -0
- attune/orchestration/execution_strategies.py +2111 -0
- attune/orchestration/meta_orchestrator.py +1168 -0
- attune/orchestration/pattern_learner.py +696 -0
- attune/orchestration/real_tools.py +931 -0
- attune/pattern_cache.py +187 -0
- attune/pattern_library.py +542 -0
- attune/patterns/debugging/all_patterns.json +81 -0
- attune/patterns/debugging/workflow_20260107_1770825e.json +77 -0
- attune/patterns/refactoring_memory.json +89 -0
- attune/persistence.py +564 -0
- attune/platform_utils.py +265 -0
- attune/plugins/__init__.py +28 -0
- attune/plugins/base.py +361 -0
- attune/plugins/registry.py +268 -0
- attune/project_index/__init__.py +32 -0
- attune/project_index/cli.py +335 -0
- attune/project_index/index.py +667 -0
- attune/project_index/models.py +504 -0
- attune/project_index/reports.py +474 -0
- attune/project_index/scanner.py +777 -0
- attune/project_index/scanner_parallel.py +291 -0
- attune/prompts/__init__.py +61 -0
- attune/prompts/config.py +77 -0
- attune/prompts/context.py +177 -0
- attune/prompts/parser.py +285 -0
- attune/prompts/registry.py +313 -0
- attune/prompts/templates.py +208 -0
- attune/redis_config.py +302 -0
- attune/redis_memory.py +799 -0
- attune/resilience/__init__.py +56 -0
- attune/resilience/circuit_breaker.py +256 -0
- attune/resilience/fallback.py +179 -0
- attune/resilience/health.py +300 -0
- attune/resilience/retry.py +209 -0
- attune/resilience/timeout.py +135 -0
- attune/routing/__init__.py +43 -0
- attune/routing/chain_executor.py +433 -0
- attune/routing/classifier.py +217 -0
- attune/routing/smart_router.py +234 -0
- attune/routing/workflow_registry.py +343 -0
- attune/scaffolding/README.md +589 -0
- attune/scaffolding/__init__.py +35 -0
- attune/scaffolding/__main__.py +14 -0
- attune/scaffolding/cli.py +240 -0
- attune/scaffolding/templates/base_wizard.py.jinja2 +121 -0
- attune/scaffolding/templates/coach_wizard.py.jinja2 +321 -0
- attune/scaffolding/templates/domain_wizard.py.jinja2 +408 -0
- attune/scaffolding/templates/linear_flow_wizard.py.jinja2 +203 -0
- attune/socratic/__init__.py +256 -0
- attune/socratic/ab_testing.py +958 -0
- attune/socratic/blueprint.py +533 -0
- attune/socratic/cli.py +703 -0
- attune/socratic/collaboration.py +1114 -0
- attune/socratic/domain_templates.py +924 -0
- attune/socratic/embeddings.py +738 -0
- attune/socratic/engine.py +794 -0
- attune/socratic/explainer.py +682 -0
- attune/socratic/feedback.py +772 -0
- attune/socratic/forms.py +629 -0
- attune/socratic/generator.py +732 -0
- attune/socratic/llm_analyzer.py +637 -0
- attune/socratic/mcp_server.py +702 -0
- attune/socratic/session.py +312 -0
- attune/socratic/storage.py +667 -0
- attune/socratic/success.py +730 -0
- attune/socratic/visual_editor.py +860 -0
- attune/socratic/web_ui.py +958 -0
- attune/telemetry/__init__.py +39 -0
- attune/telemetry/agent_coordination.py +475 -0
- attune/telemetry/agent_tracking.py +367 -0
- attune/telemetry/approval_gates.py +545 -0
- attune/telemetry/cli.py +1231 -0
- attune/telemetry/commands/__init__.py +14 -0
- attune/telemetry/commands/dashboard_commands.py +696 -0
- attune/telemetry/event_streaming.py +409 -0
- attune/telemetry/feedback_loop.py +567 -0
- attune/telemetry/usage_tracker.py +591 -0
- attune/templates.py +754 -0
- attune/test_generator/__init__.py +38 -0
- attune/test_generator/__main__.py +14 -0
- attune/test_generator/cli.py +234 -0
- attune/test_generator/generator.py +355 -0
- attune/test_generator/risk_analyzer.py +216 -0
- attune/test_generator/templates/unit_test.py.jinja2 +272 -0
- attune/tier_recommender.py +384 -0
- attune/tools.py +183 -0
- attune/trust/__init__.py +28 -0
- attune/trust/circuit_breaker.py +579 -0
- attune/trust_building.py +527 -0
- attune/validation/__init__.py +19 -0
- attune/validation/xml_validator.py +281 -0
- attune/vscode_bridge.py +173 -0
- attune/workflow_commands.py +780 -0
- attune/workflow_patterns/__init__.py +33 -0
- attune/workflow_patterns/behavior.py +249 -0
- attune/workflow_patterns/core.py +76 -0
- attune/workflow_patterns/output.py +99 -0
- attune/workflow_patterns/registry.py +255 -0
- attune/workflow_patterns/structural.py +288 -0
- attune/workflows/__init__.py +539 -0
- attune/workflows/autonomous_test_gen.py +1268 -0
- attune/workflows/base.py +2667 -0
- attune/workflows/batch_processing.py +342 -0
- attune/workflows/bug_predict.py +1084 -0
- attune/workflows/builder.py +273 -0
- attune/workflows/caching.py +253 -0
- attune/workflows/code_review.py +1048 -0
- attune/workflows/code_review_adapters.py +312 -0
- attune/workflows/code_review_pipeline.py +722 -0
- attune/workflows/config.py +645 -0
- attune/workflows/dependency_check.py +644 -0
- attune/workflows/document_gen/__init__.py +25 -0
- attune/workflows/document_gen/config.py +30 -0
- attune/workflows/document_gen/report_formatter.py +162 -0
- attune/workflows/document_gen/workflow.py +1426 -0
- attune/workflows/document_manager.py +216 -0
- attune/workflows/document_manager_README.md +134 -0
- attune/workflows/documentation_orchestrator.py +1205 -0
- attune/workflows/history.py +510 -0
- attune/workflows/keyboard_shortcuts/__init__.py +39 -0
- attune/workflows/keyboard_shortcuts/generators.py +391 -0
- attune/workflows/keyboard_shortcuts/parsers.py +416 -0
- attune/workflows/keyboard_shortcuts/prompts.py +295 -0
- attune/workflows/keyboard_shortcuts/schema.py +193 -0
- attune/workflows/keyboard_shortcuts/workflow.py +509 -0
- attune/workflows/llm_base.py +363 -0
- attune/workflows/manage_docs.py +87 -0
- attune/workflows/manage_docs_README.md +134 -0
- attune/workflows/manage_documentation.py +821 -0
- attune/workflows/new_sample_workflow1.py +149 -0
- attune/workflows/new_sample_workflow1_README.md +150 -0
- attune/workflows/orchestrated_health_check.py +849 -0
- attune/workflows/orchestrated_release_prep.py +600 -0
- attune/workflows/output.py +413 -0
- attune/workflows/perf_audit.py +863 -0
- attune/workflows/pr_review.py +762 -0
- attune/workflows/progress.py +785 -0
- attune/workflows/progress_server.py +322 -0
- attune/workflows/progressive/README 2.md +454 -0
- attune/workflows/progressive/README.md +454 -0
- attune/workflows/progressive/__init__.py +82 -0
- attune/workflows/progressive/cli.py +219 -0
- attune/workflows/progressive/core.py +488 -0
- attune/workflows/progressive/orchestrator.py +723 -0
- attune/workflows/progressive/reports.py +520 -0
- attune/workflows/progressive/telemetry.py +274 -0
- attune/workflows/progressive/test_gen.py +495 -0
- attune/workflows/progressive/workflow.py +589 -0
- attune/workflows/refactor_plan.py +694 -0
- attune/workflows/release_prep.py +895 -0
- attune/workflows/release_prep_crew.py +969 -0
- attune/workflows/research_synthesis.py +404 -0
- attune/workflows/routing.py +168 -0
- attune/workflows/secure_release.py +593 -0
- attune/workflows/security_adapters.py +297 -0
- attune/workflows/security_audit.py +1329 -0
- attune/workflows/security_audit_phase3.py +355 -0
- attune/workflows/seo_optimization.py +633 -0
- attune/workflows/step_config.py +234 -0
- attune/workflows/telemetry_mixin.py +269 -0
- attune/workflows/test5.py +125 -0
- attune/workflows/test5_README.md +158 -0
- attune/workflows/test_coverage_boost_crew.py +849 -0
- attune/workflows/test_gen/__init__.py +52 -0
- attune/workflows/test_gen/ast_analyzer.py +249 -0
- attune/workflows/test_gen/config.py +88 -0
- attune/workflows/test_gen/data_models.py +38 -0
- attune/workflows/test_gen/report_formatter.py +289 -0
- attune/workflows/test_gen/test_templates.py +381 -0
- attune/workflows/test_gen/workflow.py +655 -0
- attune/workflows/test_gen.py +54 -0
- attune/workflows/test_gen_behavioral.py +477 -0
- attune/workflows/test_gen_parallel.py +341 -0
- attune/workflows/test_lifecycle.py +526 -0
- attune/workflows/test_maintenance.py +627 -0
- attune/workflows/test_maintenance_cli.py +590 -0
- attune/workflows/test_maintenance_crew.py +840 -0
- attune/workflows/test_runner.py +622 -0
- attune/workflows/tier_tracking.py +531 -0
- attune/workflows/xml_enhanced_crew.py +285 -0
- attune_ai-2.0.0.dist-info/METADATA +1026 -0
- attune_ai-2.0.0.dist-info/RECORD +457 -0
- attune_ai-2.0.0.dist-info/WHEEL +5 -0
- attune_ai-2.0.0.dist-info/entry_points.txt +26 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE +201 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +101 -0
- attune_ai-2.0.0.dist-info/top_level.txt +5 -0
- attune_healthcare/__init__.py +13 -0
- attune_healthcare/monitors/__init__.py +9 -0
- attune_healthcare/monitors/clinical_protocol_monitor.py +315 -0
- attune_healthcare/monitors/monitoring/__init__.py +44 -0
- attune_healthcare/monitors/monitoring/protocol_checker.py +300 -0
- attune_healthcare/monitors/monitoring/protocol_loader.py +214 -0
- attune_healthcare/monitors/monitoring/sensor_parsers.py +306 -0
- attune_healthcare/monitors/monitoring/trajectory_analyzer.py +389 -0
- attune_llm/README.md +553 -0
- attune_llm/__init__.py +28 -0
- attune_llm/agent_factory/__init__.py +53 -0
- attune_llm/agent_factory/adapters/__init__.py +85 -0
- attune_llm/agent_factory/adapters/autogen_adapter.py +312 -0
- attune_llm/agent_factory/adapters/crewai_adapter.py +483 -0
- attune_llm/agent_factory/adapters/haystack_adapter.py +298 -0
- attune_llm/agent_factory/adapters/langchain_adapter.py +362 -0
- attune_llm/agent_factory/adapters/langgraph_adapter.py +333 -0
- attune_llm/agent_factory/adapters/native.py +228 -0
- attune_llm/agent_factory/adapters/wizard_adapter.py +423 -0
- attune_llm/agent_factory/base.py +305 -0
- attune_llm/agent_factory/crews/__init__.py +67 -0
- attune_llm/agent_factory/crews/code_review.py +1113 -0
- attune_llm/agent_factory/crews/health_check.py +1262 -0
- attune_llm/agent_factory/crews/refactoring.py +1128 -0
- attune_llm/agent_factory/crews/security_audit.py +1018 -0
- attune_llm/agent_factory/decorators.py +287 -0
- attune_llm/agent_factory/factory.py +558 -0
- attune_llm/agent_factory/framework.py +193 -0
- attune_llm/agent_factory/memory_integration.py +328 -0
- attune_llm/agent_factory/resilient.py +320 -0
- attune_llm/agents_md/__init__.py +22 -0
- attune_llm/agents_md/loader.py +218 -0
- attune_llm/agents_md/parser.py +271 -0
- attune_llm/agents_md/registry.py +307 -0
- attune_llm/claude_memory.py +466 -0
- attune_llm/cli/__init__.py +8 -0
- attune_llm/cli/sync_claude.py +487 -0
- attune_llm/code_health.py +1313 -0
- attune_llm/commands/__init__.py +51 -0
- attune_llm/commands/context.py +375 -0
- attune_llm/commands/loader.py +301 -0
- attune_llm/commands/models.py +231 -0
- attune_llm/commands/parser.py +371 -0
- attune_llm/commands/registry.py +429 -0
- attune_llm/config/__init__.py +29 -0
- attune_llm/config/unified.py +291 -0
- attune_llm/context/__init__.py +22 -0
- attune_llm/context/compaction.py +455 -0
- attune_llm/context/manager.py +434 -0
- attune_llm/contextual_patterns.py +361 -0
- attune_llm/core.py +907 -0
- attune_llm/git_pattern_extractor.py +435 -0
- attune_llm/hooks/__init__.py +24 -0
- attune_llm/hooks/config.py +306 -0
- attune_llm/hooks/executor.py +289 -0
- attune_llm/hooks/registry.py +302 -0
- attune_llm/hooks/scripts/__init__.py +39 -0
- attune_llm/hooks/scripts/evaluate_session.py +201 -0
- attune_llm/hooks/scripts/first_time_init.py +285 -0
- attune_llm/hooks/scripts/pre_compact.py +207 -0
- attune_llm/hooks/scripts/session_end.py +183 -0
- attune_llm/hooks/scripts/session_start.py +163 -0
- attune_llm/hooks/scripts/suggest_compact.py +225 -0
- attune_llm/learning/__init__.py +30 -0
- attune_llm/learning/evaluator.py +438 -0
- attune_llm/learning/extractor.py +514 -0
- attune_llm/learning/storage.py +560 -0
- attune_llm/levels.py +227 -0
- attune_llm/pattern_confidence.py +414 -0
- attune_llm/pattern_resolver.py +272 -0
- attune_llm/pattern_summary.py +350 -0
- attune_llm/providers.py +967 -0
- attune_llm/routing/__init__.py +32 -0
- attune_llm/routing/model_router.py +362 -0
- attune_llm/security/IMPLEMENTATION_SUMMARY.md +413 -0
- attune_llm/security/PHASE2_COMPLETE.md +384 -0
- attune_llm/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
- attune_llm/security/QUICK_REFERENCE.md +316 -0
- attune_llm/security/README.md +262 -0
- attune_llm/security/__init__.py +62 -0
- attune_llm/security/audit_logger.py +929 -0
- attune_llm/security/audit_logger_example.py +152 -0
- attune_llm/security/pii_scrubber.py +640 -0
- attune_llm/security/secrets_detector.py +678 -0
- attune_llm/security/secrets_detector_example.py +304 -0
- attune_llm/security/secure_memdocs.py +1192 -0
- attune_llm/security/secure_memdocs_example.py +278 -0
- attune_llm/session_status.py +745 -0
- attune_llm/state.py +246 -0
- attune_llm/utils/__init__.py +5 -0
- attune_llm/utils/tokens.py +349 -0
- attune_software/SOFTWARE_PLUGIN_README.md +57 -0
- attune_software/__init__.py +13 -0
- attune_software/cli/__init__.py +120 -0
- attune_software/cli/inspect.py +362 -0
- attune_software/cli.py +574 -0
- attune_software/plugin.py +188 -0
- workflow_scaffolding/__init__.py +11 -0
- workflow_scaffolding/__main__.py +12 -0
- workflow_scaffolding/cli.py +206 -0
- workflow_scaffolding/generator.py +265 -0
attune/workflows/refactor_plan.py

@@ -0,0 +1,694 @@
"""Refactor Planning Workflow

Prioritizes tech debt based on trajectory analysis and impact assessment.
Uses historical tech debt data to identify trends and hotspots.

Stages:
1. scan (CHEAP) - Scan for TODOs, FIXMEs, HACKs, complexity
2. analyze (CAPABLE) - Analyze debt trajectory from patterns
3. prioritize (CAPABLE) - Score by impact, effort, and risk
4. plan (PREMIUM) - Generate prioritized refactoring roadmap (conditional)

Copyright 2025 Smart-AI-Memory
Licensed under Fair Source License 0.9
"""

import heapq
import json
import logging
import re
from pathlib import Path
from typing import Any

from .base import BaseWorkflow, ModelTier
from .step_config import WorkflowStepConfig

logger = logging.getLogger(__name__)

# Define step configurations for executor-based execution
REFACTOR_PLAN_STEPS = {
    "plan": WorkflowStepConfig(
        name="plan",
        task_type="architectural_decision",  # Premium tier task
        tier_hint="premium",
        description="Generate prioritized refactoring roadmap",
        max_tokens=3000,
    ),
}

# Debt markers and their severity
DEBT_MARKERS = {
    "TODO": {"severity": "low", "weight": 1},
    "FIXME": {"severity": "medium", "weight": 3},
    "HACK": {"severity": "high", "weight": 5},
    "XXX": {"severity": "medium", "weight": 3},
    "BUG": {"severity": "high", "weight": 5},
    "OPTIMIZE": {"severity": "low", "weight": 2},
    "REFACTOR": {"severity": "medium", "weight": 3},
}


class RefactorPlanWorkflow(BaseWorkflow):
    """Prioritize tech debt with trajectory analysis.

    Analyzes tech debt trends over time to identify growing
    problem areas and generate prioritized refactoring plans.
    """

    name = "refactor-plan"
    description = "Prioritize tech debt based on trajectory and impact"
    stages = ["scan", "analyze", "prioritize", "plan"]
    tier_map = {
        "scan": ModelTier.CHEAP,
        "analyze": ModelTier.CAPABLE,
        "prioritize": ModelTier.CAPABLE,
        "plan": ModelTier.PREMIUM,
    }

    def __init__(
        self,
        patterns_dir: str = "./patterns",
        min_debt_for_premium: int = 50,
        use_crew_for_analysis: bool = True,
        crew_config: dict | None = None,
        **kwargs: Any,
    ):
        """Initialize refactor planning workflow.

        Args:
            patterns_dir: Directory containing tech debt history
            min_debt_for_premium: Minimum debt items to use premium planning
            use_crew_for_analysis: Use RefactoringCrew for enhanced code analysis (default: True)
            crew_config: Configuration dict for RefactoringCrew
            **kwargs: Additional arguments passed to BaseWorkflow

        """
        super().__init__(**kwargs)
        self.patterns_dir = patterns_dir
        self.min_debt_for_premium = min_debt_for_premium
        self.use_crew_for_analysis = use_crew_for_analysis
        self.crew_config = crew_config or {}
        self._total_debt: int = 0
        self._debt_history: list[dict] = []
        self._crew: Any = None
        self._crew_available = False
        self._load_debt_history()

    def _load_debt_history(self) -> None:
        """Load tech debt history from pattern library."""
        debt_file = Path(self.patterns_dir) / "tech_debt.json"
        if debt_file.exists():
            try:
                with open(debt_file) as f:
                    data = json.load(f)
                    self._debt_history = data.get("snapshots", [])
            except (json.JSONDecodeError, OSError):
                pass

    async def _initialize_crew(self) -> None:
        """Initialize the RefactoringCrew."""
        if self._crew is not None:
            return

        try:
            from attune_llm.agent_factory.crews.refactoring import RefactoringCrew

            self._crew = RefactoringCrew()
            self._crew_available = True
            logger.info("RefactoringCrew initialized successfully")
        except ImportError as e:
            logger.warning(f"RefactoringCrew not available: {e}")
            self._crew_available = False

    def should_skip_stage(self, stage_name: str, input_data: Any) -> tuple[bool, str | None]:
        """Downgrade plan stage if debt is low.

        Args:
            stage_name: Name of the stage to check
            input_data: Current workflow data

        Returns:
            Tuple of (should_skip, reason)

        """
        if stage_name == "plan":
            if self._total_debt < self.min_debt_for_premium:
                self.tier_map["plan"] = ModelTier.CAPABLE
                return False, None
        return False, None

    async def run_stage(
        self,
        stage_name: str,
        tier: ModelTier,
        input_data: Any,
    ) -> tuple[Any, int, int]:
        """Route to specific stage implementation."""
        if stage_name == "scan":
            return await self._scan(input_data, tier)
        if stage_name == "analyze":
            return await self._analyze(input_data, tier)
        if stage_name == "prioritize":
            return await self._prioritize(input_data, tier)
        if stage_name == "plan":
            return await self._plan(input_data, tier)
        raise ValueError(f"Unknown stage: {stage_name}")

    async def _scan(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
        """Scan codebase for tech debt markers.

        Finds TODOs, FIXMEs, HACKs and other debt indicators.
        """
        target_path = input_data.get("path", ".")
        file_types = input_data.get("file_types", [".py", ".ts", ".tsx", ".js"])

        debt_items: list[dict] = []
        files_scanned = 0

        target = Path(target_path)
        if target.exists():
            for ext in file_types:
                for file_path in target.rglob(f"*{ext}"):
                    if any(
                        skip in str(file_path)
                        for skip in [".git", "node_modules", "__pycache__", "venv"]
                    ):
                        continue

                    try:
                        content = file_path.read_text(errors="ignore")
                        files_scanned += 1

                        for marker, info in DEBT_MARKERS.items():
                            pattern = rf"#\s*{marker}[:\s]*(.*?)(?:\n|$)"
                            for match in re.finditer(pattern, content, re.IGNORECASE):
                                line_num = content[: match.start()].count("\n") + 1
                                debt_items.append(
                                    {
                                        "file": str(file_path),
                                        "line": line_num,
                                        "marker": marker,
                                        "message": match.group(1).strip()[:100],
                                        "severity": info["severity"],
                                        "weight": info["weight"],
                                    },
                                )
                    except OSError:
                        continue

        self._total_debt = len(debt_items)

        # Group by file
        by_file: dict[str, int] = {}
        for item in debt_items:
            f = item["file"]
            by_file[f] = by_file.get(f, 0) + 1

        # By marker type
        by_marker: dict[str, int] = {}
        for item in debt_items:
            m = item["marker"]
            by_marker[m] = by_marker.get(m, 0) + 1

        input_tokens = len(str(input_data)) // 4
        output_tokens = len(str(debt_items)) // 4

        return (
            {
                "debt_items": debt_items,
                "total_debt": self._total_debt,
                "files_scanned": files_scanned,
                "by_file": dict(heapq.nlargest(20, by_file.items(), key=lambda x: x[1])),
                "by_marker": by_marker,
                **input_data,
            },
            input_tokens,
            output_tokens,
        )

    async def _analyze(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
        """Analyze debt trajectory from historical data.

        Compares current debt with historical snapshots to
        identify trends and growing problem areas.
        """
        current_total = input_data.get("total_debt", 0)
        by_file = input_data.get("by_file", {})

        # Analyze trajectory
        trajectory = "stable"
        velocity = 0.0

        if self._debt_history and len(self._debt_history) >= 2:
            oldest = self._debt_history[0].get("total_items", 0)
            newest = self._debt_history[-1].get("total_items", 0)

            change = newest - oldest
            if change > 10:
                trajectory = "increasing"
            elif change < -10:
                trajectory = "decreasing"

            # Calculate velocity (items per snapshot)
            velocity = change / len(self._debt_history)

        # Identify hotspots (files with most debt and increasing)
        hotspots: list[dict] = []
        for file_path, count in list(by_file.items())[:10]:
            hotspots.append(
                {
                    "file": file_path,
                    "debt_count": count,
                    "trend": "stable",  # Would compare with history
                },
            )

        analysis = {
            "trajectory": trajectory,
            "velocity": round(velocity, 2),
            "current_total": current_total,
            "historical_snapshots": len(self._debt_history),
            "hotspots": hotspots,
        }

        input_tokens = len(str(input_data)) // 4
        output_tokens = len(str(analysis)) // 4

        return (
            {
                "analysis": analysis,
                **input_data,
            },
            input_tokens,
            output_tokens,
        )

    async def _prioritize(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
        """Score debt items by impact, effort, and risk.

        Calculates priority scores considering multiple factors.
        When use_crew_for_analysis=True, uses RefactoringCrew for
        enhanced refactoring opportunity detection.
        """
        await self._initialize_crew()

        debt_items = input_data.get("debt_items", [])
        analysis = input_data.get("analysis", {})
        hotspots = {h["file"] for h in analysis.get("hotspots", [])}

        prioritized: list[dict] = []
        for item in debt_items:
            # Calculate priority score
            base_weight = item.get("weight", 1)

            # Bonus for hotspot files
            hotspot_bonus = 2 if item["file"] in hotspots else 0

            # Severity factor
            severity_factor = {
                "high": 3,
                "medium": 2,
                "low": 1,
            }.get(item.get("severity", "low"), 1)

            priority_score = (base_weight * severity_factor) + hotspot_bonus

            prioritized.append(
                {
                    **item,
                    "priority_score": priority_score,
                    "is_hotspot": item["file"] in hotspots,
                },
            )

        # Sort by priority
        prioritized.sort(key=lambda x: -x["priority_score"])

        # Group into priority tiers (single pass instead of 3 scans)
        high_priority: list[dict] = []
        medium_priority: list[dict] = []
        low_priority: list[dict] = []
        for p in prioritized:
            score = p["priority_score"]
            if score >= 10:
                high_priority.append(p)
            elif score >= 5:
                medium_priority.append(p)
            else:
                low_priority.append(p)

        # Use crew for enhanced refactoring analysis if available
        crew_enhanced = False
        crew_findings = []
        if self.use_crew_for_analysis and self._crew_available:
            try:
                # Analyze hotspot files with the crew
                for hotspot in list(hotspots)[:5]:  # Analyze top 5 hotspots
                    try:
                        code_content = Path(hotspot).read_text(errors="ignore")
                        crew_result = await self._crew.analyze(code=code_content, file_path=hotspot)
                        if crew_result and crew_result.findings:
                            crew_enhanced = True
                            # Convert crew findings to workflow format
                            for finding in crew_result.findings:
                                crew_findings.append(
                                    {
                                        "file": finding.file_path or hotspot,
                                        "line": finding.start_line or 0,
                                        "marker": "REFACTOR",
                                        "message": finding.title,
                                        "description": finding.description,
                                        "severity": finding.severity.value,
                                        "category": finding.category.value,
                                        "priority_score": (
                                            15 if finding.severity.value == "high" else 10
                                        ),
                                        "is_hotspot": True,
                                        "source": "crew",
                                    }
                                )
                    except Exception as e:
                        logger.debug(f"Crew analysis failed for {hotspot}: {e}")
                        continue

                # Add crew findings to high priority if they're high severity
                if crew_findings:
                    for cf in crew_findings:
                        if cf["priority_score"] >= 10:
                            high_priority.append(cf)
            except Exception as e:
                logger.warning(f"Crew analysis failed: {e}")

        input_tokens = len(str(input_data)) // 4
        output_tokens = len(str(prioritized)) // 4

        return (
            {
                "prioritized_items": prioritized[:50],  # Top 50
                "high_priority": high_priority[:20],
                "medium_priority": medium_priority[:20],
                "low_priority_count": len(low_priority),
                "crew_enhanced": crew_enhanced,
                "crew_findings_count": len(crew_findings),
                **input_data,
            },
            input_tokens,
            output_tokens,
        )

    async def _plan(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
        """Generate prioritized refactoring roadmap using LLM.

        Creates actionable refactoring plan based on priorities.

        Supports XML-enhanced prompts when enabled in workflow config.
        """
        high_priority = input_data.get("high_priority", [])
        medium_priority = input_data.get("medium_priority", [])
        analysis = input_data.get("analysis", {})
        target = input_data.get("target", "")

        # Build high priority summary for LLM
        high_summary = []
        for item in high_priority[:15]:
            high_summary.append(
                f"- {item.get('file')}:{item.get('line')} [{item.get('marker')}] "
                f"{item.get('message', '')[:50]}",
            )

        # Build input payload for prompt
        input_payload = f"""Target: {target or "codebase"}

Total Debt Items: {input_data.get("total_debt", 0)}
Trajectory: {analysis.get("trajectory", "unknown")}
Velocity: {analysis.get("velocity", 0)} items/snapshot

High Priority Items ({len(high_priority)}):
{chr(10).join(high_summary) if high_summary else "None"}

Medium Priority Items: {len(medium_priority)}
Hotspot Files: {json.dumps([h.get("file") for h in analysis.get("hotspots", [])[:5]], indent=2)}"""

        # Check if XML prompts are enabled
        if self._is_xml_enabled():
            # Use XML-enhanced prompt
            user_message = self._render_xml_prompt(
                role="software architect specializing in technical debt management",
                goal="Generate a prioritized refactoring roadmap to reduce technical debt",
                instructions=[
                    "Analyze the debt trajectory and identify root causes",
                    "Create a phased roadmap with clear milestones",
                    "Prioritize items by impact and effort",
                    "Provide specific refactoring strategies for each phase",
                    "Include prevention measures to stop new debt accumulation",
                ],
                constraints=[
                    "Be specific about which files to refactor",
                    "Include effort estimates (high/medium/low)",
                    "Focus on sustainable debt reduction",
                ],
                input_type="tech_debt_analysis",
                input_payload=input_payload,
                extra={
                    "total_debt": input_data.get("total_debt", 0),
                    "trajectory": analysis.get("trajectory", "unknown"),
                },
            )
            system = None  # XML prompt includes all context
        else:
            # Use legacy plain text prompts
            system = """You are a software architect specializing in technical debt management.
Create a prioritized refactoring roadmap based on the debt analysis.

For each phase:
1. Define clear goals and milestones
2. Prioritize by impact and effort
3. Provide specific refactoring strategies
4. Include prevention measures

Be specific and actionable."""

            user_message = f"""Generate a refactoring roadmap for this tech debt:

{input_payload}

Create a phased approach to reduce debt sustainably."""

        # Try executor-based execution first (Phase 3 pattern)
        if self._executor is not None or self._api_key:
            try:
                step = REFACTOR_PLAN_STEPS["plan"]
                response, input_tokens, output_tokens, cost = await self.run_step_with_executor(
                    step=step,
                    prompt=user_message,
                    system=system,
                )
            except (RuntimeError, ValueError, TypeError, KeyError, AttributeError) as e:
                # INTENTIONAL: Graceful fallback to legacy _call_llm if executor fails
                # Catches executor/API/parsing errors during new execution path
                logger.warning(f"Executor failed, falling back to legacy path: {e}")
                response, input_tokens, output_tokens = await self._call_llm(
                    tier,
                    system or "",
                    user_message,
                    max_tokens=3000,
                )
        else:
            # Legacy path for backward compatibility
            response, input_tokens, output_tokens = await self._call_llm(
                tier,
                system or "",
                user_message,
                max_tokens=3000,
            )

        # Parse XML response if enforcement is enabled
        parsed_data = self._parse_xml_response(response)

        # Summary
        summary = {
            "total_debt": input_data.get("total_debt", 0),
            "trajectory": analysis.get("trajectory", "unknown"),
            "high_priority_count": len(high_priority),
        }

        result: dict = {
            "refactoring_plan": response,
            "summary": summary,
            "model_tier_used": tier.value,
        }

        # Merge parsed XML data if available
        if parsed_data.get("xml_parsed"):
            result.update(
                {
                    "xml_parsed": True,
                    "plan_summary": parsed_data.get("summary"),
                    "findings": parsed_data.get("findings", []),
                    "checklist": parsed_data.get("checklist", []),
                },
            )

        # Add formatted report for human readability
        result["formatted_report"] = format_refactor_plan_report(result, input_data)

        return (
            result,
            input_tokens,
            output_tokens,
        )


def format_refactor_plan_report(result: dict, input_data: dict) -> str:
    """Format refactor plan output as a human-readable report.

    Args:
        result: The plan stage result
        input_data: Input data from previous stages

    Returns:
        Formatted report string

    """
    lines = []

    # Header with trajectory
    summary = result.get("summary", {})
    total_debt = summary.get("total_debt", 0)
    trajectory = summary.get("trajectory", "unknown")
    high_priority_count = summary.get("high_priority_count", 0)

    # Trajectory icon
    if trajectory == "increasing":
        traj_icon = "📈"
        traj_text = "INCREASING"
    elif trajectory == "decreasing":
        traj_icon = "📉"
        traj_text = "DECREASING"
    else:
        traj_icon = "➡️"
        traj_text = "STABLE"

    lines.append("=" * 60)
    lines.append("REFACTOR PLAN REPORT")
    lines.append("=" * 60)
    lines.append("")
    lines.append(f"Total Tech Debt Items: {total_debt}")
    lines.append(f"Trajectory: {traj_icon} {traj_text}")
    lines.append(f"High Priority Items: {high_priority_count}")
    lines.append("")

    # Scan summary
    by_marker: dict[str, int] = input_data.get("by_marker", {})
    files_scanned = input_data.get("files_scanned", 0)

    lines.append("-" * 60)
    lines.append("DEBT SCAN SUMMARY")
    lines.append("-" * 60)
    lines.append(f"Files Scanned: {files_scanned}")
    if by_marker:
        lines.append("By Marker Type:")
        for marker, count in sorted(by_marker.items(), key=lambda x: -x[1]):
            marker_info = DEBT_MARKERS.get(marker, {"severity": "low", "weight": 1})
            severity = str(marker_info.get("severity", "low"))
            sev_icon = {"high": "🔴", "medium": "🟡", "low": "🟢"}.get(severity, "⚪")
            lines.append(f"  {sev_icon} {marker}: {count}")
    lines.append("")

    # Analysis
    analysis = input_data.get("analysis", {})
    if analysis:
        lines.append("-" * 60)
        lines.append("TRAJECTORY ANALYSIS")
        lines.append("-" * 60)
        velocity = analysis.get("velocity", 0)
        snapshots = analysis.get("historical_snapshots", 0)

        lines.append(f"Historical Snapshots: {snapshots}")
        if velocity != 0:
            velocity_text = f"+{velocity}" if velocity > 0 else str(velocity)
            lines.append(f"Velocity: {velocity_text} items/snapshot")
        lines.append("")

    # Hotspots
    hotspots = analysis.get("hotspots", [])
    if hotspots:
        lines.append("-" * 60)
        lines.append("🔥 HOTSPOT FILES")
        lines.append("-" * 60)
        for h in hotspots[:10]:
            file_path = h.get("file", "unknown")
            debt_count = h.get("debt_count", 0)
            lines.append(f"  • {file_path}")
            lines.append(f"    {debt_count} debt items")
        lines.append("")

    # High priority items
    high_priority = input_data.get("high_priority", [])
    if high_priority:
        lines.append("-" * 60)
        lines.append("🔴 HIGH PRIORITY ITEMS")
        lines.append("-" * 60)
        for item in high_priority[:10]:
            file_path = item.get("file", "unknown")
            line = item.get("line", "?")
            marker = item.get("marker", "DEBT")
            message = item.get("message", "")[:50]
            score = item.get("priority_score", 0)
            hotspot = "🔥" if item.get("is_hotspot") else ""
            lines.append(f"  [{marker}] {file_path}:{line} {hotspot}")
            lines.append(f"    {message} (score: {score})")
        if len(high_priority) > 10:
            lines.append(f"  ... and {len(high_priority) - 10} more")
        lines.append("")

    # Refactoring plan from LLM
    refactoring_plan = result.get("refactoring_plan", "")
    if refactoring_plan and not refactoring_plan.startswith("[Simulated"):
        lines.append("-" * 60)
        lines.append("REFACTORING ROADMAP")
        lines.append("-" * 60)
        if len(refactoring_plan) > 2000:
            lines.append(refactoring_plan[:2000] + "...")
        else:
            lines.append(refactoring_plan)
        lines.append("")

    # Footer
    lines.append("=" * 60)
    model_tier = result.get("model_tier_used", "unknown")
    lines.append(f"Analyzed {total_debt} debt items using {model_tier} tier model")
    lines.append("=" * 60)

    return "\n".join(lines)


def main():
    """CLI entry point for refactor planning workflow."""
    import asyncio

    async def run():
        workflow = RefactorPlanWorkflow()
        result = await workflow.execute(path=".", file_types=[".py"])

        print("\nRefactor Plan Results")
        print("=" * 50)
        print(f"Provider: {result.provider}")
        print(f"Success: {result.success}")

        summary = result.final_output.get("summary", {})
        print(f"Total Debt: {summary.get('total_debt', 0)} items")
        print(f"Trajectory: {summary.get('trajectory', 'N/A')}")
        print(f"High Priority: {summary.get('high_priority_count', 0)}")

        print("\nCost Report:")
        print(f"  Total Cost: ${result.cost_report.total_cost:.4f}")
        savings = result.cost_report.savings
        pct = result.cost_report.savings_percent
        print(f"  Savings: ${savings:.4f} ({pct:.1f}%)")

    asyncio.run(run())


if __name__ == "__main__":
    main()
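For context on the history file this workflow reads: `_load_debt_history` only looks for a top-level `snapshots` list in `patterns/tech_debt.json`, and `_analyze` only reads `total_items` from the first and last snapshots. Below is a minimal sketch of a history file that would exercise the "increasing" branch; the `date` field is an illustrative assumption and is not read by this code.

```python
# Minimal sketch of ./patterns/tech_debt.json as consumed by _load_debt_history()
# and _analyze(). Only "snapshots" and each snapshot's "total_items" are read;
# the "date" field is an assumed convenience ignored by this workflow.
import json
from pathlib import Path

history = {
    "snapshots": [
        {"date": "2025-01-01", "total_items": 40},  # oldest snapshot
        {"date": "2025-02-01", "total_items": 55},  # newest snapshot
    ]
}
# change = 55 - 40 = 15 > 10, so _analyze() reports trajectory="increasing"
# and velocity = 15 / 2 = 7.5 items per snapshot.
Path("./patterns").mkdir(exist_ok=True)
Path("./patterns/tech_debt.json").write_text(json.dumps(history, indent=2))
```

Even with such a history in place, `should_skip_stage` still downgrades the `plan` stage from PREMIUM to CAPABLE unless the scan finds at least `min_debt_for_premium` (50 by default) marker hits.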