attune-ai 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- attune/__init__.py +358 -0
- attune/adaptive/__init__.py +13 -0
- attune/adaptive/task_complexity.py +127 -0
- attune/agent_monitoring.py +414 -0
- attune/cache/__init__.py +117 -0
- attune/cache/base.py +166 -0
- attune/cache/dependency_manager.py +256 -0
- attune/cache/hash_only.py +251 -0
- attune/cache/hybrid.py +457 -0
- attune/cache/storage.py +285 -0
- attune/cache_monitor.py +356 -0
- attune/cache_stats.py +298 -0
- attune/cli/__init__.py +152 -0
- attune/cli/__main__.py +12 -0
- attune/cli/commands/__init__.py +1 -0
- attune/cli/commands/batch.py +264 -0
- attune/cli/commands/cache.py +248 -0
- attune/cli/commands/help.py +331 -0
- attune/cli/commands/info.py +140 -0
- attune/cli/commands/inspect.py +436 -0
- attune/cli/commands/inspection.py +57 -0
- attune/cli/commands/memory.py +48 -0
- attune/cli/commands/metrics.py +92 -0
- attune/cli/commands/orchestrate.py +184 -0
- attune/cli/commands/patterns.py +207 -0
- attune/cli/commands/profiling.py +202 -0
- attune/cli/commands/provider.py +98 -0
- attune/cli/commands/routing.py +285 -0
- attune/cli/commands/setup.py +96 -0
- attune/cli/commands/status.py +235 -0
- attune/cli/commands/sync.py +166 -0
- attune/cli/commands/tier.py +121 -0
- attune/cli/commands/utilities.py +114 -0
- attune/cli/commands/workflow.py +579 -0
- attune/cli/core.py +32 -0
- attune/cli/parsers/__init__.py +68 -0
- attune/cli/parsers/batch.py +118 -0
- attune/cli/parsers/cache.py +65 -0
- attune/cli/parsers/help.py +41 -0
- attune/cli/parsers/info.py +26 -0
- attune/cli/parsers/inspect.py +66 -0
- attune/cli/parsers/metrics.py +42 -0
- attune/cli/parsers/orchestrate.py +61 -0
- attune/cli/parsers/patterns.py +54 -0
- attune/cli/parsers/provider.py +40 -0
- attune/cli/parsers/routing.py +110 -0
- attune/cli/parsers/setup.py +42 -0
- attune/cli/parsers/status.py +47 -0
- attune/cli/parsers/sync.py +31 -0
- attune/cli/parsers/tier.py +33 -0
- attune/cli/parsers/workflow.py +77 -0
- attune/cli/utils/__init__.py +1 -0
- attune/cli/utils/data.py +242 -0
- attune/cli/utils/helpers.py +68 -0
- attune/cli_legacy.py +3957 -0
- attune/cli_minimal.py +1159 -0
- attune/cli_router.py +437 -0
- attune/cli_unified.py +814 -0
- attune/config/__init__.py +66 -0
- attune/config/xml_config.py +286 -0
- attune/config.py +545 -0
- attune/coordination.py +870 -0
- attune/core.py +1511 -0
- attune/core_modules/__init__.py +15 -0
- attune/cost_tracker.py +626 -0
- attune/dashboard/__init__.py +41 -0
- attune/dashboard/app.py +512 -0
- attune/dashboard/simple_server.py +435 -0
- attune/dashboard/standalone_server.py +547 -0
- attune/discovery.py +306 -0
- attune/emergence.py +306 -0
- attune/exceptions.py +123 -0
- attune/feedback_loops.py +373 -0
- attune/hot_reload/README.md +473 -0
- attune/hot_reload/__init__.py +62 -0
- attune/hot_reload/config.py +83 -0
- attune/hot_reload/integration.py +229 -0
- attune/hot_reload/reloader.py +298 -0
- attune/hot_reload/watcher.py +183 -0
- attune/hot_reload/websocket.py +177 -0
- attune/levels.py +577 -0
- attune/leverage_points.py +441 -0
- attune/logging_config.py +261 -0
- attune/mcp/__init__.py +10 -0
- attune/mcp/server.py +506 -0
- attune/memory/__init__.py +237 -0
- attune/memory/claude_memory.py +469 -0
- attune/memory/config.py +224 -0
- attune/memory/control_panel.py +1290 -0
- attune/memory/control_panel_support.py +145 -0
- attune/memory/cross_session.py +845 -0
- attune/memory/edges.py +179 -0
- attune/memory/encryption.py +159 -0
- attune/memory/file_session.py +770 -0
- attune/memory/graph.py +570 -0
- attune/memory/long_term.py +913 -0
- attune/memory/long_term_types.py +99 -0
- attune/memory/mixins/__init__.py +25 -0
- attune/memory/mixins/backend_init_mixin.py +249 -0
- attune/memory/mixins/capabilities_mixin.py +208 -0
- attune/memory/mixins/handoff_mixin.py +208 -0
- attune/memory/mixins/lifecycle_mixin.py +49 -0
- attune/memory/mixins/long_term_mixin.py +352 -0
- attune/memory/mixins/promotion_mixin.py +109 -0
- attune/memory/mixins/short_term_mixin.py +182 -0
- attune/memory/nodes.py +179 -0
- attune/memory/redis_bootstrap.py +540 -0
- attune/memory/security/__init__.py +31 -0
- attune/memory/security/audit_logger.py +932 -0
- attune/memory/security/pii_scrubber.py +640 -0
- attune/memory/security/secrets_detector.py +678 -0
- attune/memory/short_term.py +2192 -0
- attune/memory/simple_storage.py +302 -0
- attune/memory/storage/__init__.py +15 -0
- attune/memory/storage_backend.py +167 -0
- attune/memory/summary_index.py +583 -0
- attune/memory/types.py +446 -0
- attune/memory/unified.py +182 -0
- attune/meta_workflows/__init__.py +74 -0
- attune/meta_workflows/agent_creator.py +248 -0
- attune/meta_workflows/builtin_templates.py +567 -0
- attune/meta_workflows/cli_commands/__init__.py +56 -0
- attune/meta_workflows/cli_commands/agent_commands.py +321 -0
- attune/meta_workflows/cli_commands/analytics_commands.py +442 -0
- attune/meta_workflows/cli_commands/config_commands.py +232 -0
- attune/meta_workflows/cli_commands/memory_commands.py +182 -0
- attune/meta_workflows/cli_commands/template_commands.py +354 -0
- attune/meta_workflows/cli_commands/workflow_commands.py +382 -0
- attune/meta_workflows/cli_meta_workflows.py +59 -0
- attune/meta_workflows/form_engine.py +292 -0
- attune/meta_workflows/intent_detector.py +409 -0
- attune/meta_workflows/models.py +569 -0
- attune/meta_workflows/pattern_learner.py +738 -0
- attune/meta_workflows/plan_generator.py +384 -0
- attune/meta_workflows/session_context.py +397 -0
- attune/meta_workflows/template_registry.py +229 -0
- attune/meta_workflows/workflow.py +984 -0
- attune/metrics/__init__.py +12 -0
- attune/metrics/collector.py +31 -0
- attune/metrics/prompt_metrics.py +194 -0
- attune/models/__init__.py +172 -0
- attune/models/__main__.py +13 -0
- attune/models/adaptive_routing.py +437 -0
- attune/models/auth_cli.py +444 -0
- attune/models/auth_strategy.py +450 -0
- attune/models/cli.py +655 -0
- attune/models/empathy_executor.py +354 -0
- attune/models/executor.py +257 -0
- attune/models/fallback.py +762 -0
- attune/models/provider_config.py +282 -0
- attune/models/registry.py +472 -0
- attune/models/tasks.py +359 -0
- attune/models/telemetry/__init__.py +71 -0
- attune/models/telemetry/analytics.py +594 -0
- attune/models/telemetry/backend.py +196 -0
- attune/models/telemetry/data_models.py +431 -0
- attune/models/telemetry/storage.py +489 -0
- attune/models/token_estimator.py +420 -0
- attune/models/validation.py +280 -0
- attune/monitoring/__init__.py +52 -0
- attune/monitoring/alerts.py +946 -0
- attune/monitoring/alerts_cli.py +448 -0
- attune/monitoring/multi_backend.py +271 -0
- attune/monitoring/otel_backend.py +362 -0
- attune/optimization/__init__.py +19 -0
- attune/optimization/context_optimizer.py +272 -0
- attune/orchestration/__init__.py +67 -0
- attune/orchestration/agent_templates.py +707 -0
- attune/orchestration/config_store.py +499 -0
- attune/orchestration/execution_strategies.py +2111 -0
- attune/orchestration/meta_orchestrator.py +1168 -0
- attune/orchestration/pattern_learner.py +696 -0
- attune/orchestration/real_tools.py +931 -0
- attune/pattern_cache.py +187 -0
- attune/pattern_library.py +542 -0
- attune/patterns/debugging/all_patterns.json +81 -0
- attune/patterns/debugging/workflow_20260107_1770825e.json +77 -0
- attune/patterns/refactoring_memory.json +89 -0
- attune/persistence.py +564 -0
- attune/platform_utils.py +265 -0
- attune/plugins/__init__.py +28 -0
- attune/plugins/base.py +361 -0
- attune/plugins/registry.py +268 -0
- attune/project_index/__init__.py +32 -0
- attune/project_index/cli.py +335 -0
- attune/project_index/index.py +667 -0
- attune/project_index/models.py +504 -0
- attune/project_index/reports.py +474 -0
- attune/project_index/scanner.py +777 -0
- attune/project_index/scanner_parallel.py +291 -0
- attune/prompts/__init__.py +61 -0
- attune/prompts/config.py +77 -0
- attune/prompts/context.py +177 -0
- attune/prompts/parser.py +285 -0
- attune/prompts/registry.py +313 -0
- attune/prompts/templates.py +208 -0
- attune/redis_config.py +302 -0
- attune/redis_memory.py +799 -0
- attune/resilience/__init__.py +56 -0
- attune/resilience/circuit_breaker.py +256 -0
- attune/resilience/fallback.py +179 -0
- attune/resilience/health.py +300 -0
- attune/resilience/retry.py +209 -0
- attune/resilience/timeout.py +135 -0
- attune/routing/__init__.py +43 -0
- attune/routing/chain_executor.py +433 -0
- attune/routing/classifier.py +217 -0
- attune/routing/smart_router.py +234 -0
- attune/routing/workflow_registry.py +343 -0
- attune/scaffolding/README.md +589 -0
- attune/scaffolding/__init__.py +35 -0
- attune/scaffolding/__main__.py +14 -0
- attune/scaffolding/cli.py +240 -0
- attune/scaffolding/templates/base_wizard.py.jinja2 +121 -0
- attune/scaffolding/templates/coach_wizard.py.jinja2 +321 -0
- attune/scaffolding/templates/domain_wizard.py.jinja2 +408 -0
- attune/scaffolding/templates/linear_flow_wizard.py.jinja2 +203 -0
- attune/socratic/__init__.py +256 -0
- attune/socratic/ab_testing.py +958 -0
- attune/socratic/blueprint.py +533 -0
- attune/socratic/cli.py +703 -0
- attune/socratic/collaboration.py +1114 -0
- attune/socratic/domain_templates.py +924 -0
- attune/socratic/embeddings.py +738 -0
- attune/socratic/engine.py +794 -0
- attune/socratic/explainer.py +682 -0
- attune/socratic/feedback.py +772 -0
- attune/socratic/forms.py +629 -0
- attune/socratic/generator.py +732 -0
- attune/socratic/llm_analyzer.py +637 -0
- attune/socratic/mcp_server.py +702 -0
- attune/socratic/session.py +312 -0
- attune/socratic/storage.py +667 -0
- attune/socratic/success.py +730 -0
- attune/socratic/visual_editor.py +860 -0
- attune/socratic/web_ui.py +958 -0
- attune/telemetry/__init__.py +39 -0
- attune/telemetry/agent_coordination.py +475 -0
- attune/telemetry/agent_tracking.py +367 -0
- attune/telemetry/approval_gates.py +545 -0
- attune/telemetry/cli.py +1231 -0
- attune/telemetry/commands/__init__.py +14 -0
- attune/telemetry/commands/dashboard_commands.py +696 -0
- attune/telemetry/event_streaming.py +409 -0
- attune/telemetry/feedback_loop.py +567 -0
- attune/telemetry/usage_tracker.py +591 -0
- attune/templates.py +754 -0
- attune/test_generator/__init__.py +38 -0
- attune/test_generator/__main__.py +14 -0
- attune/test_generator/cli.py +234 -0
- attune/test_generator/generator.py +355 -0
- attune/test_generator/risk_analyzer.py +216 -0
- attune/test_generator/templates/unit_test.py.jinja2 +272 -0
- attune/tier_recommender.py +384 -0
- attune/tools.py +183 -0
- attune/trust/__init__.py +28 -0
- attune/trust/circuit_breaker.py +579 -0
- attune/trust_building.py +527 -0
- attune/validation/__init__.py +19 -0
- attune/validation/xml_validator.py +281 -0
- attune/vscode_bridge.py +173 -0
- attune/workflow_commands.py +780 -0
- attune/workflow_patterns/__init__.py +33 -0
- attune/workflow_patterns/behavior.py +249 -0
- attune/workflow_patterns/core.py +76 -0
- attune/workflow_patterns/output.py +99 -0
- attune/workflow_patterns/registry.py +255 -0
- attune/workflow_patterns/structural.py +288 -0
- attune/workflows/__init__.py +539 -0
- attune/workflows/autonomous_test_gen.py +1268 -0
- attune/workflows/base.py +2667 -0
- attune/workflows/batch_processing.py +342 -0
- attune/workflows/bug_predict.py +1084 -0
- attune/workflows/builder.py +273 -0
- attune/workflows/caching.py +253 -0
- attune/workflows/code_review.py +1048 -0
- attune/workflows/code_review_adapters.py +312 -0
- attune/workflows/code_review_pipeline.py +722 -0
- attune/workflows/config.py +645 -0
- attune/workflows/dependency_check.py +644 -0
- attune/workflows/document_gen/__init__.py +25 -0
- attune/workflows/document_gen/config.py +30 -0
- attune/workflows/document_gen/report_formatter.py +162 -0
- attune/workflows/document_gen/workflow.py +1426 -0
- attune/workflows/document_manager.py +216 -0
- attune/workflows/document_manager_README.md +134 -0
- attune/workflows/documentation_orchestrator.py +1205 -0
- attune/workflows/history.py +510 -0
- attune/workflows/keyboard_shortcuts/__init__.py +39 -0
- attune/workflows/keyboard_shortcuts/generators.py +391 -0
- attune/workflows/keyboard_shortcuts/parsers.py +416 -0
- attune/workflows/keyboard_shortcuts/prompts.py +295 -0
- attune/workflows/keyboard_shortcuts/schema.py +193 -0
- attune/workflows/keyboard_shortcuts/workflow.py +509 -0
- attune/workflows/llm_base.py +363 -0
- attune/workflows/manage_docs.py +87 -0
- attune/workflows/manage_docs_README.md +134 -0
- attune/workflows/manage_documentation.py +821 -0
- attune/workflows/new_sample_workflow1.py +149 -0
- attune/workflows/new_sample_workflow1_README.md +150 -0
- attune/workflows/orchestrated_health_check.py +849 -0
- attune/workflows/orchestrated_release_prep.py +600 -0
- attune/workflows/output.py +413 -0
- attune/workflows/perf_audit.py +863 -0
- attune/workflows/pr_review.py +762 -0
- attune/workflows/progress.py +785 -0
- attune/workflows/progress_server.py +322 -0
- attune/workflows/progressive/README 2.md +454 -0
- attune/workflows/progressive/README.md +454 -0
- attune/workflows/progressive/__init__.py +82 -0
- attune/workflows/progressive/cli.py +219 -0
- attune/workflows/progressive/core.py +488 -0
- attune/workflows/progressive/orchestrator.py +723 -0
- attune/workflows/progressive/reports.py +520 -0
- attune/workflows/progressive/telemetry.py +274 -0
- attune/workflows/progressive/test_gen.py +495 -0
- attune/workflows/progressive/workflow.py +589 -0
- attune/workflows/refactor_plan.py +694 -0
- attune/workflows/release_prep.py +895 -0
- attune/workflows/release_prep_crew.py +969 -0
- attune/workflows/research_synthesis.py +404 -0
- attune/workflows/routing.py +168 -0
- attune/workflows/secure_release.py +593 -0
- attune/workflows/security_adapters.py +297 -0
- attune/workflows/security_audit.py +1329 -0
- attune/workflows/security_audit_phase3.py +355 -0
- attune/workflows/seo_optimization.py +633 -0
- attune/workflows/step_config.py +234 -0
- attune/workflows/telemetry_mixin.py +269 -0
- attune/workflows/test5.py +125 -0
- attune/workflows/test5_README.md +158 -0
- attune/workflows/test_coverage_boost_crew.py +849 -0
- attune/workflows/test_gen/__init__.py +52 -0
- attune/workflows/test_gen/ast_analyzer.py +249 -0
- attune/workflows/test_gen/config.py +88 -0
- attune/workflows/test_gen/data_models.py +38 -0
- attune/workflows/test_gen/report_formatter.py +289 -0
- attune/workflows/test_gen/test_templates.py +381 -0
- attune/workflows/test_gen/workflow.py +655 -0
- attune/workflows/test_gen.py +54 -0
- attune/workflows/test_gen_behavioral.py +477 -0
- attune/workflows/test_gen_parallel.py +341 -0
- attune/workflows/test_lifecycle.py +526 -0
- attune/workflows/test_maintenance.py +627 -0
- attune/workflows/test_maintenance_cli.py +590 -0
- attune/workflows/test_maintenance_crew.py +840 -0
- attune/workflows/test_runner.py +622 -0
- attune/workflows/tier_tracking.py +531 -0
- attune/workflows/xml_enhanced_crew.py +285 -0
- attune_ai-2.0.0.dist-info/METADATA +1026 -0
- attune_ai-2.0.0.dist-info/RECORD +457 -0
- attune_ai-2.0.0.dist-info/WHEEL +5 -0
- attune_ai-2.0.0.dist-info/entry_points.txt +26 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE +201 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +101 -0
- attune_ai-2.0.0.dist-info/top_level.txt +5 -0
- attune_healthcare/__init__.py +13 -0
- attune_healthcare/monitors/__init__.py +9 -0
- attune_healthcare/monitors/clinical_protocol_monitor.py +315 -0
- attune_healthcare/monitors/monitoring/__init__.py +44 -0
- attune_healthcare/monitors/monitoring/protocol_checker.py +300 -0
- attune_healthcare/monitors/monitoring/protocol_loader.py +214 -0
- attune_healthcare/monitors/monitoring/sensor_parsers.py +306 -0
- attune_healthcare/monitors/monitoring/trajectory_analyzer.py +389 -0
- attune_llm/README.md +553 -0
- attune_llm/__init__.py +28 -0
- attune_llm/agent_factory/__init__.py +53 -0
- attune_llm/agent_factory/adapters/__init__.py +85 -0
- attune_llm/agent_factory/adapters/autogen_adapter.py +312 -0
- attune_llm/agent_factory/adapters/crewai_adapter.py +483 -0
- attune_llm/agent_factory/adapters/haystack_adapter.py +298 -0
- attune_llm/agent_factory/adapters/langchain_adapter.py +362 -0
- attune_llm/agent_factory/adapters/langgraph_adapter.py +333 -0
- attune_llm/agent_factory/adapters/native.py +228 -0
- attune_llm/agent_factory/adapters/wizard_adapter.py +423 -0
- attune_llm/agent_factory/base.py +305 -0
- attune_llm/agent_factory/crews/__init__.py +67 -0
- attune_llm/agent_factory/crews/code_review.py +1113 -0
- attune_llm/agent_factory/crews/health_check.py +1262 -0
- attune_llm/agent_factory/crews/refactoring.py +1128 -0
- attune_llm/agent_factory/crews/security_audit.py +1018 -0
- attune_llm/agent_factory/decorators.py +287 -0
- attune_llm/agent_factory/factory.py +558 -0
- attune_llm/agent_factory/framework.py +193 -0
- attune_llm/agent_factory/memory_integration.py +328 -0
- attune_llm/agent_factory/resilient.py +320 -0
- attune_llm/agents_md/__init__.py +22 -0
- attune_llm/agents_md/loader.py +218 -0
- attune_llm/agents_md/parser.py +271 -0
- attune_llm/agents_md/registry.py +307 -0
- attune_llm/claude_memory.py +466 -0
- attune_llm/cli/__init__.py +8 -0
- attune_llm/cli/sync_claude.py +487 -0
- attune_llm/code_health.py +1313 -0
- attune_llm/commands/__init__.py +51 -0
- attune_llm/commands/context.py +375 -0
- attune_llm/commands/loader.py +301 -0
- attune_llm/commands/models.py +231 -0
- attune_llm/commands/parser.py +371 -0
- attune_llm/commands/registry.py +429 -0
- attune_llm/config/__init__.py +29 -0
- attune_llm/config/unified.py +291 -0
- attune_llm/context/__init__.py +22 -0
- attune_llm/context/compaction.py +455 -0
- attune_llm/context/manager.py +434 -0
- attune_llm/contextual_patterns.py +361 -0
- attune_llm/core.py +907 -0
- attune_llm/git_pattern_extractor.py +435 -0
- attune_llm/hooks/__init__.py +24 -0
- attune_llm/hooks/config.py +306 -0
- attune_llm/hooks/executor.py +289 -0
- attune_llm/hooks/registry.py +302 -0
- attune_llm/hooks/scripts/__init__.py +39 -0
- attune_llm/hooks/scripts/evaluate_session.py +201 -0
- attune_llm/hooks/scripts/first_time_init.py +285 -0
- attune_llm/hooks/scripts/pre_compact.py +207 -0
- attune_llm/hooks/scripts/session_end.py +183 -0
- attune_llm/hooks/scripts/session_start.py +163 -0
- attune_llm/hooks/scripts/suggest_compact.py +225 -0
- attune_llm/learning/__init__.py +30 -0
- attune_llm/learning/evaluator.py +438 -0
- attune_llm/learning/extractor.py +514 -0
- attune_llm/learning/storage.py +560 -0
- attune_llm/levels.py +227 -0
- attune_llm/pattern_confidence.py +414 -0
- attune_llm/pattern_resolver.py +272 -0
- attune_llm/pattern_summary.py +350 -0
- attune_llm/providers.py +967 -0
- attune_llm/routing/__init__.py +32 -0
- attune_llm/routing/model_router.py +362 -0
- attune_llm/security/IMPLEMENTATION_SUMMARY.md +413 -0
- attune_llm/security/PHASE2_COMPLETE.md +384 -0
- attune_llm/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
- attune_llm/security/QUICK_REFERENCE.md +316 -0
- attune_llm/security/README.md +262 -0
- attune_llm/security/__init__.py +62 -0
- attune_llm/security/audit_logger.py +929 -0
- attune_llm/security/audit_logger_example.py +152 -0
- attune_llm/security/pii_scrubber.py +640 -0
- attune_llm/security/secrets_detector.py +678 -0
- attune_llm/security/secrets_detector_example.py +304 -0
- attune_llm/security/secure_memdocs.py +1192 -0
- attune_llm/security/secure_memdocs_example.py +278 -0
- attune_llm/session_status.py +745 -0
- attune_llm/state.py +246 -0
- attune_llm/utils/__init__.py +5 -0
- attune_llm/utils/tokens.py +349 -0
- attune_software/SOFTWARE_PLUGIN_README.md +57 -0
- attune_software/__init__.py +13 -0
- attune_software/cli/__init__.py +120 -0
- attune_software/cli/inspect.py +362 -0
- attune_software/cli.py +574 -0
- attune_software/plugin.py +188 -0
- workflow_scaffolding/__init__.py +11 -0
- workflow_scaffolding/__main__.py +12 -0
- workflow_scaffolding/cli.py +206 -0
- workflow_scaffolding/generator.py +265 -0
|
@@ -0,0 +1,1048 @@
|
|
|
1
|
+
"""Code Review Workflow
|
|
2
|
+
|
|
3
|
+
A tiered code analysis pipeline:
|
|
4
|
+
1. Haiku: Classify change type (cheap, fast)
|
|
5
|
+
2. Sonnet: Security scan + bug pattern matching
|
|
6
|
+
3. Opus: Architectural review (conditional on complexity)
|
|
7
|
+
|
|
8
|
+
Copyright 2025 Smart-AI-Memory
|
|
9
|
+
Licensed under Fair Source License 0.9
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
from typing import Any
|
|
13
|
+
|
|
14
|
+
from .base import BaseWorkflow, ModelTier
|
|
15
|
+
from .step_config import WorkflowStepConfig
|
|
16
|
+
|
|
17
|
+
# Step configurations for executor-based execution.
# NOTE(review): only the premium architect_review stage is configured here;
# the consumer of this mapping is outside this chunk — confirm how it is
# wired into the step executor.
CODE_REVIEW_STEPS = {
    "architect_review": WorkflowStepConfig(
        name="architect_review",
        task_type="architectural_decision",  # Premium tier task
        tier_hint="premium",
        description="Comprehensive architectural code review",
        max_tokens=3000,  # cap on generated review length
    ),
}
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class CodeReviewWorkflow(BaseWorkflow):
    """Multi-tier code review workflow.

    Uses cheap models for classification, capable models for security
    and bug scanning, and premium models only for complex architectural
    reviews (10+ files or core module changes).

    Usage:
        workflow = CodeReviewWorkflow()
        result = await workflow.execute(
            diff="...",
            files_changed=["src/main.py", "tests/test_main.py"],
            is_core_module=False
        )
    """

    # Public workflow identifier and one-line summary.
    name = "code-review"
    description = "Tiered code analysis with conditional premium review"
    # Default 3-stage pipeline; __init__ replaces this with a 4-stage
    # pipeline (inserting "crew_review") when use_crew=True.
    stages = ["classify", "scan", "architect_review"]
    # Model tier used for each stage (cheap -> capable -> premium).
    tier_map = {
        "classify": ModelTier.CHEAP,
        "scan": ModelTier.CAPABLE,
        "architect_review": ModelTier.PREMIUM,
    }
|
|
53
|
+
|
|
54
|
+
def __init__(
    self,
    file_threshold: int = 10,
    core_modules: list[str] | None = None,
    use_crew: bool = True,
    crew_config: dict | None = None,
    enable_auth_strategy: bool = True,
    **kwargs: Any,
):
    """Initialize workflow.

    Args:
        file_threshold: Number of files above which premium review is used.
        core_modules: List of module paths considered "core" (trigger premium).
        use_crew: Enable CodeReviewCrew for comprehensive 5-agent analysis (default: True).
        crew_config: Configuration dict for CodeReviewCrew.
        enable_auth_strategy: If True, use intelligent subscription vs API routing
            based on module size (default True).

    """
    super().__init__(**kwargs)

    # Premium-review triggers: file-count threshold and "core" paths.
    self.file_threshold = file_threshold
    default_core_modules = [
        "src/core/",
        "src/security/",
        "src/auth/",
        "empathy_os/core.py",
        "empathy_os/security/",
    ]
    self.core_modules = core_modules or default_core_modules

    self.use_crew = use_crew
    self.crew_config = crew_config or {}
    self.enable_auth_strategy = enable_auth_strategy

    # Per-run mutable state, populated while stages execute.
    self._needs_architect_review: bool = False
    self._change_type: str = "unknown"
    self._crew: Any = None
    self._crew_available = False
    self._auth_mode_used: str | None = None

    # With the crew enabled, run a 4-stage pipeline instead of the
    # class-level default, adding crew_review after classification.
    if use_crew:
        self.stages = ["classify", "crew_review", "scan", "architect_review"]
        self.tier_map = {
            "classify": ModelTier.CHEAP,
            "crew_review": ModelTier.CAPABLE,  # Changed from PREMIUM to CAPABLE
            "scan": ModelTier.CAPABLE,
            "architect_review": ModelTier.PREMIUM,
        }
|
|
101
|
+
|
|
102
|
+
async def _initialize_crew(self) -> None:
    """Lazily initialize the CodeReviewCrew, degrading gracefully.

    Sets ``self._crew_available`` so should_skip_stage() can bypass the
    crew_review stage when the optional crew package cannot be imported.
    Idempotent: once a crew instance exists, subsequent calls are no-ops.
    """
    if self._crew is not None:
        return

    # Fix: the original imported `logging` (and built a logger) separately
    # in both the try and the except branch; hoist it once.
    import logging

    logger = logging.getLogger(__name__)
    try:
        # Optional dependency — imported lazily so the workflow still
        # works when the agent-factory extras are not installed.
        from attune_llm.agent_factory.crews.code_review import CodeReviewCrew

        self._crew = CodeReviewCrew()
        self._crew_available = True
        logger.info("CodeReviewCrew initialized successfully")
    except ImportError as e:
        logger.warning(f"CodeReviewCrew not available: {e}")
        self._crew_available = False
|
|
120
|
+
|
|
121
|
+
def should_skip_stage(self, stage_name: str, input_data: Any) -> tuple[bool, str | None]:
    """Return (skip, reason) for a stage given the incoming data.

    Everything downstream of classify is skipped after an input-validation
    error; crew_review is skipped when the crew was not initialized; and
    architect_review is skipped unless classification flagged the change
    as needing it.
    """
    had_input_error = isinstance(input_data, dict) and bool(input_data.get("error"))
    if had_input_error and stage_name != "classify":
        return True, "Skipped due to input validation error"

    if stage_name == "crew_review" and not self._crew_available:
        return True, "CodeReviewCrew not available"

    if stage_name == "architect_review" and not self._needs_architect_review:
        return True, "Simple change - architectural review not needed"

    return False, None
|
|
136
|
+
|
|
137
|
+
def _gather_project_context(self) -> str:
    """Gather project context for project-level reviews.

    Reads project metadata (pyproject.toml, package.json, README*) and a
    shallow directory listing so the LLM has concrete material when the
    user targets a whole project instead of a diff.

    Returns:
        Formatted project context string, or "" when nothing at all could
        be gathered (the caller treats "" as "no code provided").
    """
    from pathlib import Path

    cwd = Path.cwd()
    project_name = cwd.name

    header = [f"# Project: {project_name}", f"# Path: {cwd}", ""]
    sections: list[str] = []

    def read_snippet(path, limit: int) -> str | None:
        """Best-effort truncated read; None when unreadable."""
        # Fix: read_text() can raise UnicodeDecodeError on binary/odd
        # files, which is NOT an OSError subclass — the original
        # `except OSError` guards let it escape. Catch both.
        try:
            return path.read_text()[:limit]
        except (OSError, UnicodeDecodeError):
            return None

    # Packaging metadata, fenced so the LLM sees it as code.
    for fname, fence in (("pyproject.toml", "toml"), ("package.json", "json")):
        candidate = cwd / fname
        if candidate.exists():
            content = read_snippet(candidate, 2000)
            if content is not None:
                sections.extend([f"## {fname}", f"```{fence}", content, "```", ""])

    # First readable README variant wins.
    for readme_name in ("README.md", "README.rst", "README.txt", "README"):
        readme = cwd / readme_name
        if readme.exists():
            content = read_snippet(readme, 3000)
            if content is not None:
                sections.extend([f"## {readme_name}", content, ""])
                break

    structure = self._directory_overview(cwd, project_name)

    # Fix: the original "only the header -> return ''" check was dead code
    # because the structure section was always appended first. Return ""
    # only when there is truly nothing: no metadata and a failed walk.
    if not sections and structure is None:
        return ""

    structure_lines = (
        structure if structure is not None else ["(Unable to read directory structure)"]
    )
    parts = header + sections + ["## Project Structure", "```", *structure_lines, "```"]
    return "\n".join(parts)

def _directory_overview(self, cwd, project_name: str) -> "list[str] | None":
    """Return a two-level directory listing, or None if the walk fails."""
    import os

    ignored_dirs = {
        "node_modules",
        "__pycache__",
        "venv",
        ".venv",
        "dist",
        "build",
        ".git",
        ".tox",
        ".pytest_cache",
        ".mypy_cache",
        "htmlcov",
    }
    key_suffixes = (".py", ".ts", ".js", ".json", ".yaml", ".yml", ".toml", ".md")
    lines: list[str] = []
    try:
        for root, dirs, files in os.walk(cwd):
            # Skip hidden and commonly-ignored directories.
            dirs[:] = [d for d in dirs if not d.startswith(".") and d not in ignored_dirs]
            level = root.replace(str(cwd), "").count(os.sep)
            indent = "  " * level
            lines.append(f"{indent}{os.path.basename(root) or project_name}/")
            # Show up to 10 key source/config files at this level.
            key_files = [
                f for f in files if f.endswith(key_suffixes) and not f.startswith(".")
            ][:10]
            lines.extend(f"{indent}  {f}" for f in key_files)
            # Fix: the original did `if level >= 2: break`, which aborts the
            # ENTIRE walk at the first depth-2 directory and silently drops
            # sibling top-level folders. Prune in place instead so the walk
            # still visits every level-0 and level-1 directory.
            if level >= 1:
                dirs[:] = []
    except OSError:
        return None
    return lines
|
|
246
|
+
|
|
247
|
+
async def run_stage(
    self,
    stage_name: str,
    tier: ModelTier,
    input_data: Any,
) -> tuple[Any, int, int]:
    """Dispatch a named pipeline stage to its handler.

    Returns the (result, input_tokens, output_tokens) triple produced by
    the stage handler.

    Raises:
        ValueError: If stage_name is not one of the known stages.
    """
    handlers = {
        "classify": self._classify,
        "crew_review": self._crew_review,
        "scan": self._scan,
        "architect_review": self._architect_review,
    }
    handler = handlers.get(stage_name)
    if handler is None:
        raise ValueError(f"Unknown stage: {stage_name}")
    return await handler(input_data, tier)
|
|
263
|
+
|
|
264
|
+
    async def _classify(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
        """Classify the type of change.

        First stage of the code-review pipeline: asks the LLM for a brief
        change-type / complexity / risk classification and decides whether a
        later architect review is required.

        Args:
            input_data: Stage input. Reads "diff", "target" and
                "files_changed". May be mutated: sets "is_project_review"
                when a project-level review is detected.
            tier: Model tier forwarded to the LLM call.

        Returns:
            Tuple of (stage result dict, input_tokens, output_tokens).
            On missing input the dict carries "error"/"error_message" and
            both token counts are 0.
        """
        diff = input_data.get("diff", "")
        target = input_data.get("target", "")
        files_changed = input_data.get("files_changed", [])

        # If target provided instead of diff, use it as the code to review
        code_to_review = diff or target

        # Handle project-level review when target is "." or empty
        if not code_to_review or code_to_review.strip() in (".", "", "./"):
            # Gather project context for project-level review
            project_context = self._gather_project_context()
            if not project_context:
                # Return early with helpful error message if no context found
                return (
                    {
                        "classification": "ERROR: No code provided for review",
                        "error": True,
                        "error_message": (
                            "No code was provided for review. Please ensure you:\n"
                            "1. Have a file open in the editor, OR\n"
                            "2. Select a specific file to review, OR\n"
                            '3. Provide code content directly via --input \'{"diff": "..."}\'\n\n'
                            "Tip: Use 'Select File...' option in the workflow picker."
                        ),
                        "change_type": "none",
                        "files_changed": [],
                        "file_count": 0,
                        "needs_architect_review": False,
                        "is_core_module": False,
                        "code_to_review": "",
                    },
                    0,
                    0,
                )
            code_to_review = project_context
            # Mark as project-level review
            input_data["is_project_review"] = True

        # === AUTH STRATEGY INTEGRATION ===
        # Best-effort: pick an auth mode based on the size of the review
        # target. Any failure here is logged and ignored — classification
        # proceeds regardless.
        if self.enable_auth_strategy:
            try:
                import logging
                from pathlib import Path

                from attune.models import (
                    count_lines_of_code,
                    get_auth_strategy,
                    get_module_size_category,
                )

                logger = logging.getLogger(__name__)

                # Calculate module size (for file) or total LOC (for directory)
                # NOTE(review): falls back to treating the diff text as a
                # path; Path(diff).exists() is normally False for real diff
                # content, so this is effectively target-only — confirm.
                target_path = target or diff
                total_lines = 0
                if target_path:
                    target_obj = Path(target_path)
                    if target_obj.exists():
                        if target_obj.is_file():
                            total_lines = count_lines_of_code(target_obj)
                        elif target_obj.is_dir():
                            for py_file in target_obj.rglob("*.py"):
                                try:
                                    total_lines += count_lines_of_code(py_file)
                                except Exception:
                                    # Skip unreadable files; LOC total is best-effort.
                                    pass

                if total_lines > 0:
                    strategy = get_auth_strategy()
                    recommended_mode = strategy.get_recommended_mode(total_lines)
                    self._auth_mode_used = recommended_mode.value

                    size_category = get_module_size_category(total_lines)
                    logger.info(
                        f"Code review target: {target_path} ({total_lines:,} LOC, {size_category})"
                    )
                    logger.info(f"Recommended auth mode: {recommended_mode.value}")

                    cost_estimate = strategy.estimate_cost(total_lines, recommended_mode)
                    if recommended_mode.value == "subscription":
                        logger.info(f"Cost: {cost_estimate['quota_cost']}")
                    else:
                        logger.info(f"Cost: ~${cost_estimate['monetary_cost']:.4f}")

            except Exception as e:
                # NOTE(review): relies on `logging` having been imported by the
                # first statement of the try block above; safe today, but a
                # module-level import would be more robust.
                logger = logging.getLogger(__name__)
                logger.warning(f"Auth strategy detection failed: {e}")

        system = """You are a code review classifier. Analyze the code and classify:
1. Change type: bug_fix, feature, refactor, docs, test, config, or security
2. Complexity: low, medium, high
3. Risk level: low, medium, high

Respond with a brief classification summary."""

        user_message = f"""Classify this code change:

Files: {", ".join(files_changed) if files_changed else "Not specified"}

Code:
{code_to_review[:4000]}"""

        response, input_tokens, output_tokens = await self._call_llm(
            tier,
            system,
            user_message,
            max_tokens=500,
        )

        # Parse response to determine if architect review needed.
        # Heuristic substring match: "high" must co-occur with
        # "complexity" or "risk" anywhere in the free-text response.
        is_high_complexity = "high" in response.lower() and (
            "complexity" in response.lower() or "risk" in response.lower()
        )
        # A file is "core" if any configured core-module fragment appears in
        # its path (substring match, not exact path comparison).
        is_core = (
            any(any(core in f for core in self.core_modules) for f in files_changed)
            if files_changed
            else False
        )

        self._needs_architect_review = (
            len(files_changed) >= self.file_threshold
            or is_core
            or is_high_complexity
            or input_data.get("is_core_module", False)
        )

        return (
            {
                "classification": response,
                "change_type": "feature",  # Will be refined by LLM
                "files_changed": files_changed,
                "file_count": len(files_changed),
                "needs_architect_review": self._needs_architect_review,
                "is_core_module": is_core,
                "code_to_review": code_to_review,
            },
            input_tokens,
            output_tokens,
        )
|
|
405
|
+
|
|
406
|
+
async def _crew_review(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
|
|
407
|
+
"""Run CodeReviewCrew for comprehensive 5-agent analysis.
|
|
408
|
+
|
|
409
|
+
This stage uses the CodeReviewCrew (Review Lead, Security Analyst,
|
|
410
|
+
Architecture Reviewer, Quality Analyst, Performance Reviewer) for
|
|
411
|
+
deep code analysis with memory graph integration.
|
|
412
|
+
|
|
413
|
+
Falls back gracefully if CodeReviewCrew is not available.
|
|
414
|
+
"""
|
|
415
|
+
await self._initialize_crew()
|
|
416
|
+
|
|
417
|
+
try:
|
|
418
|
+
from .code_review_adapters import (
|
|
419
|
+
_check_crew_available,
|
|
420
|
+
_get_crew_review,
|
|
421
|
+
crew_report_to_workflow_format,
|
|
422
|
+
)
|
|
423
|
+
except ImportError:
|
|
424
|
+
# Crew adapters removed - return fallback
|
|
425
|
+
return (
|
|
426
|
+
{
|
|
427
|
+
"crew_review": {
|
|
428
|
+
"available": False,
|
|
429
|
+
"fallback": True,
|
|
430
|
+
"reason": "Crew adapters not installed",
|
|
431
|
+
},
|
|
432
|
+
**input_data,
|
|
433
|
+
},
|
|
434
|
+
0,
|
|
435
|
+
0,
|
|
436
|
+
)
|
|
437
|
+
|
|
438
|
+
# Get code to review
|
|
439
|
+
diff = input_data.get("diff", "") or input_data.get("code_to_review", "")
|
|
440
|
+
files_changed = input_data.get("files_changed", [])
|
|
441
|
+
|
|
442
|
+
# Check if crew is available
|
|
443
|
+
if not self._crew_available or not _check_crew_available():
|
|
444
|
+
return (
|
|
445
|
+
{
|
|
446
|
+
"crew_review": {
|
|
447
|
+
"available": False,
|
|
448
|
+
"fallback": True,
|
|
449
|
+
"reason": "CodeReviewCrew not installed or failed to initialize",
|
|
450
|
+
},
|
|
451
|
+
**input_data,
|
|
452
|
+
},
|
|
453
|
+
0,
|
|
454
|
+
0,
|
|
455
|
+
)
|
|
456
|
+
|
|
457
|
+
# Run CodeReviewCrew
|
|
458
|
+
report = await _get_crew_review(
|
|
459
|
+
diff=diff,
|
|
460
|
+
files_changed=files_changed,
|
|
461
|
+
config=self.crew_config,
|
|
462
|
+
)
|
|
463
|
+
|
|
464
|
+
if report is None:
|
|
465
|
+
return (
|
|
466
|
+
{
|
|
467
|
+
"crew_review": {
|
|
468
|
+
"available": True,
|
|
469
|
+
"fallback": True,
|
|
470
|
+
"reason": "CodeReviewCrew review failed or timed out",
|
|
471
|
+
},
|
|
472
|
+
**input_data,
|
|
473
|
+
},
|
|
474
|
+
0,
|
|
475
|
+
0,
|
|
476
|
+
)
|
|
477
|
+
|
|
478
|
+
# Convert crew report to workflow format
|
|
479
|
+
crew_results = crew_report_to_workflow_format(report)
|
|
480
|
+
|
|
481
|
+
# Update needs_architect_review based on crew findings
|
|
482
|
+
has_blocking = crew_results.get("has_blocking_issues", False)
|
|
483
|
+
critical_count = len(crew_results.get("assessment", {}).get("critical_findings", []))
|
|
484
|
+
high_count = len(crew_results.get("assessment", {}).get("high_findings", []))
|
|
485
|
+
|
|
486
|
+
if has_blocking or critical_count > 0 or high_count > 2:
|
|
487
|
+
self._needs_architect_review = True
|
|
488
|
+
|
|
489
|
+
crew_review_result = {
|
|
490
|
+
"available": True,
|
|
491
|
+
"fallback": False,
|
|
492
|
+
"findings": crew_results.get("findings", []),
|
|
493
|
+
"finding_count": crew_results.get("finding_count", 0),
|
|
494
|
+
"verdict": crew_results.get("verdict", "approve"),
|
|
495
|
+
"quality_score": crew_results.get("quality_score", 100),
|
|
496
|
+
"has_blocking_issues": has_blocking,
|
|
497
|
+
"critical_count": critical_count,
|
|
498
|
+
"high_count": high_count,
|
|
499
|
+
"summary": crew_results.get("summary", ""),
|
|
500
|
+
"agents_used": crew_results.get("agents_used", []),
|
|
501
|
+
"memory_graph_hits": crew_results.get("memory_graph_hits", 0),
|
|
502
|
+
"review_duration_seconds": crew_results.get("review_duration_seconds", 0),
|
|
503
|
+
}
|
|
504
|
+
|
|
505
|
+
# Estimate tokens (crew uses internal LLM calls)
|
|
506
|
+
input_tokens = len(diff) // 4
|
|
507
|
+
output_tokens = len(str(crew_review_result)) // 4
|
|
508
|
+
|
|
509
|
+
return (
|
|
510
|
+
{
|
|
511
|
+
"crew_review": crew_review_result,
|
|
512
|
+
"needs_architect_review": self._needs_architect_review,
|
|
513
|
+
**input_data,
|
|
514
|
+
},
|
|
515
|
+
input_tokens,
|
|
516
|
+
output_tokens,
|
|
517
|
+
)
|
|
518
|
+
|
|
519
|
+
    async def _scan(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
        """Security scan and bug pattern matching.

        When external_audit_results is provided in input_data (e.g., from
        SecurityAuditCrew), these findings are merged with the LLM analysis
        and can trigger architect_review if critical issues are found.

        Args:
            input_data: Previous stage output; reads "code_to_review"/"diff",
                "classification", "files_changed"/"files" and the optional
                "external_audit_results" dict.
            tier: Model tier forwarded to the LLM call.

        Returns:
            Tuple of (result dict, input_tokens, output_tokens). The result
            includes structured "findings", a "summary" stats dict, a
            preliminary "verdict" and a pre-rendered "formatted_report" so a
            skipped architect_review stage still yields displayable output.
        """
        code_to_review = input_data.get("code_to_review", input_data.get("diff", ""))
        classification = input_data.get("classification", "")
        files_changed = input_data.get("files_changed", input_data.get("files", []))

        # Check for external audit results (e.g., from SecurityAuditCrew)
        external_audit = input_data.get("external_audit_results")

        system = """You are a security and code quality expert. Analyze the code for:

1. SECURITY ISSUES (OWASP Top 10):
- SQL Injection, XSS, Command Injection
- Hardcoded secrets, API keys, passwords
- Insecure deserialization
- Authentication/authorization flaws

2. BUG PATTERNS:
- Null/undefined references
- Resource leaks
- Race conditions
- Error handling issues

3. CODE QUALITY:
- Code smells
- Maintainability issues
- Performance concerns

For each issue found, provide:
- Severity (critical/high/medium/low)
- Location (if identifiable)
- Description
- Recommendation

Be thorough but focused on actionable findings."""

        # If external audit provided, include it in the prompt for context
        external_context = ""
        if external_audit:
            external_summary = external_audit.get("summary", "")
            external_findings = external_audit.get("findings", [])
            if external_summary or external_findings:
                # Build findings list efficiently (avoid O(n²) string concat)
                finding_lines = []
                for finding in external_findings[:10]:  # Top 10
                    sev = finding.get("severity", "unknown").upper()
                    title = finding.get("title", "N/A")
                    desc = finding.get("description", "")[:100]
                    finding_lines.append(f"- [{sev}] {title}: {desc}")

                external_context = f"""

## External Security Audit Results
Summary: {external_summary}

Findings ({len(external_findings)} total):
{chr(10).join(finding_lines)}

Verify these findings and identify additional issues."""

        user_message = f"""Review this code for security and quality issues:

Previous classification: {classification}
{external_context}
Code to review:
{code_to_review[:6000]}"""

        response, input_tokens, output_tokens = await self._call_llm(
            tier,
            system,
            user_message,
            max_tokens=2048,
        )

        # Extract structured findings from LLM response
        llm_findings = self._extract_findings_from_response(
            response=response,
            files_changed=files_changed or [],
            code_context=code_to_review[:1000],  # First 1000 chars for context
        )

        # Check if critical issues found in LLM response.
        # NOTE(review): plain substring check — any mention of "critical" or
        # "high" anywhere in the prose (even "no critical issues") trips it.
        has_critical = "critical" in response.lower() or "high" in response.lower()

        # Merge external audit findings if provided
        security_findings: list[dict] = []
        external_has_critical = False

        if external_audit:
            merged_response, security_findings, external_has_critical = self._merge_external_audit(
                response,
                external_audit,
            )
            response = merged_response
            has_critical = has_critical or external_has_critical

        # Combine LLM findings with security findings
        all_findings = llm_findings + security_findings

        # Calculate summary statistics
        summary: dict[str, Any] = {
            "total_findings": len(all_findings),
            "by_severity": {},
            "by_category": {},
            "files_affected": list({f.get("file", "") for f in all_findings if f.get("file")}),
        }

        # Count by severity
        for finding in all_findings:
            sev = finding.get("severity", "info")
            summary["by_severity"][sev] = summary["by_severity"].get(sev, 0) + 1

        # Count by category
        for finding in all_findings:
            cat = finding.get("category", "other")
            summary["by_category"][cat] = summary["by_category"].get(cat, 0) + 1

        # Add helpful message if no findings
        if len(all_findings) == 0:
            summary["message"] = (
                "No security or quality issues found in scan. "
                "Code will proceed to architectural review."
            )

        # Calculate security score (coarse two-level heuristic)
        security_score = 70 if has_critical else 90

        # Determine preliminary verdict based on scan
        if has_critical:
            preliminary_verdict = "request_changes"
        elif security_score >= 90:
            preliminary_verdict = "approve"
        else:
            preliminary_verdict = "approve_with_suggestions"

        result = {
            "scan_results": response,
            "findings": all_findings,  # NEW: structured findings for UI
            "summary": summary,  # NEW: summary statistics
            "security_findings": security_findings,  # Keep for backward compat
            "bug_patterns": [],
            "quality_issues": [],
            "has_critical_issues": has_critical,
            "security_score": security_score,
            "verdict": preliminary_verdict,  # Add verdict for when architect_review is skipped
            "needs_architect_review": input_data.get("needs_architect_review", False)
            or has_critical,
            "code_to_review": code_to_review,
            "classification": classification,
            "external_audit_included": external_audit is not None,
            "external_audit_risk_score": (
                external_audit.get("risk_score", 0) if external_audit else 0
            ),
            "auth_mode_used": self._auth_mode_used,  # Track auth mode
            "model_tier_used": tier.value,  # Track model tier
        }

        # Generate formatted report (for when architect_review is skipped)
        formatted_report = format_code_review_report(result, input_data)
        result["formatted_report"] = formatted_report
        result["display_output"] = formatted_report

        return (result, input_tokens, output_tokens)
|
|
687
|
+
|
|
688
|
+
def _merge_external_audit(
|
|
689
|
+
self,
|
|
690
|
+
llm_response: str,
|
|
691
|
+
external_audit: dict,
|
|
692
|
+
) -> tuple[str, list, bool]:
|
|
693
|
+
"""Merge external SecurityAuditCrew results into scan output.
|
|
694
|
+
|
|
695
|
+
Args:
|
|
696
|
+
llm_response: Response from LLM security scan
|
|
697
|
+
external_audit: External audit dict (from SecurityAuditCrew.to_dict())
|
|
698
|
+
|
|
699
|
+
Returns:
|
|
700
|
+
Tuple of (merged_response, security_findings, has_critical)
|
|
701
|
+
|
|
702
|
+
"""
|
|
703
|
+
findings = external_audit.get("findings", [])
|
|
704
|
+
summary = external_audit.get("summary", "")
|
|
705
|
+
risk_score = external_audit.get("risk_score", 0)
|
|
706
|
+
|
|
707
|
+
# Check for critical/high findings
|
|
708
|
+
has_critical = any(f.get("severity") in ("critical", "high") for f in findings)
|
|
709
|
+
|
|
710
|
+
# Build merged response
|
|
711
|
+
merged_sections = [llm_response]
|
|
712
|
+
|
|
713
|
+
if summary or findings:
|
|
714
|
+
# Build crew section efficiently (avoid O(n²) string concat)
|
|
715
|
+
parts = ["\n\n## SecurityAuditCrew Analysis\n"]
|
|
716
|
+
if summary:
|
|
717
|
+
parts.append(f"\n{summary}\n")
|
|
718
|
+
|
|
719
|
+
parts.append(f"\n**Risk Score**: {risk_score}/100\n")
|
|
720
|
+
|
|
721
|
+
if findings:
|
|
722
|
+
critical = [f for f in findings if f.get("severity") == "critical"]
|
|
723
|
+
high = [f for f in findings if f.get("severity") == "high"]
|
|
724
|
+
|
|
725
|
+
if critical:
|
|
726
|
+
parts.append("\n### Critical Findings\n")
|
|
727
|
+
for f in critical:
|
|
728
|
+
title = f"- **{f.get('title', 'N/A')}**"
|
|
729
|
+
if f.get("file"):
|
|
730
|
+
title += f" ({f.get('file')}:{f.get('line', '?')})"
|
|
731
|
+
parts.append(title)
|
|
732
|
+
parts.append(f"\n {f.get('description', '')[:200]}\n")
|
|
733
|
+
if f.get("remediation"):
|
|
734
|
+
parts.append(f" *Fix*: {f.get('remediation')[:150]}\n")
|
|
735
|
+
|
|
736
|
+
if high:
|
|
737
|
+
parts.append("\n### High Severity Findings\n")
|
|
738
|
+
for f in high[:5]: # Top 5
|
|
739
|
+
title = f"- **{f.get('title', 'N/A')}**"
|
|
740
|
+
if f.get("file"):
|
|
741
|
+
title += f" ({f.get('file')}:{f.get('line', '?')})"
|
|
742
|
+
parts.append(title)
|
|
743
|
+
parts.append(f"\n {f.get('description', '')[:150]}\n")
|
|
744
|
+
|
|
745
|
+
merged_sections.append("".join(parts))
|
|
746
|
+
|
|
747
|
+
return "\n".join(merged_sections), findings, has_critical
|
|
748
|
+
|
|
749
|
+
    async def _architect_review(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
        """Deep architectural review.

        Supports XML-enhanced prompts when enabled in workflow config.

        Args:
            input_data: Previous stage output; reads "code_to_review",
                "scan_results" and "classification".
            tier: Model tier used by the legacy `_call_llm` path and recorded
                in the result.

        Returns:
            Tuple of (result dict, input_tokens, output_tokens). The result
            carries the review text, a normalized "verdict" and a
            pre-rendered "formatted_report"/"display_output".
        """
        code_to_review = input_data.get("code_to_review", "")
        scan_results = input_data.get("scan_results", "")
        classification = input_data.get("classification", "")

        # Build input payload (truncated to keep the prompt bounded)
        input_payload = f"""Classification: {classification}

Security Scan Results:
{scan_results[:2000]}

Code:
{code_to_review[:4000]}"""

        # Check if XML prompts are enabled
        if self._is_xml_enabled():
            user_message = self._render_xml_prompt(
                role="senior software architect",
                goal="Perform comprehensive code review with architectural assessment",
                instructions=[
                    "Assess design patterns used (or missing)",
                    "Evaluate SOLID principles compliance",
                    "Check separation of concerns",
                    "Analyze coupling and cohesion",
                    "Provide specific improvement recommendations with examples",
                    "Suggest refactoring and testing improvements",
                    "Provide verdict: approve, approve_with_suggestions, or reject",
                ],
                constraints=[
                    "Be specific and actionable",
                    "Reference file locations where possible",
                    "Prioritize issues by impact",
                ],
                input_type="code",
                input_payload=input_payload,
            )
            # XML template embeds the role, so no separate system prompt.
            system = None
        else:
            system = """You are a senior software architect. Provide a comprehensive review:

1. ARCHITECTURAL ASSESSMENT:
- Design patterns used (or missing)
- SOLID principles compliance
- Separation of concerns
- Coupling and cohesion

2. RECOMMENDATIONS:
- Specific improvements with examples
- Refactoring suggestions
- Testing recommendations

3. VERDICT:
- APPROVE: Code is production-ready
- APPROVE_WITH_SUGGESTIONS: Minor improvements recommended
- REQUEST_CHANGES: Issues must be addressed
- REJECT: Fundamental problems

Provide actionable, specific feedback."""

            user_message = f"""Perform an architectural review:

{input_payload}"""

        # Try executor-based execution first (Phase 3 pattern)
        if self._executor is not None or self._api_key:
            try:
                step = CODE_REVIEW_STEPS["architect_review"]
                # NOTE(review): `cost` is returned by the executor but not
                # used here — confirm whether it should be surfaced.
                response, input_tokens, output_tokens, cost = await self.run_step_with_executor(
                    step=step,
                    prompt=user_message,
                    system=system,
                )
            except Exception:
                # Fall back to legacy _call_llm if executor fails
                response, input_tokens, output_tokens = await self._call_llm(
                    tier,
                    system or "",
                    user_message,
                    max_tokens=3000,
                )
        else:
            # Legacy path for backward compatibility
            response, input_tokens, output_tokens = await self._call_llm(
                tier,
                system or "",
                user_message,
                max_tokens=3000,
            )

        # Parse XML response if enforcement is enabled
        parsed_data = self._parse_xml_response(response)

        # Determine verdict from response or parsed data; default is the
        # middle-ground verdict.
        verdict = "approve_with_suggestions"
        if parsed_data.get("xml_parsed"):
            extra = parsed_data.get("_parsed_response")
            if extra and hasattr(extra, "extra"):
                parsed_verdict = extra.extra.get("verdict", "").lower()
                if parsed_verdict in [
                    "approve",
                    "approve_with_suggestions",
                    "request_changes",
                    "reject",
                ]:
                    verdict = parsed_verdict

        if verdict == "approve_with_suggestions":
            # Fall back to text parsing (keyword search over the raw response)
            if "REQUEST_CHANGES" in response.upper() or "REJECT" in response.upper():
                verdict = "request_changes"
            elif "APPROVE" in response.upper() and "SUGGESTIONS" not in response.upper():
                verdict = "approve"

        result: dict = {
            "architectural_review": response,
            "verdict": verdict,
            "recommendations": [],
            "model_tier_used": tier.value,
            "auth_mode_used": self._auth_mode_used,
        }

        # Merge parsed XML data if available
        if parsed_data.get("xml_parsed"):
            result.update(
                {
                    "xml_parsed": True,
                    "summary": parsed_data.get("summary"),
                    "findings": parsed_data.get("findings", []),
                    "checklist": parsed_data.get("checklist", []),
                },
            )

        # Add formatted report for human readability
        formatted_report = format_code_review_report(result, input_data)
        result["formatted_report"] = formatted_report

        # Also add as top-level display_output for better UX
        result["display_output"] = formatted_report

        return (result, input_tokens, output_tokens)
|
|
893
|
+
|
|
894
|
+
|
|
895
|
+
def format_code_review_report(result: dict, input_data: dict) -> str:
    """Render a code review result as a human-readable plain-text report.

    Args:
        result: The architect_review stage result
        input_data: Input data from previous stages

    Returns:
        Formatted report string

    """
    heavy_rule = "=" * 60
    light_rule = "-" * 60
    out: list[str] = []

    def heading(title: str) -> None:
        # Light-ruled section header used by every body section.
        out.append(light_rule)
        out.append(title)
        out.append(light_rule)

    # Short-circuit: upstream input validation failed — emit an error banner.
    if input_data.get("error"):
        return "\n".join(
            [
                heavy_rule,
                "CODE REVIEW - INPUT ERROR",
                heavy_rule,
                "",
                input_data.get("error_message", "No code provided for review."),
                "",
                heavy_rule,
            ]
        )

    # Header with verdict and matching icon.
    verdict = result.get("verdict", "unknown").upper().replace("_", " ")
    icon_by_verdict = {
        "APPROVE": "✅",
        "APPROVE WITH SUGGESTIONS": "🔶",
        "REQUEST CHANGES": "⚠️",
        "REJECT": "❌",
    }
    verdict_icon = icon_by_verdict.get(verdict, "❓")

    out.extend(
        [
            heavy_rule,
            "CODE REVIEW REPORT",
            heavy_rule,
            "",
            f"Verdict: {verdict_icon} {verdict}",
            "",
        ]
    )

    # Classification summary (truncated).
    classification = input_data.get("classification", "")
    if classification:
        heading("CLASSIFICATION")
        out.append(classification[:500])
        out.append("")

    # Security analysis is always present.
    has_critical = input_data.get("has_critical_issues", False)
    security_score = input_data.get("security_score", 100)
    if has_critical:
        security_icon = "🔴"
    elif security_score < 90:
        security_icon = "🟡"
    else:
        security_icon = "🟢"

    heading("SECURITY ANALYSIS")
    out.append(f"Security Score: {security_icon} {security_score}/100")
    out.append(f"Critical Issues: {'Yes' if has_critical else 'No'}")
    out.append("")

    # Up to ten security findings as one-liners.
    security_findings = input_data.get("security_findings", [])
    if security_findings:
        out.append("Security Findings:")
        severity_icons = {"CRITICAL": "🔴", "HIGH": "🟠", "MEDIUM": "🟡", "LOW": "🟢"}
        for item in security_findings[:10]:
            severity = item.get("severity", "unknown").upper()
            title = item.get("title", "N/A")
            sev_icon = severity_icons.get(severity, "⚪")
            out.append(f" {sev_icon} [{severity}] {title}")
        out.append("")

    # Truncated scan output for readability.
    scan_results = input_data.get("scan_results", "")
    if scan_results:
        out.append("Scan Summary:")
        excerpt = scan_results[:800]
        if len(scan_results) > 800:
            excerpt += "..."
        out.append(excerpt)
        out.append("")

    arch_review = result.get("architectural_review", "")
    if arch_review:
        heading("ARCHITECTURAL REVIEW")
        out.append(arch_review)
        out.append("")

    recommendations = result.get("recommendations", [])
    if recommendations:
        heading("RECOMMENDATIONS")
        out.extend(f"{idx}. {rec}" for idx, rec in enumerate(recommendations, 1))
        out.append("")

    # Crew review block only when a real (non-fallback) crew run happened.
    crew_review = input_data.get("crew_review", {})
    if crew_review and crew_review.get("available") and not crew_review.get("fallback"):
        heading("CREW REVIEW ANALYSIS")
        out.append(f"Quality Score: {crew_review.get('quality_score', 'N/A')}/100")
        out.append(f"Finding Count: {crew_review.get('finding_count', 0)}")
        agents = crew_review.get("agents_used", [])
        if agents:
            out.append(f"Agents Used: {', '.join(agents)}")
        crew_summary = crew_review.get("summary", "")
        if crew_summary:
            out.append(f"Summary: {crew_summary[:300]}")
        out.append("")

    # Detect an effectively-empty report (header/footer only).
    has_content = any(
        (
            input_data.get("classification"),
            input_data.get("security_findings"),
            input_data.get("scan_results"),
            result.get("architectural_review"),
            result.get("recommendations"),
        )
    )

    if not has_content and len(out) < 15:  # Just header/footer, no real content
        heading("NO ISSUES FOUND")
        out.append("")
        out.append("The code review workflow completed but found no issues to report.")
        out.append("This could mean:")
        out.append(" • No code was provided for review (check input parameters)")
        out.append(" • The code is clean and follows best practices")
        out.append(" • The workflow needs configuration (check .attune/workflows.yaml)")
        out.append("")
        out.append("Tip: Try running with a specific file or diff:")
        out.append(' empathy workflow run code-review --input \'{"target": "path/to/file.py"}\'')
        out.append("")

    # Footer
    out.append(heavy_rule)
    model_tier = result.get("model_tier_used", "unknown")
    out.append(f"Review completed using {model_tier} tier model")
    out.append(heavy_rule)

    return "\n".join(out)
|