attune_ai-2.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- attune/__init__.py +358 -0
- attune/adaptive/__init__.py +13 -0
- attune/adaptive/task_complexity.py +127 -0
- attune/agent_monitoring.py +414 -0
- attune/cache/__init__.py +117 -0
- attune/cache/base.py +166 -0
- attune/cache/dependency_manager.py +256 -0
- attune/cache/hash_only.py +251 -0
- attune/cache/hybrid.py +457 -0
- attune/cache/storage.py +285 -0
- attune/cache_monitor.py +356 -0
- attune/cache_stats.py +298 -0
- attune/cli/__init__.py +152 -0
- attune/cli/__main__.py +12 -0
- attune/cli/commands/__init__.py +1 -0
- attune/cli/commands/batch.py +264 -0
- attune/cli/commands/cache.py +248 -0
- attune/cli/commands/help.py +331 -0
- attune/cli/commands/info.py +140 -0
- attune/cli/commands/inspect.py +436 -0
- attune/cli/commands/inspection.py +57 -0
- attune/cli/commands/memory.py +48 -0
- attune/cli/commands/metrics.py +92 -0
- attune/cli/commands/orchestrate.py +184 -0
- attune/cli/commands/patterns.py +207 -0
- attune/cli/commands/profiling.py +202 -0
- attune/cli/commands/provider.py +98 -0
- attune/cli/commands/routing.py +285 -0
- attune/cli/commands/setup.py +96 -0
- attune/cli/commands/status.py +235 -0
- attune/cli/commands/sync.py +166 -0
- attune/cli/commands/tier.py +121 -0
- attune/cli/commands/utilities.py +114 -0
- attune/cli/commands/workflow.py +579 -0
- attune/cli/core.py +32 -0
- attune/cli/parsers/__init__.py +68 -0
- attune/cli/parsers/batch.py +118 -0
- attune/cli/parsers/cache.py +65 -0
- attune/cli/parsers/help.py +41 -0
- attune/cli/parsers/info.py +26 -0
- attune/cli/parsers/inspect.py +66 -0
- attune/cli/parsers/metrics.py +42 -0
- attune/cli/parsers/orchestrate.py +61 -0
- attune/cli/parsers/patterns.py +54 -0
- attune/cli/parsers/provider.py +40 -0
- attune/cli/parsers/routing.py +110 -0
- attune/cli/parsers/setup.py +42 -0
- attune/cli/parsers/status.py +47 -0
- attune/cli/parsers/sync.py +31 -0
- attune/cli/parsers/tier.py +33 -0
- attune/cli/parsers/workflow.py +77 -0
- attune/cli/utils/__init__.py +1 -0
- attune/cli/utils/data.py +242 -0
- attune/cli/utils/helpers.py +68 -0
- attune/cli_legacy.py +3957 -0
- attune/cli_minimal.py +1159 -0
- attune/cli_router.py +437 -0
- attune/cli_unified.py +814 -0
- attune/config/__init__.py +66 -0
- attune/config/xml_config.py +286 -0
- attune/config.py +545 -0
- attune/coordination.py +870 -0
- attune/core.py +1511 -0
- attune/core_modules/__init__.py +15 -0
- attune/cost_tracker.py +626 -0
- attune/dashboard/__init__.py +41 -0
- attune/dashboard/app.py +512 -0
- attune/dashboard/simple_server.py +435 -0
- attune/dashboard/standalone_server.py +547 -0
- attune/discovery.py +306 -0
- attune/emergence.py +306 -0
- attune/exceptions.py +123 -0
- attune/feedback_loops.py +373 -0
- attune/hot_reload/README.md +473 -0
- attune/hot_reload/__init__.py +62 -0
- attune/hot_reload/config.py +83 -0
- attune/hot_reload/integration.py +229 -0
- attune/hot_reload/reloader.py +298 -0
- attune/hot_reload/watcher.py +183 -0
- attune/hot_reload/websocket.py +177 -0
- attune/levels.py +577 -0
- attune/leverage_points.py +441 -0
- attune/logging_config.py +261 -0
- attune/mcp/__init__.py +10 -0
- attune/mcp/server.py +506 -0
- attune/memory/__init__.py +237 -0
- attune/memory/claude_memory.py +469 -0
- attune/memory/config.py +224 -0
- attune/memory/control_panel.py +1290 -0
- attune/memory/control_panel_support.py +145 -0
- attune/memory/cross_session.py +845 -0
- attune/memory/edges.py +179 -0
- attune/memory/encryption.py +159 -0
- attune/memory/file_session.py +770 -0
- attune/memory/graph.py +570 -0
- attune/memory/long_term.py +913 -0
- attune/memory/long_term_types.py +99 -0
- attune/memory/mixins/__init__.py +25 -0
- attune/memory/mixins/backend_init_mixin.py +249 -0
- attune/memory/mixins/capabilities_mixin.py +208 -0
- attune/memory/mixins/handoff_mixin.py +208 -0
- attune/memory/mixins/lifecycle_mixin.py +49 -0
- attune/memory/mixins/long_term_mixin.py +352 -0
- attune/memory/mixins/promotion_mixin.py +109 -0
- attune/memory/mixins/short_term_mixin.py +182 -0
- attune/memory/nodes.py +179 -0
- attune/memory/redis_bootstrap.py +540 -0
- attune/memory/security/__init__.py +31 -0
- attune/memory/security/audit_logger.py +932 -0
- attune/memory/security/pii_scrubber.py +640 -0
- attune/memory/security/secrets_detector.py +678 -0
- attune/memory/short_term.py +2192 -0
- attune/memory/simple_storage.py +302 -0
- attune/memory/storage/__init__.py +15 -0
- attune/memory/storage_backend.py +167 -0
- attune/memory/summary_index.py +583 -0
- attune/memory/types.py +446 -0
- attune/memory/unified.py +182 -0
- attune/meta_workflows/__init__.py +74 -0
- attune/meta_workflows/agent_creator.py +248 -0
- attune/meta_workflows/builtin_templates.py +567 -0
- attune/meta_workflows/cli_commands/__init__.py +56 -0
- attune/meta_workflows/cli_commands/agent_commands.py +321 -0
- attune/meta_workflows/cli_commands/analytics_commands.py +442 -0
- attune/meta_workflows/cli_commands/config_commands.py +232 -0
- attune/meta_workflows/cli_commands/memory_commands.py +182 -0
- attune/meta_workflows/cli_commands/template_commands.py +354 -0
- attune/meta_workflows/cli_commands/workflow_commands.py +382 -0
- attune/meta_workflows/cli_meta_workflows.py +59 -0
- attune/meta_workflows/form_engine.py +292 -0
- attune/meta_workflows/intent_detector.py +409 -0
- attune/meta_workflows/models.py +569 -0
- attune/meta_workflows/pattern_learner.py +738 -0
- attune/meta_workflows/plan_generator.py +384 -0
- attune/meta_workflows/session_context.py +397 -0
- attune/meta_workflows/template_registry.py +229 -0
- attune/meta_workflows/workflow.py +984 -0
- attune/metrics/__init__.py +12 -0
- attune/metrics/collector.py +31 -0
- attune/metrics/prompt_metrics.py +194 -0
- attune/models/__init__.py +172 -0
- attune/models/__main__.py +13 -0
- attune/models/adaptive_routing.py +437 -0
- attune/models/auth_cli.py +444 -0
- attune/models/auth_strategy.py +450 -0
- attune/models/cli.py +655 -0
- attune/models/empathy_executor.py +354 -0
- attune/models/executor.py +257 -0
- attune/models/fallback.py +762 -0
- attune/models/provider_config.py +282 -0
- attune/models/registry.py +472 -0
- attune/models/tasks.py +359 -0
- attune/models/telemetry/__init__.py +71 -0
- attune/models/telemetry/analytics.py +594 -0
- attune/models/telemetry/backend.py +196 -0
- attune/models/telemetry/data_models.py +431 -0
- attune/models/telemetry/storage.py +489 -0
- attune/models/token_estimator.py +420 -0
- attune/models/validation.py +280 -0
- attune/monitoring/__init__.py +52 -0
- attune/monitoring/alerts.py +946 -0
- attune/monitoring/alerts_cli.py +448 -0
- attune/monitoring/multi_backend.py +271 -0
- attune/monitoring/otel_backend.py +362 -0
- attune/optimization/__init__.py +19 -0
- attune/optimization/context_optimizer.py +272 -0
- attune/orchestration/__init__.py +67 -0
- attune/orchestration/agent_templates.py +707 -0
- attune/orchestration/config_store.py +499 -0
- attune/orchestration/execution_strategies.py +2111 -0
- attune/orchestration/meta_orchestrator.py +1168 -0
- attune/orchestration/pattern_learner.py +696 -0
- attune/orchestration/real_tools.py +931 -0
- attune/pattern_cache.py +187 -0
- attune/pattern_library.py +542 -0
- attune/patterns/debugging/all_patterns.json +81 -0
- attune/patterns/debugging/workflow_20260107_1770825e.json +77 -0
- attune/patterns/refactoring_memory.json +89 -0
- attune/persistence.py +564 -0
- attune/platform_utils.py +265 -0
- attune/plugins/__init__.py +28 -0
- attune/plugins/base.py +361 -0
- attune/plugins/registry.py +268 -0
- attune/project_index/__init__.py +32 -0
- attune/project_index/cli.py +335 -0
- attune/project_index/index.py +667 -0
- attune/project_index/models.py +504 -0
- attune/project_index/reports.py +474 -0
- attune/project_index/scanner.py +777 -0
- attune/project_index/scanner_parallel.py +291 -0
- attune/prompts/__init__.py +61 -0
- attune/prompts/config.py +77 -0
- attune/prompts/context.py +177 -0
- attune/prompts/parser.py +285 -0
- attune/prompts/registry.py +313 -0
- attune/prompts/templates.py +208 -0
- attune/redis_config.py +302 -0
- attune/redis_memory.py +799 -0
- attune/resilience/__init__.py +56 -0
- attune/resilience/circuit_breaker.py +256 -0
- attune/resilience/fallback.py +179 -0
- attune/resilience/health.py +300 -0
- attune/resilience/retry.py +209 -0
- attune/resilience/timeout.py +135 -0
- attune/routing/__init__.py +43 -0
- attune/routing/chain_executor.py +433 -0
- attune/routing/classifier.py +217 -0
- attune/routing/smart_router.py +234 -0
- attune/routing/workflow_registry.py +343 -0
- attune/scaffolding/README.md +589 -0
- attune/scaffolding/__init__.py +35 -0
- attune/scaffolding/__main__.py +14 -0
- attune/scaffolding/cli.py +240 -0
- attune/scaffolding/templates/base_wizard.py.jinja2 +121 -0
- attune/scaffolding/templates/coach_wizard.py.jinja2 +321 -0
- attune/scaffolding/templates/domain_wizard.py.jinja2 +408 -0
- attune/scaffolding/templates/linear_flow_wizard.py.jinja2 +203 -0
- attune/socratic/__init__.py +256 -0
- attune/socratic/ab_testing.py +958 -0
- attune/socratic/blueprint.py +533 -0
- attune/socratic/cli.py +703 -0
- attune/socratic/collaboration.py +1114 -0
- attune/socratic/domain_templates.py +924 -0
- attune/socratic/embeddings.py +738 -0
- attune/socratic/engine.py +794 -0
- attune/socratic/explainer.py +682 -0
- attune/socratic/feedback.py +772 -0
- attune/socratic/forms.py +629 -0
- attune/socratic/generator.py +732 -0
- attune/socratic/llm_analyzer.py +637 -0
- attune/socratic/mcp_server.py +702 -0
- attune/socratic/session.py +312 -0
- attune/socratic/storage.py +667 -0
- attune/socratic/success.py +730 -0
- attune/socratic/visual_editor.py +860 -0
- attune/socratic/web_ui.py +958 -0
- attune/telemetry/__init__.py +39 -0
- attune/telemetry/agent_coordination.py +475 -0
- attune/telemetry/agent_tracking.py +367 -0
- attune/telemetry/approval_gates.py +545 -0
- attune/telemetry/cli.py +1231 -0
- attune/telemetry/commands/__init__.py +14 -0
- attune/telemetry/commands/dashboard_commands.py +696 -0
- attune/telemetry/event_streaming.py +409 -0
- attune/telemetry/feedback_loop.py +567 -0
- attune/telemetry/usage_tracker.py +591 -0
- attune/templates.py +754 -0
- attune/test_generator/__init__.py +38 -0
- attune/test_generator/__main__.py +14 -0
- attune/test_generator/cli.py +234 -0
- attune/test_generator/generator.py +355 -0
- attune/test_generator/risk_analyzer.py +216 -0
- attune/test_generator/templates/unit_test.py.jinja2 +272 -0
- attune/tier_recommender.py +384 -0
- attune/tools.py +183 -0
- attune/trust/__init__.py +28 -0
- attune/trust/circuit_breaker.py +579 -0
- attune/trust_building.py +527 -0
- attune/validation/__init__.py +19 -0
- attune/validation/xml_validator.py +281 -0
- attune/vscode_bridge.py +173 -0
- attune/workflow_commands.py +780 -0
- attune/workflow_patterns/__init__.py +33 -0
- attune/workflow_patterns/behavior.py +249 -0
- attune/workflow_patterns/core.py +76 -0
- attune/workflow_patterns/output.py +99 -0
- attune/workflow_patterns/registry.py +255 -0
- attune/workflow_patterns/structural.py +288 -0
- attune/workflows/__init__.py +539 -0
- attune/workflows/autonomous_test_gen.py +1268 -0
- attune/workflows/base.py +2667 -0
- attune/workflows/batch_processing.py +342 -0
- attune/workflows/bug_predict.py +1084 -0
- attune/workflows/builder.py +273 -0
- attune/workflows/caching.py +253 -0
- attune/workflows/code_review.py +1048 -0
- attune/workflows/code_review_adapters.py +312 -0
- attune/workflows/code_review_pipeline.py +722 -0
- attune/workflows/config.py +645 -0
- attune/workflows/dependency_check.py +644 -0
- attune/workflows/document_gen/__init__.py +25 -0
- attune/workflows/document_gen/config.py +30 -0
- attune/workflows/document_gen/report_formatter.py +162 -0
- attune/workflows/document_gen/workflow.py +1426 -0
- attune/workflows/document_manager.py +216 -0
- attune/workflows/document_manager_README.md +134 -0
- attune/workflows/documentation_orchestrator.py +1205 -0
- attune/workflows/history.py +510 -0
- attune/workflows/keyboard_shortcuts/__init__.py +39 -0
- attune/workflows/keyboard_shortcuts/generators.py +391 -0
- attune/workflows/keyboard_shortcuts/parsers.py +416 -0
- attune/workflows/keyboard_shortcuts/prompts.py +295 -0
- attune/workflows/keyboard_shortcuts/schema.py +193 -0
- attune/workflows/keyboard_shortcuts/workflow.py +509 -0
- attune/workflows/llm_base.py +363 -0
- attune/workflows/manage_docs.py +87 -0
- attune/workflows/manage_docs_README.md +134 -0
- attune/workflows/manage_documentation.py +821 -0
- attune/workflows/new_sample_workflow1.py +149 -0
- attune/workflows/new_sample_workflow1_README.md +150 -0
- attune/workflows/orchestrated_health_check.py +849 -0
- attune/workflows/orchestrated_release_prep.py +600 -0
- attune/workflows/output.py +413 -0
- attune/workflows/perf_audit.py +863 -0
- attune/workflows/pr_review.py +762 -0
- attune/workflows/progress.py +785 -0
- attune/workflows/progress_server.py +322 -0
- attune/workflows/progressive/README 2.md +454 -0
- attune/workflows/progressive/README.md +454 -0
- attune/workflows/progressive/__init__.py +82 -0
- attune/workflows/progressive/cli.py +219 -0
- attune/workflows/progressive/core.py +488 -0
- attune/workflows/progressive/orchestrator.py +723 -0
- attune/workflows/progressive/reports.py +520 -0
- attune/workflows/progressive/telemetry.py +274 -0
- attune/workflows/progressive/test_gen.py +495 -0
- attune/workflows/progressive/workflow.py +589 -0
- attune/workflows/refactor_plan.py +694 -0
- attune/workflows/release_prep.py +895 -0
- attune/workflows/release_prep_crew.py +969 -0
- attune/workflows/research_synthesis.py +404 -0
- attune/workflows/routing.py +168 -0
- attune/workflows/secure_release.py +593 -0
- attune/workflows/security_adapters.py +297 -0
- attune/workflows/security_audit.py +1329 -0
- attune/workflows/security_audit_phase3.py +355 -0
- attune/workflows/seo_optimization.py +633 -0
- attune/workflows/step_config.py +234 -0
- attune/workflows/telemetry_mixin.py +269 -0
- attune/workflows/test5.py +125 -0
- attune/workflows/test5_README.md +158 -0
- attune/workflows/test_coverage_boost_crew.py +849 -0
- attune/workflows/test_gen/__init__.py +52 -0
- attune/workflows/test_gen/ast_analyzer.py +249 -0
- attune/workflows/test_gen/config.py +88 -0
- attune/workflows/test_gen/data_models.py +38 -0
- attune/workflows/test_gen/report_formatter.py +289 -0
- attune/workflows/test_gen/test_templates.py +381 -0
- attune/workflows/test_gen/workflow.py +655 -0
- attune/workflows/test_gen.py +54 -0
- attune/workflows/test_gen_behavioral.py +477 -0
- attune/workflows/test_gen_parallel.py +341 -0
- attune/workflows/test_lifecycle.py +526 -0
- attune/workflows/test_maintenance.py +627 -0
- attune/workflows/test_maintenance_cli.py +590 -0
- attune/workflows/test_maintenance_crew.py +840 -0
- attune/workflows/test_runner.py +622 -0
- attune/workflows/tier_tracking.py +531 -0
- attune/workflows/xml_enhanced_crew.py +285 -0
- attune_ai-2.0.0.dist-info/METADATA +1026 -0
- attune_ai-2.0.0.dist-info/RECORD +457 -0
- attune_ai-2.0.0.dist-info/WHEEL +5 -0
- attune_ai-2.0.0.dist-info/entry_points.txt +26 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE +201 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +101 -0
- attune_ai-2.0.0.dist-info/top_level.txt +5 -0
- attune_healthcare/__init__.py +13 -0
- attune_healthcare/monitors/__init__.py +9 -0
- attune_healthcare/monitors/clinical_protocol_monitor.py +315 -0
- attune_healthcare/monitors/monitoring/__init__.py +44 -0
- attune_healthcare/monitors/monitoring/protocol_checker.py +300 -0
- attune_healthcare/monitors/monitoring/protocol_loader.py +214 -0
- attune_healthcare/monitors/monitoring/sensor_parsers.py +306 -0
- attune_healthcare/monitors/monitoring/trajectory_analyzer.py +389 -0
- attune_llm/README.md +553 -0
- attune_llm/__init__.py +28 -0
- attune_llm/agent_factory/__init__.py +53 -0
- attune_llm/agent_factory/adapters/__init__.py +85 -0
- attune_llm/agent_factory/adapters/autogen_adapter.py +312 -0
- attune_llm/agent_factory/adapters/crewai_adapter.py +483 -0
- attune_llm/agent_factory/adapters/haystack_adapter.py +298 -0
- attune_llm/agent_factory/adapters/langchain_adapter.py +362 -0
- attune_llm/agent_factory/adapters/langgraph_adapter.py +333 -0
- attune_llm/agent_factory/adapters/native.py +228 -0
- attune_llm/agent_factory/adapters/wizard_adapter.py +423 -0
- attune_llm/agent_factory/base.py +305 -0
- attune_llm/agent_factory/crews/__init__.py +67 -0
- attune_llm/agent_factory/crews/code_review.py +1113 -0
- attune_llm/agent_factory/crews/health_check.py +1262 -0
- attune_llm/agent_factory/crews/refactoring.py +1128 -0
- attune_llm/agent_factory/crews/security_audit.py +1018 -0
- attune_llm/agent_factory/decorators.py +287 -0
- attune_llm/agent_factory/factory.py +558 -0
- attune_llm/agent_factory/framework.py +193 -0
- attune_llm/agent_factory/memory_integration.py +328 -0
- attune_llm/agent_factory/resilient.py +320 -0
- attune_llm/agents_md/__init__.py +22 -0
- attune_llm/agents_md/loader.py +218 -0
- attune_llm/agents_md/parser.py +271 -0
- attune_llm/agents_md/registry.py +307 -0
- attune_llm/claude_memory.py +466 -0
- attune_llm/cli/__init__.py +8 -0
- attune_llm/cli/sync_claude.py +487 -0
- attune_llm/code_health.py +1313 -0
- attune_llm/commands/__init__.py +51 -0
- attune_llm/commands/context.py +375 -0
- attune_llm/commands/loader.py +301 -0
- attune_llm/commands/models.py +231 -0
- attune_llm/commands/parser.py +371 -0
- attune_llm/commands/registry.py +429 -0
- attune_llm/config/__init__.py +29 -0
- attune_llm/config/unified.py +291 -0
- attune_llm/context/__init__.py +22 -0
- attune_llm/context/compaction.py +455 -0
- attune_llm/context/manager.py +434 -0
- attune_llm/contextual_patterns.py +361 -0
- attune_llm/core.py +907 -0
- attune_llm/git_pattern_extractor.py +435 -0
- attune_llm/hooks/__init__.py +24 -0
- attune_llm/hooks/config.py +306 -0
- attune_llm/hooks/executor.py +289 -0
- attune_llm/hooks/registry.py +302 -0
- attune_llm/hooks/scripts/__init__.py +39 -0
- attune_llm/hooks/scripts/evaluate_session.py +201 -0
- attune_llm/hooks/scripts/first_time_init.py +285 -0
- attune_llm/hooks/scripts/pre_compact.py +207 -0
- attune_llm/hooks/scripts/session_end.py +183 -0
- attune_llm/hooks/scripts/session_start.py +163 -0
- attune_llm/hooks/scripts/suggest_compact.py +225 -0
- attune_llm/learning/__init__.py +30 -0
- attune_llm/learning/evaluator.py +438 -0
- attune_llm/learning/extractor.py +514 -0
- attune_llm/learning/storage.py +560 -0
- attune_llm/levels.py +227 -0
- attune_llm/pattern_confidence.py +414 -0
- attune_llm/pattern_resolver.py +272 -0
- attune_llm/pattern_summary.py +350 -0
- attune_llm/providers.py +967 -0
- attune_llm/routing/__init__.py +32 -0
- attune_llm/routing/model_router.py +362 -0
- attune_llm/security/IMPLEMENTATION_SUMMARY.md +413 -0
- attune_llm/security/PHASE2_COMPLETE.md +384 -0
- attune_llm/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
- attune_llm/security/QUICK_REFERENCE.md +316 -0
- attune_llm/security/README.md +262 -0
- attune_llm/security/__init__.py +62 -0
- attune_llm/security/audit_logger.py +929 -0
- attune_llm/security/audit_logger_example.py +152 -0
- attune_llm/security/pii_scrubber.py +640 -0
- attune_llm/security/secrets_detector.py +678 -0
- attune_llm/security/secrets_detector_example.py +304 -0
- attune_llm/security/secure_memdocs.py +1192 -0
- attune_llm/security/secure_memdocs_example.py +278 -0
- attune_llm/session_status.py +745 -0
- attune_llm/state.py +246 -0
- attune_llm/utils/__init__.py +5 -0
- attune_llm/utils/tokens.py +349 -0
- attune_software/SOFTWARE_PLUGIN_README.md +57 -0
- attune_software/__init__.py +13 -0
- attune_software/cli/__init__.py +120 -0
- attune_software/cli/inspect.py +362 -0
- attune_software/cli.py +574 -0
- attune_software/plugin.py +188 -0
- workflow_scaffolding/__init__.py +11 -0
- workflow_scaffolding/__main__.py +12 -0
- workflow_scaffolding/cli.py +206 -0
- workflow_scaffolding/generator.py +265 -0

attune/workflows/code_review_pipeline.py (new file, +722 lines)

@@ -0,0 +1,722 @@

```python
"""Code Review Pipeline

A composite workflow that combines CodeReviewCrew with CodeReviewWorkflow
for comprehensive code analysis.

Modes:
- full: Run CodeReviewCrew (5 agents) + CodeReviewWorkflow
- standard: Run CodeReviewWorkflow only
- quick: Run classify + scan stages only (skip architect review)

Copyright 2025 Smart-AI-Memory
Licensed under Fair Source License 0.9
"""

import asyncio
import logging
import time
from dataclasses import dataclass, field
from typing import Any

logger = logging.getLogger(__name__)


@dataclass
class CodeReviewPipelineResult:
    """Result from CodeReviewPipeline execution."""

    success: bool
    verdict: str  # "approve", "approve_with_suggestions", "request_changes", "reject"
    quality_score: float
    crew_report: dict | None
    workflow_result: Any  # WorkflowResult or None
    combined_findings: list[dict]
    critical_count: int
    high_count: int
    medium_count: int
    agents_used: list[str]
    recommendations: list[str]
    blockers: list[str]
    mode: str
    duration_seconds: float
    cost: float
    metadata: dict = field(default_factory=dict)


class CodeReviewPipeline:
    """Composite workflow combining CodeReviewCrew with CodeReviewWorkflow.

    Provides multiple modes for different use cases:
    - full: Most comprehensive (crew + workflow)
    - standard: Balanced (workflow only)
    - quick: Fast check (minimal stages)

    Usage:
        pipeline = CodeReviewPipeline(mode="full")
        result = await pipeline.execute(
            diff="...",
            files_changed=["src/main.py"],
        )

        # Or use factory methods:
        pipeline = CodeReviewPipeline.for_pr_review(files_changed=12)
        pipeline = CodeReviewPipeline.for_quick_check()
    """

    def __init__(
        self,
        provider: str = "anthropic",
        mode: str = "full",
        parallel_crew: bool = True,
        crew_config: dict | None = None,
        **kwargs,
    ):
        """Initialize the pipeline.

        Args:
            provider: LLM provider to use (anthropic, openai, etc.)
            mode: Review mode ("full", "standard", "quick")
            parallel_crew: Run crew in parallel with workflow (full mode only)
            crew_config: Configuration for CodeReviewCrew
            **kwargs: Additional arguments (for CLI compatibility)

        """
        self.provider = provider
        self.mode = mode
        self.parallel_crew = parallel_crew
        # Inject provider into crew config
        self.crew_config = {"provider": provider, **(crew_config or {})}
        self.crew_enabled = mode == "full"

    @classmethod
    def for_pr_review(cls, files_changed: int = 0) -> "CodeReviewPipeline":
        """Factory for PR review - uses crew for complex PRs.

        Args:
            files_changed: Number of files changed in PR

        Returns:
            Pipeline configured for PR review complexity

        """
        # Use full mode for complex PRs (5+ files)
        mode = "full" if files_changed > 5 else "standard"
        return cls(mode=mode, parallel_crew=True)

    @classmethod
    def for_quick_check(cls) -> "CodeReviewPipeline":
        """Quick code check without crew - minimal analysis."""
        return cls(mode="quick", parallel_crew=False)

    @classmethod
    def for_full_review(cls) -> "CodeReviewPipeline":
        """Full review with all agents and workflow stages."""
        return cls(mode="full", parallel_crew=True)

    async def execute(
        self,
        diff: str = "",
        files_changed: list[str] | None = None,
        target: str = "",
        context: dict | None = None,
    ) -> CodeReviewPipelineResult:
        """Execute the code review pipeline.

        Args:
            diff: Code diff to review
            files_changed: List of changed files
            target: Target file/folder path (alternative to diff)
            context: Additional context for review

        Returns:
            CodeReviewPipelineResult with combined analysis

        """
        start_time = time.time()
        files_changed = files_changed or []
        context = context or {}

        # Initialize result collectors
        crew_report: dict | None = None
        workflow_result: Any = None  # WorkflowResult or None
        all_findings: list[dict] = []
        recommendations: list[str] = []
        blockers: list[str] = []
        agents_used: list[str] = []
        total_cost = 0.0

        # Get code to review
        code_to_review = diff or target

        try:
            if self.mode == "full":
                # Run crew and workflow
                crew_report, workflow_result = await self._run_full_mode(
                    code_to_review,
                    files_changed,
                    context,
                )
            elif self.mode == "standard":
                # Run workflow only
                workflow_result = await self._run_standard_mode(
                    code_to_review,
                    files_changed,
                    context,
                )
            else:  # quick
                # Run minimal workflow
                workflow_result = await self._run_quick_mode(code_to_review, files_changed, context)

            # Aggregate findings
            if crew_report:
                crew_findings = crew_report.get("findings", [])
                all_findings.extend(crew_findings)
                agents_used = crew_report.get("agents_used", [])

                # Extract crew recommendations
                for finding in crew_findings:
                    if finding.get("suggestion"):
                        recommendations.append(finding["suggestion"])

            if workflow_result:
                # Get workflow findings from various stages
                # WorkflowResult is a dataclass, access attributes directly
                wf_output = workflow_result.final_output or {}
                scan_findings = (
                    wf_output.get("security_findings", []) if isinstance(wf_output, dict) else []
                )
                all_findings.extend(scan_findings)

                # Get cost from workflow
                cost_report = workflow_result.cost_report
                if hasattr(cost_report, "total_cost"):
                    total_cost = cost_report.total_cost

            # Deduplicate findings by (file, line, type)
            all_findings = self._deduplicate_findings(all_findings)

            # Count by severity
            critical_count = len([f for f in all_findings if f.get("severity") == "critical"])
            high_count = len([f for f in all_findings if f.get("severity") == "high"])
            medium_count = len([f for f in all_findings if f.get("severity") == "medium"])

            # Determine blockers
            if critical_count > 0:
                blockers.append(f"{critical_count} critical issue(s) found")
            if high_count > 3:
                blockers.append(f"{high_count} high severity issues (threshold: 3)")

            # Calculate combined scores
            quality_score = self._calculate_quality_score(
                crew_report,
                workflow_result,
                all_findings,
            )

            # Determine verdict
            verdict = self._determine_verdict(crew_report, workflow_result, quality_score, blockers)

            duration = time.time() - start_time

            result = CodeReviewPipelineResult(
                success=True,
                verdict=verdict,
                quality_score=quality_score,
                crew_report=crew_report,
                workflow_result=workflow_result,
                combined_findings=all_findings,
                critical_count=critical_count,
                high_count=high_count,
                medium_count=medium_count,
                agents_used=agents_used,
                recommendations=recommendations[:10],  # Top 10
                blockers=blockers,
                mode=self.mode,
                duration_seconds=duration,
                cost=total_cost,
                metadata={
                    "files_reviewed": len(files_changed),
                    "total_findings": len(all_findings),
                    "crew_enabled": self.crew_enabled,
                    "parallel_crew": self.parallel_crew,
                },
            )

            # Add formatted report for human readability
            result.metadata["formatted_report"] = format_code_review_pipeline_report(result)
            return result

        except Exception as e:
            logger.error(f"CodeReviewPipeline failed: {e}")
            duration = time.time() - start_time
            return CodeReviewPipelineResult(
                success=False,
                verdict="reject",
                quality_score=0.0,
                crew_report=crew_report,
                workflow_result=workflow_result,
                combined_findings=all_findings,
                critical_count=0,
                high_count=0,
                medium_count=0,
                agents_used=agents_used,
                recommendations=[],
                blockers=[f"Pipeline error: {e!s}"],
                mode=self.mode,
                duration_seconds=duration,
                cost=total_cost,
                metadata={"error": str(e)},
            )

    async def _run_full_mode(
        self,
        code_to_review: str,
        files_changed: list[str],
        context: dict,
    ) -> tuple[dict | None, Any]:  # Second element is WorkflowResult or None
        """Run full mode with crew and workflow."""
        from .code_review import CodeReviewWorkflow

        try:
            from .code_review_adapters import (
                _check_crew_available,
                _get_crew_review,
                crew_report_to_workflow_format,
            )

            crew_available = _check_crew_available()
        except ImportError:
            # Crew adapters removed - fall back to workflow only
            crew_available = False
            _get_crew_review = None
            crew_report_to_workflow_format = None

        crew_report: dict | None = None
        workflow_result: Any = None  # WorkflowResult or None

        if crew_available and self.parallel_crew:
            # Run crew and workflow in parallel
            crew_task = asyncio.create_task(
                _get_crew_review(
                    diff=code_to_review,
                    files_changed=files_changed,
                    config=self.crew_config,
                ),
            )

            # Run workflow (without crew - we'll merge results)
            workflow = CodeReviewWorkflow(use_crew=False)
            workflow_task = asyncio.create_task(
                workflow.execute(
                    diff=code_to_review,
                    files_changed=files_changed,
                    **context,
                ),
            )

            # Wait for both
            crew_report_obj, workflow_result = await asyncio.gather(
                crew_task,
                workflow_task,
                return_exceptions=True,
            )

            # Handle crew result
            if isinstance(crew_report_obj, BaseException):
                logger.warning(f"Crew review failed: {crew_report_obj}")
            elif crew_report_obj:
                # crew_report_obj is CodeReviewReport after isinstance check above
                crew_report = crew_report_to_workflow_format(crew_report_obj)

            # Handle workflow result
            if isinstance(workflow_result, BaseException):
                logger.warning(f"Workflow failed: {workflow_result}")
                workflow_result = None

        elif crew_available:
            # Run sequentially
            crew_report_obj = await _get_crew_review(
                diff=code_to_review,
                files_changed=files_changed,
                config=self.crew_config,
            )
            if crew_report_obj:
                crew_report = crew_report_to_workflow_format(crew_report_obj)

            workflow = CodeReviewWorkflow(use_crew=False)
            workflow_result = await workflow.execute(
                diff=code_to_review,
                files_changed=files_changed,
                **context,
            )
        else:
            # Crew not available, run workflow only
            logger.info("CodeReviewCrew not available, running workflow only")
            workflow = CodeReviewWorkflow(use_crew=False)
            workflow_result = await workflow.execute(
                diff=code_to_review,
                files_changed=files_changed,
                **context,
            )

        return crew_report, workflow_result

    async def _run_standard_mode(
        self,
        code_to_review: str,
        files_changed: list[str],
        context: dict,
    ) -> Any:  # Returns WorkflowResult
        """Run standard mode with workflow only."""
        from .code_review import CodeReviewWorkflow

        workflow = CodeReviewWorkflow(use_crew=False)
        result = await workflow.execute(
            diff=code_to_review,
            files_changed=files_changed,
            **context,
        )
        return result

    async def _run_quick_mode(
        self,
        code_to_review: str,
        files_changed: list[str],
        context: dict,
    ) -> Any:  # Returns WorkflowResult
        """Run quick mode with minimal stages."""
        from .code_review import CodeReviewWorkflow

        # Use workflow but it will skip architect_review for simple changes
        workflow = CodeReviewWorkflow(
            file_threshold=1000,  # High threshold = skip architect review
            use_crew=False,
        )
        result = await workflow.execute(
            diff=code_to_review,
            files_changed=files_changed,
            is_core_module=False,
            **context,
        )
        return result

    def _deduplicate_findings(self, findings: list[dict]) -> list[dict]:
        """Deduplicate findings by (file, line, type)."""
        seen = set()
        unique = []
        for f in findings:
            key = (f.get("file"), f.get("line"), f.get("type"))
            if key not in seen:
                seen.add(key)
                unique.append(f)
        return unique

    def _calculate_quality_score(
        self,
        crew_report: dict | None,
        workflow_result: Any,  # WorkflowResult or None
        findings: list[dict],
    ) -> float:
        """Calculate combined quality score."""
        scores: list[float] = []
        weights: list[float] = []

        # Crew quality score (if available)
        if crew_report:
            crew_score: float = float(crew_report.get("quality_score", 100))
            scores.append(crew_score)
            weights.append(1.5)  # Crew gets higher weight

        # Workflow security score (if available)
        if workflow_result:
            wf_output = workflow_result.final_output or {}
            security_score = (
                wf_output.get("security_score", 90) if isinstance(wf_output, dict) else 90
            )
            scores.append(security_score)
            weights.append(1.0)

        # Calculate weighted average
        if scores:
            weighted_sum = sum(s * w for s, w in zip(scores, weights, strict=False))
            quality_score = weighted_sum / sum(weights)
        else:
            # Fallback: deduct based on findings
            quality_score = 100.0
            for f in findings:
                sev = f.get("severity", "medium")
                if sev == "critical":
                    quality_score -= 25
                elif sev == "high":
                    quality_score -= 15
                elif sev == "medium":
                    quality_score -= 5
                elif sev == "low":
                    quality_score -= 2

        return max(0.0, min(100.0, quality_score))

    def _determine_verdict(
        self,
        crew_report: dict | None,
        workflow_result: Any,  # WorkflowResult or None
        quality_score: float,
        blockers: list[str],
    ) -> str:
        """Determine final verdict based on all inputs."""
        # Start with most severe verdict
        verdict_priority = ["reject", "request_changes", "approve_with_suggestions", "approve"]

        verdicts = []

        # Crew verdict
        if crew_report:
            crew_verdict = crew_report.get("verdict", "approve")
            verdicts.append(crew_verdict)

        # Workflow verdict (from architect review)
        if workflow_result:
            wf_output = workflow_result.final_output or {}
            wf_verdict = (
                wf_output.get("verdict", "approve") if isinstance(wf_output, dict) else "approve"
            )
            verdicts.append(wf_verdict)

        # Score-based verdict
        if quality_score < 50:
            verdicts.append("reject")
        elif quality_score < 70:
            verdicts.append("request_changes")
        elif quality_score < 90:
            verdicts.append("approve_with_suggestions")
        else:
            verdicts.append("approve")

        # Blocker-based verdict
        if blockers:
            verdicts.append("request_changes")

        # Take most severe
        for v in verdict_priority:
            if v in verdicts:
                return v

        return "approve"
```
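The scoring and verdict helpers above operate on plain dictionaries, so their behavior can be checked without any LLM provider or crew. A minimal sketch, assuming the module imports as `attune.workflows.code_review_pipeline` (the path shown in the file list above); the findings and their severities are hypothetical, and the underscore-prefixed methods are called directly only for illustration:

```python
# Illustrative sketch, not part of the package: exercises the pure helpers offline.
from attune.workflows.code_review_pipeline import CodeReviewPipeline

pipeline = CodeReviewPipeline(mode="standard")  # construction makes no provider calls

findings = [  # hypothetical findings
    {"file": "src/main.py", "line": 42, "type": "sql_injection", "severity": "critical"},
    {"file": "src/main.py", "line": 42, "type": "sql_injection", "severity": "critical"},  # duplicate
    {"file": "src/api.py", "line": 7, "type": "missing_auth", "severity": "high"},
]

unique = pipeline._deduplicate_findings(findings)  # duplicate (file, line, type) key dropped -> 2 remain
score = pipeline._calculate_quality_score(None, None, unique)
# With no crew or workflow scores, the fallback path deducts 25 (critical) + 15 (high): 100 - 40 = 60.0
verdict = pipeline._determine_verdict(None, None, score, blockers=["1 critical issue(s) found"])

print(score, verdict)  # 60.0 request_changes
```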
```python
# CLI entry point
def main():
    """Run CodeReviewPipeline from command line."""
    import argparse

    parser = argparse.ArgumentParser(description="Code Review Pipeline")
    parser.add_argument("--diff", "-d", help="Code diff to review")
    parser.add_argument("--file", "-f", help="File to review")
    parser.add_argument(
        "--mode",
        "-m",
        default="full",
        choices=["full", "standard", "quick"],
        help="Review mode",
    )
    parser.add_argument(
        "--parallel/--sequential",
        dest="parallel",
        default=True,
        help="Run crew in parallel",
    )

    args = parser.parse_args()

    async def run():
        pipeline = CodeReviewPipeline(mode=args.mode, parallel_crew=args.parallel)

        diff = args.diff or ""
        if args.file:
            try:
                with open(args.file) as f:
                    diff = f.read()
            except FileNotFoundError:
                print(f"File not found: {args.file}")
                return

        result = await pipeline.execute(diff=diff)

        print("\n" + "=" * 60)
        print("CODE REVIEW PIPELINE RESULTS")
        print("=" * 60)
        print(f"Mode: {result.mode}")
        print(f"Verdict: {result.verdict.upper()}")
        print(f"Quality Score: {result.quality_score:.1f}/100")
        print(f"Duration: {result.duration_seconds * 1000:.0f}ms")
        print(f"Cost: ${result.cost:.4f}")

        if result.agents_used:
            print(f"\nAgents Used: {', '.join(result.agents_used)}")

        print(f"\nFindings: {len(result.combined_findings)} total")
        print(f"  Critical: {result.critical_count}")
        print(f"  High: {result.high_count}")
        print(f"  Medium: {result.medium_count}")

        if result.blockers:
            print("\nBlockers:")
            for b in result.blockers:
                print(f"  - {b}")

        if result.recommendations[:5]:
            print("\nTop Recommendations:")
            for r in result.recommendations[:5]:
                print(f"  - {r[:100]}...")

    asyncio.run(run())


def format_code_review_pipeline_report(result: CodeReviewPipelineResult) -> str:
    """Format code review pipeline result as a human-readable report.

    Args:
        result: The CodeReviewPipelineResult dataclass

    Returns:
        Formatted report string

    """
    lines = []

    # Header with verdict
    verdict_emoji = {
        "approve": "✅",
        "approve_with_suggestions": "🟡",
        "request_changes": "🟠",
        "reject": "🔴",
    }
    emoji = verdict_emoji.get(result.verdict, "⚪")

    lines.append("=" * 60)
    lines.append("CODE REVIEW REPORT")
    lines.append("=" * 60)
    lines.append("")

    # Verdict banner
    lines.append("-" * 60)
    lines.append(f"{emoji} VERDICT: {result.verdict.upper().replace('_', ' ')}")
    lines.append("-" * 60)
    lines.append(f"Mode: {result.mode}")
    lines.append("")

    # Quality score with visual bar
    score = result.quality_score
    bar = "█" * int(score / 10) + "░" * (10 - int(score / 10))
    quality_label = (
        "EXCELLENT"
        if score >= 90
        else "GOOD" if score >= 70 else "NEEDS WORK" if score >= 50 else "POOR"
    )
    lines.append("-" * 60)
    lines.append("QUALITY SCORE")
    lines.append("-" * 60)
    lines.append(f"[{bar}] {score:.0f}/100 ({quality_label})")
    lines.append("")

    # Crew summary (if available)
    if result.crew_report and result.crew_report.get("summary"):
        lines.append("-" * 60)
        lines.append("SUMMARY")
        lines.append("-" * 60)
        summary = result.crew_report["summary"]
        # Word wrap the summary
        words = summary.split()
        current_line = ""
        for word in words:
            if len(current_line) + len(word) + 1 <= 58:
                current_line += (" " if current_line else "") + word
            else:
                lines.append(current_line)
                current_line = word
        if current_line:
            lines.append(current_line)
        lines.append("")

    # Findings summary
    total_findings = len(result.combined_findings)
    lines.append("-" * 60)
    lines.append("FINDINGS")
    lines.append("-" * 60)

    # Show files reviewed from metadata
    files_reviewed = result.metadata.get("files_reviewed", 0)
    if files_reviewed > 0:
        lines.append(f"Files Reviewed: {files_reviewed}")

    if total_findings > 0 or result.critical_count > 0 or result.high_count > 0:
        lines.append(f"Issues Found: {total_findings}")
        lines.append(f"  🔴 Critical: {result.critical_count}")
        lines.append(f"  🟠 High: {result.high_count}")
        lines.append(f"  🟡 Medium: {result.medium_count}")
        lines.append("")

        # Show top critical/high findings
        if result.combined_findings:
            critical_high = [
                f for f in result.combined_findings if f.get("severity") in ("critical", "high")
            ][:5]
            if critical_high:
                lines.append("Top Issues:")
                for i, finding in enumerate(critical_high, 1):
                    severity = finding.get("severity", "unknown")
                    title = finding.get(
                        "title",
                        finding.get("message", finding.get("description", "Issue found")),
                    )
                    emoji_f = "🔴" if severity == "critical" else "🟠"
                    if len(str(title)) > 50:
                        title = str(title)[:47] + "..."
                    lines.append(f"  {emoji_f} {i}. {title}")
                lines.append("")
    else:
        lines.append("✅ No issues found!")
        lines.append("")

    # Blockers
    if result.blockers:
        lines.append("-" * 60)
        lines.append("🚫 BLOCKERS")
        lines.append("-" * 60)
        for blocker in result.blockers:
            lines.append(f"  • {blocker}")
        lines.append("")

    # Recommendations
    if result.recommendations:
        lines.append("-" * 60)
        lines.append("RECOMMENDATIONS")
        lines.append("-" * 60)
        for i, rec in enumerate(result.recommendations[:5], 1):
            rec_str = str(rec)
            if len(rec_str) > 55:
                rec_str = rec_str[:52] + "..."
            lines.append(f"  {i}. {rec_str}")
        if len(result.recommendations) > 5:
            lines.append(f"  ... and {len(result.recommendations) - 5} more")
        lines.append("")

    # Agents used
    if result.agents_used:
        lines.append("-" * 60)
        lines.append("AGENTS USED")
        lines.append("-" * 60)
        lines.append(f"  {', '.join(result.agents_used)}")
        lines.append("")

    # Footer
    lines.append("=" * 60)
    duration_ms = result.duration_seconds * 1000
    lines.append(f"Review completed in {duration_ms:.0f}ms | Cost: ${result.cost:.4f}")
    lines.append("=" * 60)

    return "\n".join(lines)


if __name__ == "__main__":
    main()
```