attune-ai 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- attune/__init__.py +358 -0
- attune/adaptive/__init__.py +13 -0
- attune/adaptive/task_complexity.py +127 -0
- attune/agent_monitoring.py +414 -0
- attune/cache/__init__.py +117 -0
- attune/cache/base.py +166 -0
- attune/cache/dependency_manager.py +256 -0
- attune/cache/hash_only.py +251 -0
- attune/cache/hybrid.py +457 -0
- attune/cache/storage.py +285 -0
- attune/cache_monitor.py +356 -0
- attune/cache_stats.py +298 -0
- attune/cli/__init__.py +152 -0
- attune/cli/__main__.py +12 -0
- attune/cli/commands/__init__.py +1 -0
- attune/cli/commands/batch.py +264 -0
- attune/cli/commands/cache.py +248 -0
- attune/cli/commands/help.py +331 -0
- attune/cli/commands/info.py +140 -0
- attune/cli/commands/inspect.py +436 -0
- attune/cli/commands/inspection.py +57 -0
- attune/cli/commands/memory.py +48 -0
- attune/cli/commands/metrics.py +92 -0
- attune/cli/commands/orchestrate.py +184 -0
- attune/cli/commands/patterns.py +207 -0
- attune/cli/commands/profiling.py +202 -0
- attune/cli/commands/provider.py +98 -0
- attune/cli/commands/routing.py +285 -0
- attune/cli/commands/setup.py +96 -0
- attune/cli/commands/status.py +235 -0
- attune/cli/commands/sync.py +166 -0
- attune/cli/commands/tier.py +121 -0
- attune/cli/commands/utilities.py +114 -0
- attune/cli/commands/workflow.py +579 -0
- attune/cli/core.py +32 -0
- attune/cli/parsers/__init__.py +68 -0
- attune/cli/parsers/batch.py +118 -0
- attune/cli/parsers/cache.py +65 -0
- attune/cli/parsers/help.py +41 -0
- attune/cli/parsers/info.py +26 -0
- attune/cli/parsers/inspect.py +66 -0
- attune/cli/parsers/metrics.py +42 -0
- attune/cli/parsers/orchestrate.py +61 -0
- attune/cli/parsers/patterns.py +54 -0
- attune/cli/parsers/provider.py +40 -0
- attune/cli/parsers/routing.py +110 -0
- attune/cli/parsers/setup.py +42 -0
- attune/cli/parsers/status.py +47 -0
- attune/cli/parsers/sync.py +31 -0
- attune/cli/parsers/tier.py +33 -0
- attune/cli/parsers/workflow.py +77 -0
- attune/cli/utils/__init__.py +1 -0
- attune/cli/utils/data.py +242 -0
- attune/cli/utils/helpers.py +68 -0
- attune/cli_legacy.py +3957 -0
- attune/cli_minimal.py +1159 -0
- attune/cli_router.py +437 -0
- attune/cli_unified.py +814 -0
- attune/config/__init__.py +66 -0
- attune/config/xml_config.py +286 -0
- attune/config.py +545 -0
- attune/coordination.py +870 -0
- attune/core.py +1511 -0
- attune/core_modules/__init__.py +15 -0
- attune/cost_tracker.py +626 -0
- attune/dashboard/__init__.py +41 -0
- attune/dashboard/app.py +512 -0
- attune/dashboard/simple_server.py +435 -0
- attune/dashboard/standalone_server.py +547 -0
- attune/discovery.py +306 -0
- attune/emergence.py +306 -0
- attune/exceptions.py +123 -0
- attune/feedback_loops.py +373 -0
- attune/hot_reload/README.md +473 -0
- attune/hot_reload/__init__.py +62 -0
- attune/hot_reload/config.py +83 -0
- attune/hot_reload/integration.py +229 -0
- attune/hot_reload/reloader.py +298 -0
- attune/hot_reload/watcher.py +183 -0
- attune/hot_reload/websocket.py +177 -0
- attune/levels.py +577 -0
- attune/leverage_points.py +441 -0
- attune/logging_config.py +261 -0
- attune/mcp/__init__.py +10 -0
- attune/mcp/server.py +506 -0
- attune/memory/__init__.py +237 -0
- attune/memory/claude_memory.py +469 -0
- attune/memory/config.py +224 -0
- attune/memory/control_panel.py +1290 -0
- attune/memory/control_panel_support.py +145 -0
- attune/memory/cross_session.py +845 -0
- attune/memory/edges.py +179 -0
- attune/memory/encryption.py +159 -0
- attune/memory/file_session.py +770 -0
- attune/memory/graph.py +570 -0
- attune/memory/long_term.py +913 -0
- attune/memory/long_term_types.py +99 -0
- attune/memory/mixins/__init__.py +25 -0
- attune/memory/mixins/backend_init_mixin.py +249 -0
- attune/memory/mixins/capabilities_mixin.py +208 -0
- attune/memory/mixins/handoff_mixin.py +208 -0
- attune/memory/mixins/lifecycle_mixin.py +49 -0
- attune/memory/mixins/long_term_mixin.py +352 -0
- attune/memory/mixins/promotion_mixin.py +109 -0
- attune/memory/mixins/short_term_mixin.py +182 -0
- attune/memory/nodes.py +179 -0
- attune/memory/redis_bootstrap.py +540 -0
- attune/memory/security/__init__.py +31 -0
- attune/memory/security/audit_logger.py +932 -0
- attune/memory/security/pii_scrubber.py +640 -0
- attune/memory/security/secrets_detector.py +678 -0
- attune/memory/short_term.py +2192 -0
- attune/memory/simple_storage.py +302 -0
- attune/memory/storage/__init__.py +15 -0
- attune/memory/storage_backend.py +167 -0
- attune/memory/summary_index.py +583 -0
- attune/memory/types.py +446 -0
- attune/memory/unified.py +182 -0
- attune/meta_workflows/__init__.py +74 -0
- attune/meta_workflows/agent_creator.py +248 -0
- attune/meta_workflows/builtin_templates.py +567 -0
- attune/meta_workflows/cli_commands/__init__.py +56 -0
- attune/meta_workflows/cli_commands/agent_commands.py +321 -0
- attune/meta_workflows/cli_commands/analytics_commands.py +442 -0
- attune/meta_workflows/cli_commands/config_commands.py +232 -0
- attune/meta_workflows/cli_commands/memory_commands.py +182 -0
- attune/meta_workflows/cli_commands/template_commands.py +354 -0
- attune/meta_workflows/cli_commands/workflow_commands.py +382 -0
- attune/meta_workflows/cli_meta_workflows.py +59 -0
- attune/meta_workflows/form_engine.py +292 -0
- attune/meta_workflows/intent_detector.py +409 -0
- attune/meta_workflows/models.py +569 -0
- attune/meta_workflows/pattern_learner.py +738 -0
- attune/meta_workflows/plan_generator.py +384 -0
- attune/meta_workflows/session_context.py +397 -0
- attune/meta_workflows/template_registry.py +229 -0
- attune/meta_workflows/workflow.py +984 -0
- attune/metrics/__init__.py +12 -0
- attune/metrics/collector.py +31 -0
- attune/metrics/prompt_metrics.py +194 -0
- attune/models/__init__.py +172 -0
- attune/models/__main__.py +13 -0
- attune/models/adaptive_routing.py +437 -0
- attune/models/auth_cli.py +444 -0
- attune/models/auth_strategy.py +450 -0
- attune/models/cli.py +655 -0
- attune/models/empathy_executor.py +354 -0
- attune/models/executor.py +257 -0
- attune/models/fallback.py +762 -0
- attune/models/provider_config.py +282 -0
- attune/models/registry.py +472 -0
- attune/models/tasks.py +359 -0
- attune/models/telemetry/__init__.py +71 -0
- attune/models/telemetry/analytics.py +594 -0
- attune/models/telemetry/backend.py +196 -0
- attune/models/telemetry/data_models.py +431 -0
- attune/models/telemetry/storage.py +489 -0
- attune/models/token_estimator.py +420 -0
- attune/models/validation.py +280 -0
- attune/monitoring/__init__.py +52 -0
- attune/monitoring/alerts.py +946 -0
- attune/monitoring/alerts_cli.py +448 -0
- attune/monitoring/multi_backend.py +271 -0
- attune/monitoring/otel_backend.py +362 -0
- attune/optimization/__init__.py +19 -0
- attune/optimization/context_optimizer.py +272 -0
- attune/orchestration/__init__.py +67 -0
- attune/orchestration/agent_templates.py +707 -0
- attune/orchestration/config_store.py +499 -0
- attune/orchestration/execution_strategies.py +2111 -0
- attune/orchestration/meta_orchestrator.py +1168 -0
- attune/orchestration/pattern_learner.py +696 -0
- attune/orchestration/real_tools.py +931 -0
- attune/pattern_cache.py +187 -0
- attune/pattern_library.py +542 -0
- attune/patterns/debugging/all_patterns.json +81 -0
- attune/patterns/debugging/workflow_20260107_1770825e.json +77 -0
- attune/patterns/refactoring_memory.json +89 -0
- attune/persistence.py +564 -0
- attune/platform_utils.py +265 -0
- attune/plugins/__init__.py +28 -0
- attune/plugins/base.py +361 -0
- attune/plugins/registry.py +268 -0
- attune/project_index/__init__.py +32 -0
- attune/project_index/cli.py +335 -0
- attune/project_index/index.py +667 -0
- attune/project_index/models.py +504 -0
- attune/project_index/reports.py +474 -0
- attune/project_index/scanner.py +777 -0
- attune/project_index/scanner_parallel.py +291 -0
- attune/prompts/__init__.py +61 -0
- attune/prompts/config.py +77 -0
- attune/prompts/context.py +177 -0
- attune/prompts/parser.py +285 -0
- attune/prompts/registry.py +313 -0
- attune/prompts/templates.py +208 -0
- attune/redis_config.py +302 -0
- attune/redis_memory.py +799 -0
- attune/resilience/__init__.py +56 -0
- attune/resilience/circuit_breaker.py +256 -0
- attune/resilience/fallback.py +179 -0
- attune/resilience/health.py +300 -0
- attune/resilience/retry.py +209 -0
- attune/resilience/timeout.py +135 -0
- attune/routing/__init__.py +43 -0
- attune/routing/chain_executor.py +433 -0
- attune/routing/classifier.py +217 -0
- attune/routing/smart_router.py +234 -0
- attune/routing/workflow_registry.py +343 -0
- attune/scaffolding/README.md +589 -0
- attune/scaffolding/__init__.py +35 -0
- attune/scaffolding/__main__.py +14 -0
- attune/scaffolding/cli.py +240 -0
- attune/scaffolding/templates/base_wizard.py.jinja2 +121 -0
- attune/scaffolding/templates/coach_wizard.py.jinja2 +321 -0
- attune/scaffolding/templates/domain_wizard.py.jinja2 +408 -0
- attune/scaffolding/templates/linear_flow_wizard.py.jinja2 +203 -0
- attune/socratic/__init__.py +256 -0
- attune/socratic/ab_testing.py +958 -0
- attune/socratic/blueprint.py +533 -0
- attune/socratic/cli.py +703 -0
- attune/socratic/collaboration.py +1114 -0
- attune/socratic/domain_templates.py +924 -0
- attune/socratic/embeddings.py +738 -0
- attune/socratic/engine.py +794 -0
- attune/socratic/explainer.py +682 -0
- attune/socratic/feedback.py +772 -0
- attune/socratic/forms.py +629 -0
- attune/socratic/generator.py +732 -0
- attune/socratic/llm_analyzer.py +637 -0
- attune/socratic/mcp_server.py +702 -0
- attune/socratic/session.py +312 -0
- attune/socratic/storage.py +667 -0
- attune/socratic/success.py +730 -0
- attune/socratic/visual_editor.py +860 -0
- attune/socratic/web_ui.py +958 -0
- attune/telemetry/__init__.py +39 -0
- attune/telemetry/agent_coordination.py +475 -0
- attune/telemetry/agent_tracking.py +367 -0
- attune/telemetry/approval_gates.py +545 -0
- attune/telemetry/cli.py +1231 -0
- attune/telemetry/commands/__init__.py +14 -0
- attune/telemetry/commands/dashboard_commands.py +696 -0
- attune/telemetry/event_streaming.py +409 -0
- attune/telemetry/feedback_loop.py +567 -0
- attune/telemetry/usage_tracker.py +591 -0
- attune/templates.py +754 -0
- attune/test_generator/__init__.py +38 -0
- attune/test_generator/__main__.py +14 -0
- attune/test_generator/cli.py +234 -0
- attune/test_generator/generator.py +355 -0
- attune/test_generator/risk_analyzer.py +216 -0
- attune/test_generator/templates/unit_test.py.jinja2 +272 -0
- attune/tier_recommender.py +384 -0
- attune/tools.py +183 -0
- attune/trust/__init__.py +28 -0
- attune/trust/circuit_breaker.py +579 -0
- attune/trust_building.py +527 -0
- attune/validation/__init__.py +19 -0
- attune/validation/xml_validator.py +281 -0
- attune/vscode_bridge.py +173 -0
- attune/workflow_commands.py +780 -0
- attune/workflow_patterns/__init__.py +33 -0
- attune/workflow_patterns/behavior.py +249 -0
- attune/workflow_patterns/core.py +76 -0
- attune/workflow_patterns/output.py +99 -0
- attune/workflow_patterns/registry.py +255 -0
- attune/workflow_patterns/structural.py +288 -0
- attune/workflows/__init__.py +539 -0
- attune/workflows/autonomous_test_gen.py +1268 -0
- attune/workflows/base.py +2667 -0
- attune/workflows/batch_processing.py +342 -0
- attune/workflows/bug_predict.py +1084 -0
- attune/workflows/builder.py +273 -0
- attune/workflows/caching.py +253 -0
- attune/workflows/code_review.py +1048 -0
- attune/workflows/code_review_adapters.py +312 -0
- attune/workflows/code_review_pipeline.py +722 -0
- attune/workflows/config.py +645 -0
- attune/workflows/dependency_check.py +644 -0
- attune/workflows/document_gen/__init__.py +25 -0
- attune/workflows/document_gen/config.py +30 -0
- attune/workflows/document_gen/report_formatter.py +162 -0
- attune/workflows/document_gen/workflow.py +1426 -0
- attune/workflows/document_manager.py +216 -0
- attune/workflows/document_manager_README.md +134 -0
- attune/workflows/documentation_orchestrator.py +1205 -0
- attune/workflows/history.py +510 -0
- attune/workflows/keyboard_shortcuts/__init__.py +39 -0
- attune/workflows/keyboard_shortcuts/generators.py +391 -0
- attune/workflows/keyboard_shortcuts/parsers.py +416 -0
- attune/workflows/keyboard_shortcuts/prompts.py +295 -0
- attune/workflows/keyboard_shortcuts/schema.py +193 -0
- attune/workflows/keyboard_shortcuts/workflow.py +509 -0
- attune/workflows/llm_base.py +363 -0
- attune/workflows/manage_docs.py +87 -0
- attune/workflows/manage_docs_README.md +134 -0
- attune/workflows/manage_documentation.py +821 -0
- attune/workflows/new_sample_workflow1.py +149 -0
- attune/workflows/new_sample_workflow1_README.md +150 -0
- attune/workflows/orchestrated_health_check.py +849 -0
- attune/workflows/orchestrated_release_prep.py +600 -0
- attune/workflows/output.py +413 -0
- attune/workflows/perf_audit.py +863 -0
- attune/workflows/pr_review.py +762 -0
- attune/workflows/progress.py +785 -0
- attune/workflows/progress_server.py +322 -0
- attune/workflows/progressive/README 2.md +454 -0
- attune/workflows/progressive/README.md +454 -0
- attune/workflows/progressive/__init__.py +82 -0
- attune/workflows/progressive/cli.py +219 -0
- attune/workflows/progressive/core.py +488 -0
- attune/workflows/progressive/orchestrator.py +723 -0
- attune/workflows/progressive/reports.py +520 -0
- attune/workflows/progressive/telemetry.py +274 -0
- attune/workflows/progressive/test_gen.py +495 -0
- attune/workflows/progressive/workflow.py +589 -0
- attune/workflows/refactor_plan.py +694 -0
- attune/workflows/release_prep.py +895 -0
- attune/workflows/release_prep_crew.py +969 -0
- attune/workflows/research_synthesis.py +404 -0
- attune/workflows/routing.py +168 -0
- attune/workflows/secure_release.py +593 -0
- attune/workflows/security_adapters.py +297 -0
- attune/workflows/security_audit.py +1329 -0
- attune/workflows/security_audit_phase3.py +355 -0
- attune/workflows/seo_optimization.py +633 -0
- attune/workflows/step_config.py +234 -0
- attune/workflows/telemetry_mixin.py +269 -0
- attune/workflows/test5.py +125 -0
- attune/workflows/test5_README.md +158 -0
- attune/workflows/test_coverage_boost_crew.py +849 -0
- attune/workflows/test_gen/__init__.py +52 -0
- attune/workflows/test_gen/ast_analyzer.py +249 -0
- attune/workflows/test_gen/config.py +88 -0
- attune/workflows/test_gen/data_models.py +38 -0
- attune/workflows/test_gen/report_formatter.py +289 -0
- attune/workflows/test_gen/test_templates.py +381 -0
- attune/workflows/test_gen/workflow.py +655 -0
- attune/workflows/test_gen.py +54 -0
- attune/workflows/test_gen_behavioral.py +477 -0
- attune/workflows/test_gen_parallel.py +341 -0
- attune/workflows/test_lifecycle.py +526 -0
- attune/workflows/test_maintenance.py +627 -0
- attune/workflows/test_maintenance_cli.py +590 -0
- attune/workflows/test_maintenance_crew.py +840 -0
- attune/workflows/test_runner.py +622 -0
- attune/workflows/tier_tracking.py +531 -0
- attune/workflows/xml_enhanced_crew.py +285 -0
- attune_ai-2.0.0.dist-info/METADATA +1026 -0
- attune_ai-2.0.0.dist-info/RECORD +457 -0
- attune_ai-2.0.0.dist-info/WHEEL +5 -0
- attune_ai-2.0.0.dist-info/entry_points.txt +26 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE +201 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +101 -0
- attune_ai-2.0.0.dist-info/top_level.txt +5 -0
- attune_healthcare/__init__.py +13 -0
- attune_healthcare/monitors/__init__.py +9 -0
- attune_healthcare/monitors/clinical_protocol_monitor.py +315 -0
- attune_healthcare/monitors/monitoring/__init__.py +44 -0
- attune_healthcare/monitors/monitoring/protocol_checker.py +300 -0
- attune_healthcare/monitors/monitoring/protocol_loader.py +214 -0
- attune_healthcare/monitors/monitoring/sensor_parsers.py +306 -0
- attune_healthcare/monitors/monitoring/trajectory_analyzer.py +389 -0
- attune_llm/README.md +553 -0
- attune_llm/__init__.py +28 -0
- attune_llm/agent_factory/__init__.py +53 -0
- attune_llm/agent_factory/adapters/__init__.py +85 -0
- attune_llm/agent_factory/adapters/autogen_adapter.py +312 -0
- attune_llm/agent_factory/adapters/crewai_adapter.py +483 -0
- attune_llm/agent_factory/adapters/haystack_adapter.py +298 -0
- attune_llm/agent_factory/adapters/langchain_adapter.py +362 -0
- attune_llm/agent_factory/adapters/langgraph_adapter.py +333 -0
- attune_llm/agent_factory/adapters/native.py +228 -0
- attune_llm/agent_factory/adapters/wizard_adapter.py +423 -0
- attune_llm/agent_factory/base.py +305 -0
- attune_llm/agent_factory/crews/__init__.py +67 -0
- attune_llm/agent_factory/crews/code_review.py +1113 -0
- attune_llm/agent_factory/crews/health_check.py +1262 -0
- attune_llm/agent_factory/crews/refactoring.py +1128 -0
- attune_llm/agent_factory/crews/security_audit.py +1018 -0
- attune_llm/agent_factory/decorators.py +287 -0
- attune_llm/agent_factory/factory.py +558 -0
- attune_llm/agent_factory/framework.py +193 -0
- attune_llm/agent_factory/memory_integration.py +328 -0
- attune_llm/agent_factory/resilient.py +320 -0
- attune_llm/agents_md/__init__.py +22 -0
- attune_llm/agents_md/loader.py +218 -0
- attune_llm/agents_md/parser.py +271 -0
- attune_llm/agents_md/registry.py +307 -0
- attune_llm/claude_memory.py +466 -0
- attune_llm/cli/__init__.py +8 -0
- attune_llm/cli/sync_claude.py +487 -0
- attune_llm/code_health.py +1313 -0
- attune_llm/commands/__init__.py +51 -0
- attune_llm/commands/context.py +375 -0
- attune_llm/commands/loader.py +301 -0
- attune_llm/commands/models.py +231 -0
- attune_llm/commands/parser.py +371 -0
- attune_llm/commands/registry.py +429 -0
- attune_llm/config/__init__.py +29 -0
- attune_llm/config/unified.py +291 -0
- attune_llm/context/__init__.py +22 -0
- attune_llm/context/compaction.py +455 -0
- attune_llm/context/manager.py +434 -0
- attune_llm/contextual_patterns.py +361 -0
- attune_llm/core.py +907 -0
- attune_llm/git_pattern_extractor.py +435 -0
- attune_llm/hooks/__init__.py +24 -0
- attune_llm/hooks/config.py +306 -0
- attune_llm/hooks/executor.py +289 -0
- attune_llm/hooks/registry.py +302 -0
- attune_llm/hooks/scripts/__init__.py +39 -0
- attune_llm/hooks/scripts/evaluate_session.py +201 -0
- attune_llm/hooks/scripts/first_time_init.py +285 -0
- attune_llm/hooks/scripts/pre_compact.py +207 -0
- attune_llm/hooks/scripts/session_end.py +183 -0
- attune_llm/hooks/scripts/session_start.py +163 -0
- attune_llm/hooks/scripts/suggest_compact.py +225 -0
- attune_llm/learning/__init__.py +30 -0
- attune_llm/learning/evaluator.py +438 -0
- attune_llm/learning/extractor.py +514 -0
- attune_llm/learning/storage.py +560 -0
- attune_llm/levels.py +227 -0
- attune_llm/pattern_confidence.py +414 -0
- attune_llm/pattern_resolver.py +272 -0
- attune_llm/pattern_summary.py +350 -0
- attune_llm/providers.py +967 -0
- attune_llm/routing/__init__.py +32 -0
- attune_llm/routing/model_router.py +362 -0
- attune_llm/security/IMPLEMENTATION_SUMMARY.md +413 -0
- attune_llm/security/PHASE2_COMPLETE.md +384 -0
- attune_llm/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
- attune_llm/security/QUICK_REFERENCE.md +316 -0
- attune_llm/security/README.md +262 -0
- attune_llm/security/__init__.py +62 -0
- attune_llm/security/audit_logger.py +929 -0
- attune_llm/security/audit_logger_example.py +152 -0
- attune_llm/security/pii_scrubber.py +640 -0
- attune_llm/security/secrets_detector.py +678 -0
- attune_llm/security/secrets_detector_example.py +304 -0
- attune_llm/security/secure_memdocs.py +1192 -0
- attune_llm/security/secure_memdocs_example.py +278 -0
- attune_llm/session_status.py +745 -0
- attune_llm/state.py +246 -0
- attune_llm/utils/__init__.py +5 -0
- attune_llm/utils/tokens.py +349 -0
- attune_software/SOFTWARE_PLUGIN_README.md +57 -0
- attune_software/__init__.py +13 -0
- attune_software/cli/__init__.py +120 -0
- attune_software/cli/inspect.py +362 -0
- attune_software/cli.py +574 -0
- attune_software/plugin.py +188 -0
- workflow_scaffolding/__init__.py +11 -0
- workflow_scaffolding/__main__.py +12 -0
- workflow_scaffolding/cli.py +206 -0
- workflow_scaffolding/generator.py +265 -0
|
@@ -0,0 +1,840 @@
|
|
|
1
|
+
"""Test Maintenance Crew - CrewAI-Based Automated Test Management
|
|
2
|
+
|
|
3
|
+
.. deprecated:: 4.3.0
|
|
4
|
+
This workflow is deprecated in favor of the meta-workflow system.
|
|
5
|
+
Use ``empathy meta-workflow run test-maintenance`` instead.
|
|
6
|
+
See docs/CREWAI_MIGRATION.md for migration guide.
|
|
7
|
+
|
|
8
|
+
A crew of specialized agents that collaboratively manage the test lifecycle:
|
|
9
|
+
- Test Analyst: Analyzes coverage gaps and prioritizes work
|
|
10
|
+
- Test Generator: Creates new tests using LLM
|
|
11
|
+
- Test Validator: Verifies generated tests work correctly
|
|
12
|
+
- Test Reporter: Generates status reports and recommendations
|
|
13
|
+
|
|
14
|
+
The crew can operate autonomously on a schedule or be triggered by events.
|
|
15
|
+
|
|
16
|
+
Copyright 2025 Smart AI Memory, LLC
|
|
17
|
+
Licensed under Fair Source 0.9
|
|
18
|
+
"""
|
|
19
|
+
|
|
20
|
+
import heapq
|
|
21
|
+
import logging
|
|
22
|
+
import warnings
|
|
23
|
+
from dataclasses import dataclass, field
|
|
24
|
+
from datetime import datetime
|
|
25
|
+
from pathlib import Path
|
|
26
|
+
from typing import Any
|
|
27
|
+
|
|
28
|
+
from ..project_index import ProjectIndex
|
|
29
|
+
from ..project_index.reports import ReportGenerator
|
|
30
|
+
from .test_maintenance import TestAction, TestMaintenanceWorkflow, TestPlanItem, TestPriority
|
|
31
|
+
|
|
32
|
+
logger = logging.getLogger(__name__)
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
@dataclass
class AgentResult:
    """Result from an agent's work.

    A uniform envelope returned by every crew agent task, carrying the
    task-specific payload plus timing metadata for crew-level reporting.
    """

    agent: str  # Human-readable agent name, e.g. "Test Analyst"
    task: str  # Task identifier, e.g. "analyze_coverage_gaps"
    success: bool  # True when the task completed without failures
    output: dict[str, Any]  # Task-specific payload (counts, file lists, recommendations, ...)
    duration_ms: int = 0  # Wall-clock duration of the task in milliseconds
    timestamp: datetime = field(default_factory=datetime.now)  # Creation time of this result
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
@dataclass
class CrewConfig:
    """Configuration for the test maintenance crew.

    Defaults are tuned for an autonomous daily run; override per project
    as needed.
    """

    # Agent settings
    enable_auto_generation: bool = True  # Allow the generator agent to write new test files
    enable_auto_validation: bool = True  # Allow the validator agent to run generated tests
    max_files_per_run: int = 10  # Cap on plan items handled in a single crew run

    # Thresholds
    min_coverage_target: float = 80.0  # Coverage goal in percent (not read in this module's visible code)
    staleness_threshold_days: int = 7  # Staleness cutoff in days — TODO confirm how staleness_days is computed
    high_impact_threshold: float = 5.0  # impact_score at or above this marks a file as high-impact

    # Scheduling
    auto_run_interval_hours: int = 24  # Interval between scheduled autonomous runs
    run_on_commit: bool = True  # Also trigger the crew on commit events

    # LLM settings
    test_gen_model: str = "sonnet"  # Model tier used for test generation
    validation_model: str = "haiku"  # Cheaper model tier used for validation

    # Validation settings
    validation_timeout_seconds: int = 120  # Per-file timeout
    validation_optional: bool = True  # Don't fail crew if validation fails
    skip_validation_on_timeout: bool = True  # Continue on timeout
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
class TestAnalystAgent:
    """Analyzes test coverage and prioritizes work.

    Responsibilities:
    - Identify files needing tests
    - Calculate priority based on impact
    - Generate maintenance plans
    - Track test health metrics
    """

    def __init__(self, index: ProjectIndex, config: CrewConfig):
        self.index = index
        self.config = config
        self.name = "Test Analyst"

    @staticmethod
    def _elapsed_ms(started_at: datetime) -> int:
        """Return whole milliseconds elapsed since *started_at*."""
        return int((datetime.now() - started_at).total_seconds() * 1000)

    async def analyze_coverage_gaps(self) -> AgentResult:
        """Identify files with coverage gaps."""
        started_at = datetime.now()

        gaps = self.index.get_files_needing_tests()
        threshold = self.config.high_impact_threshold
        high_impact = [entry for entry in gaps if entry.impact_score >= threshold]

        # Surface only the ten highest-impact offenders in the payload.
        top_ten = heapq.nlargest(10, high_impact, key=lambda entry: entry.impact_score)
        payload = {
            "total_gaps": len(gaps),
            "high_impact_gaps": len(high_impact),
            "priority_files": [
                {
                    "path": entry.path,
                    "impact": entry.impact_score,
                    "loc": entry.lines_of_code,
                }
                for entry in top_ten
            ],
            "recommendation": self._generate_recommendation(gaps, high_impact),
        }

        return AgentResult(
            agent=self.name,
            task="analyze_coverage_gaps",
            success=True,
            output=payload,
            duration_ms=self._elapsed_ms(started_at),
        )

    async def analyze_staleness(self) -> AgentResult:
        """Identify files with stale tests."""
        started_at = datetime.now()

        stale = self.index.get_stale_files()
        if stale:
            avg_days = sum(entry.staleness_days for entry in stale) / len(stale)
        else:
            avg_days = 0

        # Report the ten most out-of-date test files.
        worst = heapq.nlargest(10, stale, key=lambda entry: entry.staleness_days)
        payload = {
            "stale_count": len(stale),
            "avg_staleness_days": avg_days,
            "stale_files": [
                {
                    "path": entry.path,
                    "staleness_days": entry.staleness_days,
                    "test_file": entry.test_file_path,
                }
                for entry in worst
            ],
        }

        return AgentResult(
            agent=self.name,
            task="analyze_staleness",
            success=True,
            output=payload,
            duration_ms=self._elapsed_ms(started_at),
        )

    async def generate_plan(self) -> AgentResult:
        """Generate a prioritized maintenance plan."""
        started_at = datetime.now()

        # Delegate plan construction to the maintenance workflow in analyze mode.
        workflow = TestMaintenanceWorkflow(str(self.index.project_root), self.index)
        plan = await workflow.run(
            {
                "mode": "analyze",
                "max_items": self.config.max_files_per_run,
            },
        )

        return AgentResult(
            agent=self.name,
            task="generate_plan",
            success=True,
            output=plan,
            duration_ms=self._elapsed_ms(started_at),
        )

    def _generate_recommendation(self, all_gaps: list, high_impact: list) -> str:
        """Generate actionable recommendation."""
        urgent = len(high_impact)
        total = len(all_gaps)
        if urgent > 5:
            return f"URGENT: {urgent} high-impact files need tests. Start with the top 5."
        if urgent > 0:
            return f"Prioritize {urgent} high-impact files before addressing remaining {total - urgent} gaps."
        if total > 20:
            return f"Consider batch test generation for {total} files."
        if total > 0:
            return f"Address {total} remaining test gaps to improve coverage."
        return "Excellent! All files requiring tests have coverage."
|
|
187
|
+
|
|
188
|
+
|
|
189
|
+
class TestGeneratorAgent:
|
|
190
|
+
"""Generates tests for source files.
|
|
191
|
+
|
|
192
|
+
Responsibilities:
|
|
193
|
+
- Read source file and understand its structure
|
|
194
|
+
- Generate appropriate test cases
|
|
195
|
+
- Follow project testing patterns
|
|
196
|
+
- Write test files to correct location
|
|
197
|
+
"""
|
|
198
|
+
|
|
199
|
+
def __init__(self, project_root: Path, index: ProjectIndex, config: CrewConfig):
|
|
200
|
+
self.project_root = project_root
|
|
201
|
+
self.index = index
|
|
202
|
+
self.config = config
|
|
203
|
+
self.name = "Test Generator"
|
|
204
|
+
|
|
205
|
+
async def generate_tests(self, plan_items: list[TestPlanItem]) -> AgentResult:
|
|
206
|
+
"""Generate tests for files in the plan."""
|
|
207
|
+
start = datetime.now()
|
|
208
|
+
|
|
209
|
+
results = []
|
|
210
|
+
succeeded = 0
|
|
211
|
+
failed = 0
|
|
212
|
+
|
|
213
|
+
for item in plan_items:
|
|
214
|
+
if item.action != TestAction.CREATE:
|
|
215
|
+
continue
|
|
216
|
+
|
|
217
|
+
try:
|
|
218
|
+
result = await self._generate_test_for_file(item)
|
|
219
|
+
results.append(result)
|
|
220
|
+
if result["success"]:
|
|
221
|
+
succeeded += 1
|
|
222
|
+
else:
|
|
223
|
+
failed += 1
|
|
224
|
+
except Exception as e:
|
|
225
|
+
logger.error(f"Failed to generate tests for {item.file_path}: {e}")
|
|
226
|
+
failed += 1
|
|
227
|
+
results.append(
|
|
228
|
+
{
|
|
229
|
+
"file": item.file_path,
|
|
230
|
+
"success": False,
|
|
231
|
+
"error": str(e),
|
|
232
|
+
},
|
|
233
|
+
)
|
|
234
|
+
|
|
235
|
+
duration = int((datetime.now() - start).total_seconds() * 1000)
|
|
236
|
+
|
|
237
|
+
return AgentResult(
|
|
238
|
+
agent=self.name,
|
|
239
|
+
task="generate_tests",
|
|
240
|
+
success=failed == 0,
|
|
241
|
+
output={
|
|
242
|
+
"processed": len(results),
|
|
243
|
+
"succeeded": succeeded,
|
|
244
|
+
"failed": failed,
|
|
245
|
+
"results": results,
|
|
246
|
+
},
|
|
247
|
+
duration_ms=duration,
|
|
248
|
+
)
|
|
249
|
+
|
|
250
|
+
async def _generate_test_for_file(self, item: TestPlanItem) -> dict[str, Any]:
|
|
251
|
+
"""Generate tests for a single file."""
|
|
252
|
+
source_path = self.project_root / item.file_path
|
|
253
|
+
|
|
254
|
+
if not source_path.exists():
|
|
255
|
+
return {
|
|
256
|
+
"file": item.file_path,
|
|
257
|
+
"success": False,
|
|
258
|
+
"error": "Source file not found",
|
|
259
|
+
}
|
|
260
|
+
|
|
261
|
+
# Determine test file path
|
|
262
|
+
test_file_path = self._determine_test_path(item.file_path)
|
|
263
|
+
full_test_path = self.project_root / test_file_path
|
|
264
|
+
|
|
265
|
+
# Skip if test file already exists and has content
|
|
266
|
+
if full_test_path.exists():
|
|
267
|
+
existing_content = full_test_path.read_text(encoding="utf-8")
|
|
268
|
+
# Only skip if file has real tests (not just placeholder)
|
|
269
|
+
if "def test_" in existing_content and "assert True # Replace" not in existing_content:
|
|
270
|
+
return {
|
|
271
|
+
"file": item.file_path,
|
|
272
|
+
"test_file": test_file_path,
|
|
273
|
+
"success": True,
|
|
274
|
+
"skipped": True,
|
|
275
|
+
"reason": "Test file already exists with real tests",
|
|
276
|
+
}
|
|
277
|
+
|
|
278
|
+
# Read source file
|
|
279
|
+
try:
|
|
280
|
+
source_code = source_path.read_text(encoding="utf-8")
|
|
281
|
+
except Exception as e:
|
|
282
|
+
return {
|
|
283
|
+
"file": item.file_path,
|
|
284
|
+
"success": False,
|
|
285
|
+
"error": f"Failed to read source: {e}",
|
|
286
|
+
}
|
|
287
|
+
|
|
288
|
+
# Generate test code (placeholder - would use LLM)
|
|
289
|
+
test_code = self._generate_test_code(item.file_path, source_code, item.metadata)
|
|
290
|
+
|
|
291
|
+
# Write test file
|
|
292
|
+
try:
|
|
293
|
+
full_test_path.parent.mkdir(parents=True, exist_ok=True)
|
|
294
|
+
full_test_path.write_text(test_code, encoding="utf-8")
|
|
295
|
+
except Exception as e:
|
|
296
|
+
return {
|
|
297
|
+
"file": item.file_path,
|
|
298
|
+
"success": False,
|
|
299
|
+
"error": f"Failed to write test file: {e}",
|
|
300
|
+
}
|
|
301
|
+
|
|
302
|
+
# Update index
|
|
303
|
+
self.index.update_file(
|
|
304
|
+
item.file_path,
|
|
305
|
+
tests_exist=True,
|
|
306
|
+
test_file_path=test_file_path,
|
|
307
|
+
tests_last_modified=datetime.now(),
|
|
308
|
+
is_stale=False,
|
|
309
|
+
staleness_days=0,
|
|
310
|
+
)
|
|
311
|
+
|
|
312
|
+
return {
|
|
313
|
+
"file": item.file_path,
|
|
314
|
+
"test_file": test_file_path,
|
|
315
|
+
"success": True,
|
|
316
|
+
"lines_generated": len(test_code.split("\n")),
|
|
317
|
+
}
|
|
318
|
+
|
|
319
|
+
def _determine_test_path(self, source_path: str) -> str:
|
|
320
|
+
"""Determine the test file path for a source file."""
|
|
321
|
+
path = Path(source_path)
|
|
322
|
+
|
|
323
|
+
# Standard pattern: src/module/file.py -> tests/test_file.py
|
|
324
|
+
if path.parts[0] == "src":
|
|
325
|
+
test_name = f"test_{path.stem}.py"
|
|
326
|
+
return f"tests/{test_name}"
|
|
327
|
+
|
|
328
|
+
# Module in root: module/file.py -> tests/test_file.py
|
|
329
|
+
test_name = f"test_{path.stem}.py"
|
|
330
|
+
return f"tests/{test_name}"
|
|
331
|
+
|
|
332
|
+
def _generate_test_code(
    self,
    source_path: str,
    source_code: str,  # unused here; presumably the input for future LLM-backed generation - confirm
    metadata: dict[str, Any],
) -> str:
    """Generate placeholder test code for a source file.

    Produces a pytest module containing a ``Test<ClassName>`` class with a
    single ``test_placeholder``. The literal ``assert True # Replace`` marker
    is what lets later runs recognize placeholder-only test files and
    regenerate them instead of skipping.

    Args:
        source_path: Source file path; its stem names the module and class.
        source_code: Raw source text (currently unused).
        metadata: Work-item metadata; ``lines_of_code`` and ``complexity``
            are embedded as comments when present, else "unknown".

    Returns:
        Complete Python test-module text.
    """
    # This is a placeholder - would integrate with LLM for real generation
    module_name = Path(source_path).stem
    # e.g. "foo_bar" -> "FooBar" for the generated test class name
    class_name = "".join(word.capitalize() for word in module_name.split("_"))

    return f'''"""
Tests for {source_path}

Auto-generated by Test Maintenance Crew.
Review and enhance as needed.
"""

import pytest

# TODO: Import the module being tested
# from {module_name} import ...


class Test{class_name}:
    """Tests for {module_name} module."""

    def test_placeholder(self):
        """Placeholder test - implement actual tests."""
        # TODO: Implement actual tests
        # Source file has {metadata.get("lines_of_code", "unknown")} lines
        # Complexity score: {metadata.get("complexity", "unknown")}
        assert True # Replace with actual assertions


# TODO: Add more test cases based on the source code
'''
|
|
369
|
+
|
|
370
|
+
|
|
371
|
+
class TestValidatorAgent:
    """Validates generated tests by executing them with pytest.

    Responsibilities:
    - Run generated tests to verify they pass
    - Check test coverage
    - Identify issues with generated tests
    - Suggest improvements
    """

    def __init__(self, project_root: Path, config: CrewConfig) -> None:
        """Initialize the validator.

        Args:
            project_root: Project root; used both to resolve test paths and
                as the working directory for pytest subprocess runs.
            config: Crew configuration (timeouts, optional-validation flags).
        """
        self.project_root = project_root
        self.config = config
        self.name = "Test Validator"

    async def validate_tests(self, test_files: list[str]) -> AgentResult:
        """Validate that tests run correctly.

        Runs each file via ``_run_test_file`` and tallies the
        passed/failed/skipped counts.

        Args:
            test_files: Test file paths relative to ``project_root``.

        Returns:
            AgentResult whose output holds per-file results and the totals.
        """
        start = datetime.now()

        results = []
        passed = 0
        failed = 0
        skipped = 0

        for test_file in test_files:
            try:
                result = await self._run_test_file(test_file)
                results.append(result)
                # Check "skipped" before "passed": a timeout skip reports
                # passed=False but must not count as a failure.
                if result.get("skipped"):
                    skipped += 1
                elif result["passed"]:
                    passed += 1
                else:
                    failed += 1
            except Exception as e:
                logger.error(f"Validation error for {test_file}: {e}")
                results.append(
                    {
                        "file": test_file,
                        "passed": False,
                        "error": str(e),
                    },
                )
                failed += 1

        duration = int((datetime.now() - start).total_seconds() * 1000)

        # Success depends on config - if validation is optional, we succeed even with failures
        success = (failed == 0) or self.config.validation_optional

        return AgentResult(
            agent=self.name,
            task="validate_tests",
            success=success,
            output={
                "total": len(test_files),
                "passed": passed,
                "failed": failed,
                "skipped": skipped,
                "results": results,
                "validation_optional": self.config.validation_optional,
            },
            duration_ms=duration,
        )

    async def validate_single(self, test_file: str) -> AgentResult:
        """Validate a single test file (for validate-only mode)."""
        start = datetime.now()
        result = await self._run_test_file(test_file)
        duration = int((datetime.now() - start).total_seconds() * 1000)

        return AgentResult(
            agent=self.name,
            task="validate_single",
            success=result["passed"],
            output=result,
            duration_ms=duration,
        )

    async def _run_test_file(self, test_file: str) -> dict[str, Any]:
        """Run a single test file under pytest in a subprocess.

        Returns:
            Dict with keys ``file`` and ``passed``, plus either the captured
            ``output``/``errors`` or an ``error`` message (and ``skipped``
            when a timeout is configured to be skipped rather than failed).
        """
        import subprocess
        import sys

        full_path = self.project_root / test_file

        if not full_path.exists():
            return {
                "file": test_file,
                "passed": False,
                "error": "Test file not found",
            }

        timeout = self.config.validation_timeout_seconds

        try:
            # Run pytest without coverage to avoid coverage threshold failures.
            # Use sys.executable rather than a bare "python": on some systems
            # "python" is absent from PATH or resolves to a different
            # interpreter/virtualenv than the one running this process.
            result = subprocess.run(
                [
                    sys.executable,
                    "-m",
                    "pytest",
                    str(full_path),
                    "-v",
                    "--tb=short",
                    "-x",
                    "--no-cov",
                ],
                check=False,
                capture_output=True,
                text=True,
                timeout=timeout,
                cwd=str(self.project_root),
            )

            # Check if tests passed (look for "passed" in output even if returncode != 0)
            tests_passed = result.returncode == 0
            if not tests_passed and result.stdout:
                # pytest may return non-zero for coverage issues even when tests
                # pass. Compare both markers against the same lowercased text
                # (the original mixed a case-sensitive "passed" check with a
                # lowercased "failed" check).
                stdout_lower = result.stdout.lower()
                if "passed" in stdout_lower and "failed" not in stdout_lower:
                    tests_passed = True

            return {
                "file": test_file,
                "passed": tests_passed,
                # Keep only output tails to bound the size of stored results.
                "output": result.stdout[-1000:] if result.stdout else "",
                "errors": result.stderr[-500:] if result.stderr else "",
            }

        except subprocess.TimeoutExpired:
            logger.warning(f"Test timeout for {test_file} after {timeout}s")
            if self.config.skip_validation_on_timeout:
                return {
                    "file": test_file,
                    "passed": False,
                    "skipped": True,
                    "error": f"Test timeout after {timeout}s - skipped",
                }
            return {
                "file": test_file,
                "passed": False,
                "error": f"Test timeout after {timeout}s",
            }
        except Exception as e:
            logger.error(f"Validation error for {test_file}: {e}")
            return {
                "file": test_file,
                "passed": False,
                "error": str(e),
            }
|
|
511
|
+
|
|
512
|
+
|
|
513
|
+
class TestReporterAgent:
    """Generates reports and recommendations.

    Responsibilities:
    - Generate test health reports
    - Track progress over time
    - Provide actionable recommendations
    - Format output for different consumers
    """

    def __init__(self, index: ProjectIndex, config: CrewConfig):
        self.index = index
        self.config = config
        self.name = "Test Reporter"

    async def generate_status_report(self) -> AgentResult:
        """Generate comprehensive status report."""
        started_at = datetime.now()

        project_summary = self.index.get_summary()
        report_builder = ReportGenerator(project_summary, self.index.get_all_files())

        # Every report section plus derived, actionable recommendations.
        payload = {
            "health": report_builder.health_report(),
            "test_gaps": report_builder.test_gap_report(),
            "staleness": report_builder.staleness_report(),
            "recommendations": self._generate_recommendations(project_summary),
        }

        elapsed_ms = int((datetime.now() - started_at).total_seconds() * 1000)

        return AgentResult(
            agent=self.name,
            task="generate_status_report",
            success=True,
            output=payload,
            duration_ms=elapsed_ms,
        )

    async def generate_maintenance_summary(
        self,
        crew_results: list[AgentResult],
    ) -> AgentResult:
        """Generate summary of maintenance run."""
        started_at = datetime.now()

        combined_duration = sum(item.duration_ms for item in crew_results)
        succeeded_count = sum(1 for item in crew_results if item.success)

        payload = {
            "run_timestamp": datetime.now().isoformat(),
            "agents_executed": len(crew_results),
            "agents_succeeded": succeeded_count,
            "total_duration_ms": combined_duration,
            "agent_results": [
                {
                    "agent": item.agent,
                    "task": item.task,
                    "success": item.success,
                    "duration_ms": item.duration_ms,
                }
                for item in crew_results
            ],
            "overall_success": succeeded_count == len(crew_results),
        }

        elapsed_ms = int((datetime.now() - started_at).total_seconds() * 1000)

        return AgentResult(
            agent=self.name,
            task="generate_maintenance_summary",
            success=True,
            output=payload,
            duration_ms=elapsed_ms,
        )

    def _generate_recommendations(self, summary) -> list[str]:
        """Generate actionable recommendations."""
        advice: list[str] = []
        coverage = summary.test_coverage_avg
        target = self.config.min_coverage_target
        untested = summary.files_without_tests
        stale = summary.stale_file_count

        # Coverage recommendations: critically-low first, then below-target.
        if coverage < 50:
            advice.append(
                f"CRITICAL: Test coverage is {coverage:.1f}%. "
                f"Target is {target}%. Prioritize test creation.",
            )
        elif coverage < target:
            advice.append(f"Coverage is {coverage:.1f}%, below target of {target}%.")

        # Test gap recommendations: large gaps suggest batch generation.
        if untested > 20:
            advice.append(
                f"Large test gap: {untested} files need tests. Consider batch generation.",
            )
        elif untested > 0:
            advice.append(f"{untested} files still need tests.")

        # Staleness recommendations.
        if stale > 10:
            advice.append(f"{stale} files have stale tests. Run test update workflow.")
        elif stale > 0:
            advice.append(f"{stale} files have stale tests.")

        # High-impact untested files always warrant a priority call-out.
        if summary.critical_untested_files:
            advice.append(
                f"PRIORITY: {len(summary.critical_untested_files)} high-impact files "
                "lack tests. Address immediately.",
            )

        # Nothing flagged: report good health.
        if not advice:
            advice.append("Test health is good. Maintain current coverage.")

        return advice
|
|
633
|
+
|
|
634
|
+
|
|
635
|
+
class TestMaintenanceCrew:
    """Coordinates the test maintenance agents.

    The crew can run different types of maintenance operations:
    - full: Run all agents in sequence
    - analyze: Only run analysis (no generation)
    - generate: Run analysis and generation
    - validate: Run analysis, generation, and validation
    - report: Only generate reports
    """

    def __init__(
        self,
        project_root: str,
        index: ProjectIndex | None = None,
        config: CrewConfig | None = None,
    ):
        """Initialize the test maintenance crew.

        Args:
            project_root: Path to the project root directory.
            index: Existing project index; a fresh one is built when omitted.
            config: Crew configuration; defaults apply when omitted.

        .. deprecated:: 4.3.0
            Use meta-workflow system instead: ``empathy meta-workflow run test-maintenance``
        """
        warnings.warn(
            "TestMaintenanceCrew is deprecated since v4.3.0. "
            "Use meta-workflow system instead: empathy meta-workflow run test-maintenance. "
            "See docs/CREWAI_MIGRATION.md for migration guide.",
            DeprecationWarning,
            stacklevel=2,  # attribute the warning to the caller, not this __init__
        )
        self.project_root = Path(project_root)
        self.index = index or ProjectIndex(str(project_root))
        self.config = config or CrewConfig()

        # Initialize agents
        self.analyst = TestAnalystAgent(self.index, self.config)
        self.generator = TestGeneratorAgent(self.project_root, self.index, self.config)
        self.validator = TestValidatorAgent(self.project_root, self.config)
        self.reporter = TestReporterAgent(self.index, self.config)

        # Results tracking (in-memory only; not persisted across processes)
        self._run_history: list[dict[str, Any]] = []

    async def run(self, mode: str = "full", test_files: list[str] | None = None) -> dict[str, Any]:
        """Run the crew with specified mode.

        Modes:
        - full: Complete maintenance cycle
        - analyze: Only analysis
        - generate: Analysis + generation
        - validate: Analysis + generation + validation
        - validate-only: Only validate specified test files (pass test_files param)
        - report: Only reporting

        Returns:
            Dict with mode, timestamp, each agent's output, the reporter's
            summary, and an overall ``success`` flag.
        """
        logger.info(f"Starting test maintenance crew in {mode} mode")

        results: list[AgentResult] = []
        plan = None  # analysis plan; stays None in report and validate-only modes

        # Handle validate-only mode separately
        if mode == "validate-only":
            if not test_files:
                return {
                    "mode": mode,
                    "success": False,
                    "error": "validate-only mode requires test_files parameter",
                }

            for test_file in test_files:
                val_result = await self.validator.validate_single(test_file)
                results.append(val_result)

            summary_result = await self.reporter.generate_maintenance_summary(results)
            results.append(summary_result)

            return {
                "mode": mode,
                "timestamp": datetime.now().isoformat(),
                "results": [r.output for r in results],
                "summary": summary_result.output,
                # The summary step itself is excluded from the success check.
                "success": all(
                    r.success for r in results if r.task != "generate_maintenance_summary"
                ),
            }

        # Ensure index is fresh
        self.index.refresh()

        # Phase 1: Analysis (always run except for report-only)
        if mode != "report":
            coverage_result = await self.analyst.analyze_coverage_gaps()
            results.append(coverage_result)

            staleness_result = await self.analyst.analyze_staleness()
            results.append(staleness_result)

            plan_result = await self.analyst.generate_plan()
            results.append(plan_result)
            plan = plan_result.output.get("plan", {})

        # Phase 2: Generation (for generate, validate, full modes)
        if mode in ["generate", "validate", "full"] and plan:
            # Only "create" actions are materialized here; other plan actions
            # are not handled by this phase.
            plan_items = [
                TestPlanItem(
                    file_path=item["file_path"],
                    action=TestAction(item["action"]),
                    priority=TestPriority(item["priority"]),
                    reason=item.get("reason", ""),
                    metadata=item.get("metadata", {}),
                )
                for item in plan.get("items", [])
                if item["action"] == "create"
            ]

            if plan_items:
                gen_result = await self.generator.generate_tests(plan_items)
                results.append(gen_result)

        # Phase 3: Validation (for validate, full modes)
        if mode in ["validate", "full"] and self.config.enable_auto_validation:
            # Get test files from generation results
            generated_test_files = []
            for result in results:
                if result.agent == "Test Generator":
                    for item in result.output.get("results", []):
                        if item.get("success") and item.get("test_file"):
                            generated_test_files.append(item["test_file"])

            if generated_test_files:
                try:
                    val_result = await self.validator.validate_tests(generated_test_files)
                    results.append(val_result)
                except Exception as e:
                    logger.error(f"Validation failed with error: {e}")
                    if not self.config.validation_optional:
                        raise
                    # Log but continue if validation is optional
                    results.append(
                        AgentResult(
                            agent="Test Validator",
                            task="validate_tests",
                            success=True,  # Mark as success since validation is optional
                            output={
                                "error": str(e),
                                "validation_skipped": True,
                                "validation_optional": True,
                            },
                            duration_ms=0,
                        ),
                    )

        # Phase 4: Reporting (always run)
        status_result = await self.reporter.generate_status_report()
        results.append(status_result)

        summary_result = await self.reporter.generate_maintenance_summary(results)
        results.append(summary_result)

        # Compile final output
        output = {
            "mode": mode,
            "timestamp": datetime.now().isoformat(),
            "results": [r.output for r in results],
            "summary": summary_result.output,
            "success": all(r.success for r in results),
        }

        # Save to history
        self._run_history.append(output)

        return output

    def get_run_history(self, limit: int = 10) -> list[dict[str, Any]]:
        """Get recent run history (oldest first), capped at *limit* entries."""
        return self._run_history[-limit:]

    def get_crew_status(self) -> dict[str, Any]:
        """Get current crew status."""
        return {
            "project_root": str(self.project_root),
            "config": {
                "auto_generation": self.config.enable_auto_generation,
                "auto_validation": self.config.enable_auto_validation,
                "max_files_per_run": self.config.max_files_per_run,
            },
            "index_status": {
                # NOTE(review): get_summary() is invoked twice here; presumably
                # cheap, but could be hoisted into a local - confirm.
                "total_files": self.index.get_summary().total_files,
                "files_needing_tests": self.index.get_summary().files_without_tests,
            },
            "run_count": len(self._run_history),
        }
|
|
825
|
+
|
|
826
|
+
|
|
827
|
+
def create_crew_config_from_dict(config_dict: dict[str, Any]) -> CrewConfig:
    """Create CrewConfig from dictionary.

    Keys missing from *config_dict* fall back to the defaults in the
    field table below.
    """
    # (field name, default) pairs, one per CrewConfig argument.
    field_defaults: list[tuple[str, Any]] = [
        ("enable_auto_generation", True),
        ("enable_auto_validation", True),
        ("max_files_per_run", 10),
        ("min_coverage_target", 80.0),
        ("staleness_threshold_days", 7),
        ("high_impact_threshold", 5.0),
        ("auto_run_interval_hours", 24),
        ("run_on_commit", True),
        ("test_gen_model", "sonnet"),
        ("validation_model", "haiku"),
    ]
    kwargs = {name: config_dict.get(name, fallback) for name, fallback in field_defaults}
    return CrewConfig(**kwargs)
|