attune-ai 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- attune/__init__.py +358 -0
- attune/adaptive/__init__.py +13 -0
- attune/adaptive/task_complexity.py +127 -0
- attune/agent_monitoring.py +414 -0
- attune/cache/__init__.py +117 -0
- attune/cache/base.py +166 -0
- attune/cache/dependency_manager.py +256 -0
- attune/cache/hash_only.py +251 -0
- attune/cache/hybrid.py +457 -0
- attune/cache/storage.py +285 -0
- attune/cache_monitor.py +356 -0
- attune/cache_stats.py +298 -0
- attune/cli/__init__.py +152 -0
- attune/cli/__main__.py +12 -0
- attune/cli/commands/__init__.py +1 -0
- attune/cli/commands/batch.py +264 -0
- attune/cli/commands/cache.py +248 -0
- attune/cli/commands/help.py +331 -0
- attune/cli/commands/info.py +140 -0
- attune/cli/commands/inspect.py +436 -0
- attune/cli/commands/inspection.py +57 -0
- attune/cli/commands/memory.py +48 -0
- attune/cli/commands/metrics.py +92 -0
- attune/cli/commands/orchestrate.py +184 -0
- attune/cli/commands/patterns.py +207 -0
- attune/cli/commands/profiling.py +202 -0
- attune/cli/commands/provider.py +98 -0
- attune/cli/commands/routing.py +285 -0
- attune/cli/commands/setup.py +96 -0
- attune/cli/commands/status.py +235 -0
- attune/cli/commands/sync.py +166 -0
- attune/cli/commands/tier.py +121 -0
- attune/cli/commands/utilities.py +114 -0
- attune/cli/commands/workflow.py +579 -0
- attune/cli/core.py +32 -0
- attune/cli/parsers/__init__.py +68 -0
- attune/cli/parsers/batch.py +118 -0
- attune/cli/parsers/cache.py +65 -0
- attune/cli/parsers/help.py +41 -0
- attune/cli/parsers/info.py +26 -0
- attune/cli/parsers/inspect.py +66 -0
- attune/cli/parsers/metrics.py +42 -0
- attune/cli/parsers/orchestrate.py +61 -0
- attune/cli/parsers/patterns.py +54 -0
- attune/cli/parsers/provider.py +40 -0
- attune/cli/parsers/routing.py +110 -0
- attune/cli/parsers/setup.py +42 -0
- attune/cli/parsers/status.py +47 -0
- attune/cli/parsers/sync.py +31 -0
- attune/cli/parsers/tier.py +33 -0
- attune/cli/parsers/workflow.py +77 -0
- attune/cli/utils/__init__.py +1 -0
- attune/cli/utils/data.py +242 -0
- attune/cli/utils/helpers.py +68 -0
- attune/cli_legacy.py +3957 -0
- attune/cli_minimal.py +1159 -0
- attune/cli_router.py +437 -0
- attune/cli_unified.py +814 -0
- attune/config/__init__.py +66 -0
- attune/config/xml_config.py +286 -0
- attune/config.py +545 -0
- attune/coordination.py +870 -0
- attune/core.py +1511 -0
- attune/core_modules/__init__.py +15 -0
- attune/cost_tracker.py +626 -0
- attune/dashboard/__init__.py +41 -0
- attune/dashboard/app.py +512 -0
- attune/dashboard/simple_server.py +435 -0
- attune/dashboard/standalone_server.py +547 -0
- attune/discovery.py +306 -0
- attune/emergence.py +306 -0
- attune/exceptions.py +123 -0
- attune/feedback_loops.py +373 -0
- attune/hot_reload/README.md +473 -0
- attune/hot_reload/__init__.py +62 -0
- attune/hot_reload/config.py +83 -0
- attune/hot_reload/integration.py +229 -0
- attune/hot_reload/reloader.py +298 -0
- attune/hot_reload/watcher.py +183 -0
- attune/hot_reload/websocket.py +177 -0
- attune/levels.py +577 -0
- attune/leverage_points.py +441 -0
- attune/logging_config.py +261 -0
- attune/mcp/__init__.py +10 -0
- attune/mcp/server.py +506 -0
- attune/memory/__init__.py +237 -0
- attune/memory/claude_memory.py +469 -0
- attune/memory/config.py +224 -0
- attune/memory/control_panel.py +1290 -0
- attune/memory/control_panel_support.py +145 -0
- attune/memory/cross_session.py +845 -0
- attune/memory/edges.py +179 -0
- attune/memory/encryption.py +159 -0
- attune/memory/file_session.py +770 -0
- attune/memory/graph.py +570 -0
- attune/memory/long_term.py +913 -0
- attune/memory/long_term_types.py +99 -0
- attune/memory/mixins/__init__.py +25 -0
- attune/memory/mixins/backend_init_mixin.py +249 -0
- attune/memory/mixins/capabilities_mixin.py +208 -0
- attune/memory/mixins/handoff_mixin.py +208 -0
- attune/memory/mixins/lifecycle_mixin.py +49 -0
- attune/memory/mixins/long_term_mixin.py +352 -0
- attune/memory/mixins/promotion_mixin.py +109 -0
- attune/memory/mixins/short_term_mixin.py +182 -0
- attune/memory/nodes.py +179 -0
- attune/memory/redis_bootstrap.py +540 -0
- attune/memory/security/__init__.py +31 -0
- attune/memory/security/audit_logger.py +932 -0
- attune/memory/security/pii_scrubber.py +640 -0
- attune/memory/security/secrets_detector.py +678 -0
- attune/memory/short_term.py +2192 -0
- attune/memory/simple_storage.py +302 -0
- attune/memory/storage/__init__.py +15 -0
- attune/memory/storage_backend.py +167 -0
- attune/memory/summary_index.py +583 -0
- attune/memory/types.py +446 -0
- attune/memory/unified.py +182 -0
- attune/meta_workflows/__init__.py +74 -0
- attune/meta_workflows/agent_creator.py +248 -0
- attune/meta_workflows/builtin_templates.py +567 -0
- attune/meta_workflows/cli_commands/__init__.py +56 -0
- attune/meta_workflows/cli_commands/agent_commands.py +321 -0
- attune/meta_workflows/cli_commands/analytics_commands.py +442 -0
- attune/meta_workflows/cli_commands/config_commands.py +232 -0
- attune/meta_workflows/cli_commands/memory_commands.py +182 -0
- attune/meta_workflows/cli_commands/template_commands.py +354 -0
- attune/meta_workflows/cli_commands/workflow_commands.py +382 -0
- attune/meta_workflows/cli_meta_workflows.py +59 -0
- attune/meta_workflows/form_engine.py +292 -0
- attune/meta_workflows/intent_detector.py +409 -0
- attune/meta_workflows/models.py +569 -0
- attune/meta_workflows/pattern_learner.py +738 -0
- attune/meta_workflows/plan_generator.py +384 -0
- attune/meta_workflows/session_context.py +397 -0
- attune/meta_workflows/template_registry.py +229 -0
- attune/meta_workflows/workflow.py +984 -0
- attune/metrics/__init__.py +12 -0
- attune/metrics/collector.py +31 -0
- attune/metrics/prompt_metrics.py +194 -0
- attune/models/__init__.py +172 -0
- attune/models/__main__.py +13 -0
- attune/models/adaptive_routing.py +437 -0
- attune/models/auth_cli.py +444 -0
- attune/models/auth_strategy.py +450 -0
- attune/models/cli.py +655 -0
- attune/models/empathy_executor.py +354 -0
- attune/models/executor.py +257 -0
- attune/models/fallback.py +762 -0
- attune/models/provider_config.py +282 -0
- attune/models/registry.py +472 -0
- attune/models/tasks.py +359 -0
- attune/models/telemetry/__init__.py +71 -0
- attune/models/telemetry/analytics.py +594 -0
- attune/models/telemetry/backend.py +196 -0
- attune/models/telemetry/data_models.py +431 -0
- attune/models/telemetry/storage.py +489 -0
- attune/models/token_estimator.py +420 -0
- attune/models/validation.py +280 -0
- attune/monitoring/__init__.py +52 -0
- attune/monitoring/alerts.py +946 -0
- attune/monitoring/alerts_cli.py +448 -0
- attune/monitoring/multi_backend.py +271 -0
- attune/monitoring/otel_backend.py +362 -0
- attune/optimization/__init__.py +19 -0
- attune/optimization/context_optimizer.py +272 -0
- attune/orchestration/__init__.py +67 -0
- attune/orchestration/agent_templates.py +707 -0
- attune/orchestration/config_store.py +499 -0
- attune/orchestration/execution_strategies.py +2111 -0
- attune/orchestration/meta_orchestrator.py +1168 -0
- attune/orchestration/pattern_learner.py +696 -0
- attune/orchestration/real_tools.py +931 -0
- attune/pattern_cache.py +187 -0
- attune/pattern_library.py +542 -0
- attune/patterns/debugging/all_patterns.json +81 -0
- attune/patterns/debugging/workflow_20260107_1770825e.json +77 -0
- attune/patterns/refactoring_memory.json +89 -0
- attune/persistence.py +564 -0
- attune/platform_utils.py +265 -0
- attune/plugins/__init__.py +28 -0
- attune/plugins/base.py +361 -0
- attune/plugins/registry.py +268 -0
- attune/project_index/__init__.py +32 -0
- attune/project_index/cli.py +335 -0
- attune/project_index/index.py +667 -0
- attune/project_index/models.py +504 -0
- attune/project_index/reports.py +474 -0
- attune/project_index/scanner.py +777 -0
- attune/project_index/scanner_parallel.py +291 -0
- attune/prompts/__init__.py +61 -0
- attune/prompts/config.py +77 -0
- attune/prompts/context.py +177 -0
- attune/prompts/parser.py +285 -0
- attune/prompts/registry.py +313 -0
- attune/prompts/templates.py +208 -0
- attune/redis_config.py +302 -0
- attune/redis_memory.py +799 -0
- attune/resilience/__init__.py +56 -0
- attune/resilience/circuit_breaker.py +256 -0
- attune/resilience/fallback.py +179 -0
- attune/resilience/health.py +300 -0
- attune/resilience/retry.py +209 -0
- attune/resilience/timeout.py +135 -0
- attune/routing/__init__.py +43 -0
- attune/routing/chain_executor.py +433 -0
- attune/routing/classifier.py +217 -0
- attune/routing/smart_router.py +234 -0
- attune/routing/workflow_registry.py +343 -0
- attune/scaffolding/README.md +589 -0
- attune/scaffolding/__init__.py +35 -0
- attune/scaffolding/__main__.py +14 -0
- attune/scaffolding/cli.py +240 -0
- attune/scaffolding/templates/base_wizard.py.jinja2 +121 -0
- attune/scaffolding/templates/coach_wizard.py.jinja2 +321 -0
- attune/scaffolding/templates/domain_wizard.py.jinja2 +408 -0
- attune/scaffolding/templates/linear_flow_wizard.py.jinja2 +203 -0
- attune/socratic/__init__.py +256 -0
- attune/socratic/ab_testing.py +958 -0
- attune/socratic/blueprint.py +533 -0
- attune/socratic/cli.py +703 -0
- attune/socratic/collaboration.py +1114 -0
- attune/socratic/domain_templates.py +924 -0
- attune/socratic/embeddings.py +738 -0
- attune/socratic/engine.py +794 -0
- attune/socratic/explainer.py +682 -0
- attune/socratic/feedback.py +772 -0
- attune/socratic/forms.py +629 -0
- attune/socratic/generator.py +732 -0
- attune/socratic/llm_analyzer.py +637 -0
- attune/socratic/mcp_server.py +702 -0
- attune/socratic/session.py +312 -0
- attune/socratic/storage.py +667 -0
- attune/socratic/success.py +730 -0
- attune/socratic/visual_editor.py +860 -0
- attune/socratic/web_ui.py +958 -0
- attune/telemetry/__init__.py +39 -0
- attune/telemetry/agent_coordination.py +475 -0
- attune/telemetry/agent_tracking.py +367 -0
- attune/telemetry/approval_gates.py +545 -0
- attune/telemetry/cli.py +1231 -0
- attune/telemetry/commands/__init__.py +14 -0
- attune/telemetry/commands/dashboard_commands.py +696 -0
- attune/telemetry/event_streaming.py +409 -0
- attune/telemetry/feedback_loop.py +567 -0
- attune/telemetry/usage_tracker.py +591 -0
- attune/templates.py +754 -0
- attune/test_generator/__init__.py +38 -0
- attune/test_generator/__main__.py +14 -0
- attune/test_generator/cli.py +234 -0
- attune/test_generator/generator.py +355 -0
- attune/test_generator/risk_analyzer.py +216 -0
- attune/test_generator/templates/unit_test.py.jinja2 +272 -0
- attune/tier_recommender.py +384 -0
- attune/tools.py +183 -0
- attune/trust/__init__.py +28 -0
- attune/trust/circuit_breaker.py +579 -0
- attune/trust_building.py +527 -0
- attune/validation/__init__.py +19 -0
- attune/validation/xml_validator.py +281 -0
- attune/vscode_bridge.py +173 -0
- attune/workflow_commands.py +780 -0
- attune/workflow_patterns/__init__.py +33 -0
- attune/workflow_patterns/behavior.py +249 -0
- attune/workflow_patterns/core.py +76 -0
- attune/workflow_patterns/output.py +99 -0
- attune/workflow_patterns/registry.py +255 -0
- attune/workflow_patterns/structural.py +288 -0
- attune/workflows/__init__.py +539 -0
- attune/workflows/autonomous_test_gen.py +1268 -0
- attune/workflows/base.py +2667 -0
- attune/workflows/batch_processing.py +342 -0
- attune/workflows/bug_predict.py +1084 -0
- attune/workflows/builder.py +273 -0
- attune/workflows/caching.py +253 -0
- attune/workflows/code_review.py +1048 -0
- attune/workflows/code_review_adapters.py +312 -0
- attune/workflows/code_review_pipeline.py +722 -0
- attune/workflows/config.py +645 -0
- attune/workflows/dependency_check.py +644 -0
- attune/workflows/document_gen/__init__.py +25 -0
- attune/workflows/document_gen/config.py +30 -0
- attune/workflows/document_gen/report_formatter.py +162 -0
- attune/workflows/document_gen/workflow.py +1426 -0
- attune/workflows/document_manager.py +216 -0
- attune/workflows/document_manager_README.md +134 -0
- attune/workflows/documentation_orchestrator.py +1205 -0
- attune/workflows/history.py +510 -0
- attune/workflows/keyboard_shortcuts/__init__.py +39 -0
- attune/workflows/keyboard_shortcuts/generators.py +391 -0
- attune/workflows/keyboard_shortcuts/parsers.py +416 -0
- attune/workflows/keyboard_shortcuts/prompts.py +295 -0
- attune/workflows/keyboard_shortcuts/schema.py +193 -0
- attune/workflows/keyboard_shortcuts/workflow.py +509 -0
- attune/workflows/llm_base.py +363 -0
- attune/workflows/manage_docs.py +87 -0
- attune/workflows/manage_docs_README.md +134 -0
- attune/workflows/manage_documentation.py +821 -0
- attune/workflows/new_sample_workflow1.py +149 -0
- attune/workflows/new_sample_workflow1_README.md +150 -0
- attune/workflows/orchestrated_health_check.py +849 -0
- attune/workflows/orchestrated_release_prep.py +600 -0
- attune/workflows/output.py +413 -0
- attune/workflows/perf_audit.py +863 -0
- attune/workflows/pr_review.py +762 -0
- attune/workflows/progress.py +785 -0
- attune/workflows/progress_server.py +322 -0
- attune/workflows/progressive/README 2.md +454 -0
- attune/workflows/progressive/README.md +454 -0
- attune/workflows/progressive/__init__.py +82 -0
- attune/workflows/progressive/cli.py +219 -0
- attune/workflows/progressive/core.py +488 -0
- attune/workflows/progressive/orchestrator.py +723 -0
- attune/workflows/progressive/reports.py +520 -0
- attune/workflows/progressive/telemetry.py +274 -0
- attune/workflows/progressive/test_gen.py +495 -0
- attune/workflows/progressive/workflow.py +589 -0
- attune/workflows/refactor_plan.py +694 -0
- attune/workflows/release_prep.py +895 -0
- attune/workflows/release_prep_crew.py +969 -0
- attune/workflows/research_synthesis.py +404 -0
- attune/workflows/routing.py +168 -0
- attune/workflows/secure_release.py +593 -0
- attune/workflows/security_adapters.py +297 -0
- attune/workflows/security_audit.py +1329 -0
- attune/workflows/security_audit_phase3.py +355 -0
- attune/workflows/seo_optimization.py +633 -0
- attune/workflows/step_config.py +234 -0
- attune/workflows/telemetry_mixin.py +269 -0
- attune/workflows/test5.py +125 -0
- attune/workflows/test5_README.md +158 -0
- attune/workflows/test_coverage_boost_crew.py +849 -0
- attune/workflows/test_gen/__init__.py +52 -0
- attune/workflows/test_gen/ast_analyzer.py +249 -0
- attune/workflows/test_gen/config.py +88 -0
- attune/workflows/test_gen/data_models.py +38 -0
- attune/workflows/test_gen/report_formatter.py +289 -0
- attune/workflows/test_gen/test_templates.py +381 -0
- attune/workflows/test_gen/workflow.py +655 -0
- attune/workflows/test_gen.py +54 -0
- attune/workflows/test_gen_behavioral.py +477 -0
- attune/workflows/test_gen_parallel.py +341 -0
- attune/workflows/test_lifecycle.py +526 -0
- attune/workflows/test_maintenance.py +627 -0
- attune/workflows/test_maintenance_cli.py +590 -0
- attune/workflows/test_maintenance_crew.py +840 -0
- attune/workflows/test_runner.py +622 -0
- attune/workflows/tier_tracking.py +531 -0
- attune/workflows/xml_enhanced_crew.py +285 -0
- attune_ai-2.0.0.dist-info/METADATA +1026 -0
- attune_ai-2.0.0.dist-info/RECORD +457 -0
- attune_ai-2.0.0.dist-info/WHEEL +5 -0
- attune_ai-2.0.0.dist-info/entry_points.txt +26 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE +201 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +101 -0
- attune_ai-2.0.0.dist-info/top_level.txt +5 -0
- attune_healthcare/__init__.py +13 -0
- attune_healthcare/monitors/__init__.py +9 -0
- attune_healthcare/monitors/clinical_protocol_monitor.py +315 -0
- attune_healthcare/monitors/monitoring/__init__.py +44 -0
- attune_healthcare/monitors/monitoring/protocol_checker.py +300 -0
- attune_healthcare/monitors/monitoring/protocol_loader.py +214 -0
- attune_healthcare/monitors/monitoring/sensor_parsers.py +306 -0
- attune_healthcare/monitors/monitoring/trajectory_analyzer.py +389 -0
- attune_llm/README.md +553 -0
- attune_llm/__init__.py +28 -0
- attune_llm/agent_factory/__init__.py +53 -0
- attune_llm/agent_factory/adapters/__init__.py +85 -0
- attune_llm/agent_factory/adapters/autogen_adapter.py +312 -0
- attune_llm/agent_factory/adapters/crewai_adapter.py +483 -0
- attune_llm/agent_factory/adapters/haystack_adapter.py +298 -0
- attune_llm/agent_factory/adapters/langchain_adapter.py +362 -0
- attune_llm/agent_factory/adapters/langgraph_adapter.py +333 -0
- attune_llm/agent_factory/adapters/native.py +228 -0
- attune_llm/agent_factory/adapters/wizard_adapter.py +423 -0
- attune_llm/agent_factory/base.py +305 -0
- attune_llm/agent_factory/crews/__init__.py +67 -0
- attune_llm/agent_factory/crews/code_review.py +1113 -0
- attune_llm/agent_factory/crews/health_check.py +1262 -0
- attune_llm/agent_factory/crews/refactoring.py +1128 -0
- attune_llm/agent_factory/crews/security_audit.py +1018 -0
- attune_llm/agent_factory/decorators.py +287 -0
- attune_llm/agent_factory/factory.py +558 -0
- attune_llm/agent_factory/framework.py +193 -0
- attune_llm/agent_factory/memory_integration.py +328 -0
- attune_llm/agent_factory/resilient.py +320 -0
- attune_llm/agents_md/__init__.py +22 -0
- attune_llm/agents_md/loader.py +218 -0
- attune_llm/agents_md/parser.py +271 -0
- attune_llm/agents_md/registry.py +307 -0
- attune_llm/claude_memory.py +466 -0
- attune_llm/cli/__init__.py +8 -0
- attune_llm/cli/sync_claude.py +487 -0
- attune_llm/code_health.py +1313 -0
- attune_llm/commands/__init__.py +51 -0
- attune_llm/commands/context.py +375 -0
- attune_llm/commands/loader.py +301 -0
- attune_llm/commands/models.py +231 -0
- attune_llm/commands/parser.py +371 -0
- attune_llm/commands/registry.py +429 -0
- attune_llm/config/__init__.py +29 -0
- attune_llm/config/unified.py +291 -0
- attune_llm/context/__init__.py +22 -0
- attune_llm/context/compaction.py +455 -0
- attune_llm/context/manager.py +434 -0
- attune_llm/contextual_patterns.py +361 -0
- attune_llm/core.py +907 -0
- attune_llm/git_pattern_extractor.py +435 -0
- attune_llm/hooks/__init__.py +24 -0
- attune_llm/hooks/config.py +306 -0
- attune_llm/hooks/executor.py +289 -0
- attune_llm/hooks/registry.py +302 -0
- attune_llm/hooks/scripts/__init__.py +39 -0
- attune_llm/hooks/scripts/evaluate_session.py +201 -0
- attune_llm/hooks/scripts/first_time_init.py +285 -0
- attune_llm/hooks/scripts/pre_compact.py +207 -0
- attune_llm/hooks/scripts/session_end.py +183 -0
- attune_llm/hooks/scripts/session_start.py +163 -0
- attune_llm/hooks/scripts/suggest_compact.py +225 -0
- attune_llm/learning/__init__.py +30 -0
- attune_llm/learning/evaluator.py +438 -0
- attune_llm/learning/extractor.py +514 -0
- attune_llm/learning/storage.py +560 -0
- attune_llm/levels.py +227 -0
- attune_llm/pattern_confidence.py +414 -0
- attune_llm/pattern_resolver.py +272 -0
- attune_llm/pattern_summary.py +350 -0
- attune_llm/providers.py +967 -0
- attune_llm/routing/__init__.py +32 -0
- attune_llm/routing/model_router.py +362 -0
- attune_llm/security/IMPLEMENTATION_SUMMARY.md +413 -0
- attune_llm/security/PHASE2_COMPLETE.md +384 -0
- attune_llm/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
- attune_llm/security/QUICK_REFERENCE.md +316 -0
- attune_llm/security/README.md +262 -0
- attune_llm/security/__init__.py +62 -0
- attune_llm/security/audit_logger.py +929 -0
- attune_llm/security/audit_logger_example.py +152 -0
- attune_llm/security/pii_scrubber.py +640 -0
- attune_llm/security/secrets_detector.py +678 -0
- attune_llm/security/secrets_detector_example.py +304 -0
- attune_llm/security/secure_memdocs.py +1192 -0
- attune_llm/security/secure_memdocs_example.py +278 -0
- attune_llm/session_status.py +745 -0
- attune_llm/state.py +246 -0
- attune_llm/utils/__init__.py +5 -0
- attune_llm/utils/tokens.py +349 -0
- attune_software/SOFTWARE_PLUGIN_README.md +57 -0
- attune_software/__init__.py +13 -0
- attune_software/cli/__init__.py +120 -0
- attune_software/cli/inspect.py +362 -0
- attune_software/cli.py +574 -0
- attune_software/plugin.py +188 -0
- workflow_scaffolding/__init__.py +11 -0
- workflow_scaffolding/__main__.py +12 -0
- workflow_scaffolding/cli.py +206 -0
- workflow_scaffolding/generator.py +265 -0
|
@@ -0,0 +1,849 @@
|
|
|
1
|
+
"""Test Coverage Boost Crew - Multi-agent test generation workflow.
|
|
2
|
+
|
|
3
|
+
.. deprecated:: 4.3.0
|
|
4
|
+
This workflow is deprecated in favor of the meta-workflow system.
|
|
5
|
+
Use ``empathy meta-workflow run test-coverage-boost`` instead.
|
|
6
|
+
See docs/CREWAI_MIGRATION.md for migration guide.
|
|
7
|
+
|
|
8
|
+
This module provides a CrewAI-based workflow that uses 3 specialized agents
|
|
9
|
+
to analyze coverage gaps, generate tests, and validate improvements.
|
|
10
|
+
|
|
11
|
+
Copyright 2025 Smart AI Memory, LLC
|
|
12
|
+
Licensed under Fair Source 0.9
|
|
13
|
+
"""
|
|
14
|
+
|
|
15
|
+
import asyncio
|
|
16
|
+
import json
|
|
17
|
+
import re
|
|
18
|
+
import warnings
|
|
19
|
+
from dataclasses import dataclass, field
|
|
20
|
+
from datetime import datetime
|
|
21
|
+
from pathlib import Path
|
|
22
|
+
|
|
23
|
+
from attune.models.executor import ExecutionContext
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
@dataclass
class Agent:
    """Configuration for one specialized agent in the coverage-boost crew."""

    role: str  # Short role title, e.g. "Gap Analyzer"
    goal: str  # One-sentence objective for the agent
    backstory: str  # Persona text injected into the system prompt
    expertise_level: str = "expert"
    weight: float = 1.0

    def get_system_prompt(self) -> str:
        """Render this agent's persona as an LLM system prompt.

        The prompt instructs the model to reply using <thinking>/<answer>
        tags, which parse_xml_response later extracts.
        """
        persona = f"You are a {self.role}.\n\n{self.backstory}\n\n"
        objective = f"Your goal: {self.goal}\n\nExpertise level: {self.expertise_level}\n\n"
        response_format = (
            "Provide your response in this format:\n"
            "<thinking>\n"
            "[Your analysis and reasoning]\n"
            "</thinking>\n"
            "\n"
            "<answer>\n"
            "[Your JSON response matching the expected format]\n"
            "</answer>"
        )
        return persona + objective + response_format
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
@dataclass
class Task:
    """A single unit of work handed to one agent in the crew."""

    description: str  # Instruction text shown to the agent
    expected_output: str  # Description/example of the required JSON response
    context_keys: list[str] = field(default_factory=list)  # Keys to pull from the shared context

    def get_user_prompt(self, context: dict) -> str:
        """Render the task as a user prompt, embedding selected context.

        Only keys listed in ``context_keys`` that are present in ``context``
        are included; dict/list values are serialized as pretty-printed JSON.
        """
        parts = ["<context>"]
        for key in self.context_keys:
            if key not in context:
                continue  # silently skip context keys the caller did not supply
            value = context[key]
            rendered = json.dumps(value, indent=2) if isinstance(value, (dict, list)) else str(value)
            parts.extend((f"<{key}>", rendered, f"</{key}>"))
        parts.append("</context>")
        context_block = "\n".join(parts)

        return (
            f"{self.description}\n"
            "\n"
            f"{context_block}\n"
            "\n"
            "<expected_output>\n"
            f"{self.expected_output}\n"
            "</expected_output>\n"
            "\n"
            "<instructions>\n"
            "1. Review all context data in the <context> tags above\n"
            "2. Structure your response using <thinking> and <answer> tags\n"
            "3. Provide a JSON response in the <answer> section matching the expected output format exactly\n"
            "</instructions>"
        )
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
@dataclass
class CoverageGap:
    """Represents a gap in test coverage.

    Corresponds to one entry of the "gaps" array the Gap Analyzer agent is
    asked to produce.
    """

    file_path: str  # Source file containing the untested code
    function_name: str  # Function with missing coverage
    line_start: int  # First line of the uncovered region
    line_end: int  # Last line of the uncovered region
    priority: float  # 0-1, higher = more important
    reason: str  # Why this gap matters, e.g. "High complexity function with no tests"
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
@dataclass
class GeneratedTest:
    """Represents a generated test case.

    Mirrors one entry of the "tests" array the Test Generator agent is asked
    to produce.
    """

    test_name: str  # e.g. "test_function_name_edge_case"
    test_code: str  # Python source of the generated test
    target_function: str  # Function the test exercises
    target_file: str  # Source file containing the target function
    coverage_impact: float  # Estimated coverage improvement
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
@dataclass
class TestCoverageBoostCrewResult:
    """Result from TestCoverageBoostCrew execution."""

    # Outcome summary
    success: bool  # Overall success flag for the crew run
    current_coverage: float  # 0-100
    target_coverage: float  # 0-100
    final_coverage: float  # 0-100
    coverage_improvement: float  # Percentage points gained

    # Detailed results
    gaps_found: int  # Number of coverage gaps identified
    tests_generated: int  # Number of tests produced
    tests_passing: int  # Of the generated tests, how many pass
    gaps_analyzed: list[CoverageGap] = field(default_factory=list)
    generated_tests: list[GeneratedTest] = field(default_factory=list)

    # Execution metadata
    agents_executed: int = 3  # The crew defines three agents
    cost: float = 0.0  # Accumulated LLM cost (unit not shown in this chunk)
    duration_ms: int = 0
    # NOTE(review): naive local time, not timezone-aware — confirm whether UTC is expected
    timestamp: str = field(default_factory=lambda: datetime.now().isoformat())
    errors: list[str] = field(default_factory=list)  # Error messages collected during the run
|
|
139
|
+
|
|
140
|
+
|
|
141
|
+
def parse_xml_response(response: str) -> dict:
    """Parse an XML-structured agent response into its sections.

    Agents are prompted (see ``Agent.get_system_prompt``) to wrap their
    reasoning in ``<thinking>`` tags and their JSON payload in ``<answer>``
    tags; this extracts both, degrading gracefully for unstructured text.

    Args:
        response: Raw text returned by an agent.

    Returns:
        Dict with keys:
            thinking: Content of the <thinking> section ("" if absent).
            answer: Content of <answer>; if that section is absent or empty,
                the text after </thinking>, else the full response.
            raw: The original response, unmodified.
            has_xml_structure: True if either tagged section was found.
    """
    result = {
        "thinking": "",
        "answer": "",
        "raw": response,
        "has_xml_structure": False,
    }

    # Named tag constants with len()-based offsets replace the previous
    # magic numbers (10, 8, 11), which would silently break if a tag ever
    # changed.
    thinking_open, thinking_close = "<thinking>", "</thinking>"
    answer_open, answer_close = "<answer>", "</answer>"

    # Try to extract the thinking section.
    thinking_start = response.find(thinking_open)
    thinking_end = response.find(thinking_close)
    if thinking_start != -1 and thinking_end != -1:
        result["thinking"] = response[thinking_start + len(thinking_open) : thinking_end].strip()
        result["has_xml_structure"] = True

    # Try to extract the answer section.
    answer_start = response.find(answer_open)
    answer_end = response.find(answer_close)
    if answer_start != -1 and answer_end != -1:
        result["answer"] = response[answer_start + len(answer_open) : answer_end].strip()
        result["has_xml_structure"] = True

    # Fallbacks: everything after </thinking>, else the full response.
    if not result["answer"]:
        if thinking_end != -1:
            result["answer"] = response[thinking_end + len(thinking_close) :].strip()
        else:
            result["answer"] = response

    return result
|
|
174
|
+
|
|
175
|
+
|
|
176
|
+
class TestCoverageBoostCrew:
    """Test Coverage Boost Crew - Multi-agent test generation.

    .. deprecated:: 4.3.0
        Deprecated in favor of the meta-workflow system; ``__init__`` emits
        a DeprecationWarning. Use ``empathy meta-workflow run
        test-coverage-boost`` instead.

    Uses 3 specialized agents to analyze coverage gaps, generate tests,
    and validate improvements.

    Agents:
        - Gap Analyzer: Identifies untested code and prioritizes gaps
        - Test Generator: Creates comprehensive test cases
        - Test Validator: Validates generated tests and measures improvement

    Usage:
        crew = TestCoverageBoostCrew(target_coverage=85.0)
        result = await crew.execute(project_root="./src")

        print(f"Coverage improved by {result.coverage_improvement}%")
    """

    # Crew metadata (presumably read by CLI/registry tooling — the consumers
    # are not visible in this chunk).
    name = "Test_Coverage_Boost_Crew"
    description = "Multi-agent test generation with gap analysis and validation"
    process_type = "sequential"  # agents are intended to run one after another
|
|
197
|
+
|
|
198
|
+
def __init__(
|
|
199
|
+
self,
|
|
200
|
+
target_coverage: float = 80.0,
|
|
201
|
+
project_root: str = ".",
|
|
202
|
+
**kwargs, # Accept extra CLI arguments
|
|
203
|
+
):
|
|
204
|
+
"""Initialize the test coverage boost crew.
|
|
205
|
+
|
|
206
|
+
.. deprecated:: 4.3.0
|
|
207
|
+
Use meta-workflow system instead: ``empathy meta-workflow run test-coverage-boost``
|
|
208
|
+
|
|
209
|
+
Args:
|
|
210
|
+
target_coverage: Target coverage percentage (0-100)
|
|
211
|
+
project_root: Root directory of project to analyze
|
|
212
|
+
**kwargs: Additional arguments (ignored, for CLI compatibility)
|
|
213
|
+
"""
|
|
214
|
+
warnings.warn(
|
|
215
|
+
"TestCoverageBoostCrew is deprecated since v4.3.0. "
|
|
216
|
+
"Use meta-workflow system instead: empathy meta-workflow run test-coverage-boost. "
|
|
217
|
+
"See docs/CREWAI_MIGRATION.md for migration guide.",
|
|
218
|
+
DeprecationWarning,
|
|
219
|
+
stacklevel=2,
|
|
220
|
+
)
|
|
221
|
+
if not 0 <= target_coverage <= 100:
|
|
222
|
+
raise ValueError("target_coverage must be between 0 and 100")
|
|
223
|
+
|
|
224
|
+
self.target_coverage = target_coverage
|
|
225
|
+
self.project_root = Path(project_root).resolve()
|
|
226
|
+
|
|
227
|
+
# Initialize tracking
|
|
228
|
+
self._total_cost = 0.0
|
|
229
|
+
self._total_input_tokens = 0
|
|
230
|
+
self._total_output_tokens = 0
|
|
231
|
+
self._executor = None
|
|
232
|
+
self._project_index = None
|
|
233
|
+
|
|
234
|
+
# Initialize ProjectIndex if available
|
|
235
|
+
try:
|
|
236
|
+
from attune.project_index import ProjectIndex
|
|
237
|
+
|
|
238
|
+
self._project_index = ProjectIndex(str(self.project_root))
|
|
239
|
+
if not self._project_index.load():
|
|
240
|
+
print(" [ProjectIndex] Building index (first run)...")
|
|
241
|
+
self._project_index.refresh()
|
|
242
|
+
else:
|
|
243
|
+
print(" [ProjectIndex] Loaded existing index")
|
|
244
|
+
except Exception as e:
|
|
245
|
+
print(f" [ProjectIndex] Warning: Could not load index: {e}")
|
|
246
|
+
self._project_index = None
|
|
247
|
+
|
|
248
|
+
# Define agents
|
|
249
|
+
self.agents = [
|
|
250
|
+
Agent(
|
|
251
|
+
role="Gap Analyzer",
|
|
252
|
+
goal="Identify critical gaps in test coverage and prioritize them",
|
|
253
|
+
backstory="Expert code analyzer specializing in identifying untested code paths. "
|
|
254
|
+
"You understand which functions are most critical to test based on complexity, "
|
|
255
|
+
"usage patterns, and risk. You prioritize gaps by impact and provide actionable insights.",
|
|
256
|
+
expertise_level="expert",
|
|
257
|
+
),
|
|
258
|
+
Agent(
|
|
259
|
+
role="Test Generator",
|
|
260
|
+
goal="Generate comprehensive, high-quality test cases for coverage gaps",
|
|
261
|
+
backstory="Senior test engineer who writes clean, maintainable, effective tests. "
|
|
262
|
+
"You follow testing best practices, use appropriate assertions, cover edge cases, "
|
|
263
|
+
"and write tests that are both thorough and readable.",
|
|
264
|
+
expertise_level="expert",
|
|
265
|
+
),
|
|
266
|
+
Agent(
|
|
267
|
+
role="Test Validator",
|
|
268
|
+
goal="Validate generated tests and measure coverage improvement",
|
|
269
|
+
backstory="QA specialist focused on test quality and coverage metrics. "
|
|
270
|
+
"You verify that tests are correct, run successfully, and actually improve coverage. "
|
|
271
|
+
"You identify issues with generated tests and recommend fixes.",
|
|
272
|
+
expertise_level="expert",
|
|
273
|
+
),
|
|
274
|
+
]
|
|
275
|
+
|
|
276
|
+
def _initialize_executor(self):
|
|
277
|
+
"""Initialize LLM executor for agent calls."""
|
|
278
|
+
if self._executor is not None:
|
|
279
|
+
return
|
|
280
|
+
|
|
281
|
+
try:
|
|
282
|
+
from attune.models.empathy_executor import EmpathyLLMExecutor
|
|
283
|
+
|
|
284
|
+
self._executor = EmpathyLLMExecutor(provider="anthropic")
|
|
285
|
+
except Exception as e:
|
|
286
|
+
print(f" [LLM] Warning: Could not initialize executor: {e}")
|
|
287
|
+
print(" [LLM] Workflow will use mock responses")
|
|
288
|
+
self._executor = None
|
|
289
|
+
|
|
290
|
+
    def define_tasks(self) -> list[Task]:
        """Define the three sequential tasks of the coverage-boost pipeline.

        The tasks mirror the agents built in ``self.agents`` (analyzer,
        generator, validator). Each ``expected_output`` is a JSON-shaped
        prompt contract that the corresponding parser
        (``_parse_gap_analysis`` / ``_parse_test_generation`` /
        ``_parse_validation``) attempts to ``json.loads``.

        Returns:
            list[Task]: Tasks in execution order — gap analysis, test
            generation, validation.
        """
        return [
            # Task 1 (Gap Analyzer): consumes project stats and file
            # contents supplied via context_keys by _get_project_context().
            Task(
                description="Analyze the codebase and identify critical test coverage gaps",
                expected_output="""JSON object with:
{
    "gaps": [
        {
            "file_path": "path/to/file.py",
            "function": "function_name",
            "line_start": 10,
            "line_end": 50,
            "priority": 0.9,
            "reason": "High complexity function with no tests"
        }
    ],
    "current_coverage": 65.0,
    "summary": "Found 5 critical gaps in high-impact files"
}""",
                context_keys=[
                    "project_root",
                    "target_coverage",
                    "project_stats",
                    "coverage_data",
                    "files_to_analyze",
                    "high_impact_files",
                ],
            ),
            # Task 2 (Test Generator): "gaps" is injected into context by
            # execute() after the first agent finishes. The formatting
            # rules exist because _parse_test_generation falls back to a
            # regex salvage pass when the model emits malformed JSON.
            Task(
                description="Generate comprehensive test cases for the identified coverage gaps",
                expected_output="""JSON object with properly escaped strings:
{
    "tests": [
        {
            "test_name": "test_function_name_edge_case",
            "test_code": "def test_function_name_edge_case():\\n assert result == \\"expected\\"\\n assert x != \\"bad\\"",
            "target_function": "function_name",
            "target_file": "path/to/file.py",
            "coverage_impact": 5.2
        }
    ],
    "total_tests": 5,
    "estimated_coverage_gain": 12.5
}

CRITICAL FORMATTING RULES:
1. ALWAYS escape quotes in test_code: Use \\" not "
2. Use \\n for newlines in test_code
3. Example CORRECT: "test_code": "def test():\\n assert x == \\"value\\""
4. Example WRONG: "test_code": "def test(): assert x == "value""
5. Keep test_code concise - max 5 lines per test""",
                context_keys=["gaps", "project_root", "existing_tests"],
            ),
            # Task 3 (Test Validator): "generated_tests" is injected into
            # context by execute() after the second agent finishes.
            Task(
                description="Validate the generated tests and measure actual coverage improvement",
                expected_output="""JSON object with:
{
    "tests_passing": 4,
    "tests_failing": 1,
    "final_coverage": 77.5,
    "coverage_improvement": 12.5,
    "issues": ["test_foo failed: assertion error"],
    "recommendations": ["Add fixture for database setup"]
}""",
                context_keys=["generated_tests", "target_coverage"],
            ),
        ]
|
|
358
|
+
|
|
359
|
+
def _get_file_contents_for_analysis(self, files: list) -> list[dict]:
|
|
360
|
+
"""Read actual file contents for analysis.
|
|
361
|
+
|
|
362
|
+
Args:
|
|
363
|
+
files: List of FileRecord objects
|
|
364
|
+
|
|
365
|
+
Returns:
|
|
366
|
+
List of dicts with path, code, and metadata
|
|
367
|
+
"""
|
|
368
|
+
result = []
|
|
369
|
+
for file in files:
|
|
370
|
+
try:
|
|
371
|
+
file_path = self.project_root / file.path
|
|
372
|
+
if not file_path.exists() or not file_path.suffix == ".py":
|
|
373
|
+
continue
|
|
374
|
+
|
|
375
|
+
code = file_path.read_text(encoding="utf-8")
|
|
376
|
+
|
|
377
|
+
# Limit code size to avoid token bloat (max ~5000 chars per file)
|
|
378
|
+
if len(code) > 5000:
|
|
379
|
+
code = code[:5000] + f"\n... (truncated, {len(code) - 5000} more chars)"
|
|
380
|
+
|
|
381
|
+
result.append(
|
|
382
|
+
{
|
|
383
|
+
"path": str(file.path),
|
|
384
|
+
"complexity": file.complexity_score,
|
|
385
|
+
"lines": file.lines_of_code,
|
|
386
|
+
"has_test": file.tests_exist,
|
|
387
|
+
"coverage": file.coverage_percent,
|
|
388
|
+
"code": code,
|
|
389
|
+
}
|
|
390
|
+
)
|
|
391
|
+
except Exception:
|
|
392
|
+
# Skip files that can't be read
|
|
393
|
+
continue
|
|
394
|
+
|
|
395
|
+
return result
|
|
396
|
+
|
|
397
|
+
def _get_project_context(self) -> dict:
|
|
398
|
+
"""Get project context from ProjectIndex."""
|
|
399
|
+
if self._project_index is None:
|
|
400
|
+
return {
|
|
401
|
+
"project_root": str(self.project_root),
|
|
402
|
+
"target_coverage": self.target_coverage,
|
|
403
|
+
}
|
|
404
|
+
|
|
405
|
+
try:
|
|
406
|
+
summary = self._project_index.get_summary()
|
|
407
|
+
|
|
408
|
+
# Get files needing tests for gap analysis
|
|
409
|
+
files_needing_tests = self._project_index.get_files_needing_tests()
|
|
410
|
+
|
|
411
|
+
# Get high impact files for prioritization
|
|
412
|
+
high_impact_files = self._project_index.get_high_impact_files()
|
|
413
|
+
|
|
414
|
+
return {
|
|
415
|
+
"project_root": str(self.project_root),
|
|
416
|
+
"target_coverage": self.target_coverage,
|
|
417
|
+
"project_stats": {
|
|
418
|
+
"total_files": summary.total_files,
|
|
419
|
+
"source_files": summary.source_files,
|
|
420
|
+
"test_files": summary.test_files,
|
|
421
|
+
"total_loc": summary.total_lines_of_code,
|
|
422
|
+
"avg_complexity": summary.avg_complexity,
|
|
423
|
+
"test_coverage_avg": summary.test_coverage_avg,
|
|
424
|
+
},
|
|
425
|
+
"coverage_data": {
|
|
426
|
+
"current_coverage": summary.test_coverage_avg,
|
|
427
|
+
"files_without_tests": summary.files_without_tests,
|
|
428
|
+
"files_needing_tests": len(files_needing_tests),
|
|
429
|
+
},
|
|
430
|
+
"files_to_analyze": self._get_file_contents_for_analysis(
|
|
431
|
+
files_needing_tests[:5]
|
|
432
|
+
), # Top 5 files with code
|
|
433
|
+
"high_impact_files": [
|
|
434
|
+
{
|
|
435
|
+
"path": str(file.path),
|
|
436
|
+
"impact_score": file.impact_score,
|
|
437
|
+
"complexity": file.complexity_score,
|
|
438
|
+
"lines": file.lines_of_code,
|
|
439
|
+
}
|
|
440
|
+
for file in high_impact_files[:5] # Top 5 high-impact files
|
|
441
|
+
],
|
|
442
|
+
}
|
|
443
|
+
except Exception as e:
|
|
444
|
+
print(f" [ProjectIndex] Could not load project data: {e}")
|
|
445
|
+
return {
|
|
446
|
+
"project_root": str(self.project_root),
|
|
447
|
+
"target_coverage": self.target_coverage,
|
|
448
|
+
}
|
|
449
|
+
|
|
450
|
+
    async def _call_llm(
        self,
        agent: Agent,
        task: Task,
        context: dict,
    ) -> tuple[str, int, int, float]:
        """Call the LLM with agent/task configuration.

        Falls back to a mock response in three cases: no executor is
        available, the call exceeds the 120s timeout, or the executor
        raises any exception. Successful calls accumulate cost/token
        totals on the instance.

        Args:
            agent: Agent whose system prompt frames the call.
            task: Task supplying the user prompt (rendered with ``context``).
            context: Key/value data interpolated into the task prompt.

        Returns: (response_text, input_tokens, output_tokens, cost)
        """
        system_prompt = agent.get_system_prompt()
        user_prompt = task.get_user_prompt(context)

        if self._executor is None:
            # Fallback: return mock response
            return await self._mock_llm_call(agent, task)

        try:
            # Create execution context
            exec_context = ExecutionContext(
                task_type="test_generation",
                workflow_name="test-coverage-boost",
                step_name=agent.role,
            )

            # Execute with timeout using correct LLMExecutor API
            result = await asyncio.wait_for(
                self._executor.run(
                    task_type="test_generation",
                    prompt=user_prompt,
                    system=system_prompt,
                    context=exec_context,
                ),
                timeout=120.0,
            )

            response = result.content
            input_tokens = result.input_tokens
            output_tokens = result.output_tokens
            cost = result.cost

            # Track totals
            self._total_cost += cost
            self._total_input_tokens += input_tokens
            self._total_output_tokens += output_tokens

            return (response, input_tokens, output_tokens, cost)

        # Timeout is handled separately so the report names the cause.
        except asyncio.TimeoutError:
            print(f" [LLM] Timeout calling {agent.role}")
            return await self._mock_llm_call(agent, task, reason="Timeout")
        except Exception as e:
            print(f" [LLM] Error calling {agent.role}: {e}")
            return await self._mock_llm_call(agent, task, reason=str(e))
|
|
504
|
+
|
|
505
|
+
async def _mock_llm_call(
|
|
506
|
+
self,
|
|
507
|
+
agent: Agent,
|
|
508
|
+
task: Task,
|
|
509
|
+
reason: str = "Executor not available",
|
|
510
|
+
) -> tuple[str, int, int, float]:
|
|
511
|
+
"""Return mock response when LLM is unavailable."""
|
|
512
|
+
print(f" [LLM] Using mock response for {agent.role}: {reason}")
|
|
513
|
+
|
|
514
|
+
# Simple mock responses based on agent role
|
|
515
|
+
if "Gap Analyzer" in agent.role:
|
|
516
|
+
response = json.dumps(
|
|
517
|
+
{
|
|
518
|
+
"gaps": [
|
|
519
|
+
{
|
|
520
|
+
"file_path": "src/core.py",
|
|
521
|
+
"function": "process_data",
|
|
522
|
+
"line_start": 10,
|
|
523
|
+
"line_end": 50,
|
|
524
|
+
"priority": 0.9,
|
|
525
|
+
"reason": "High complexity function with no tests",
|
|
526
|
+
}
|
|
527
|
+
],
|
|
528
|
+
"current_coverage": 65.0,
|
|
529
|
+
"summary": "Mock response - no real gaps analyzed",
|
|
530
|
+
}
|
|
531
|
+
)
|
|
532
|
+
elif "Test Generator" in agent.role:
|
|
533
|
+
response = json.dumps({"tests": [], "total_tests": 0, "estimated_coverage_gain": 0.0})
|
|
534
|
+
else: # Validator
|
|
535
|
+
response = json.dumps(
|
|
536
|
+
{
|
|
537
|
+
"tests_passing": 0,
|
|
538
|
+
"tests_failing": 0,
|
|
539
|
+
"final_coverage": 65.0,
|
|
540
|
+
"coverage_improvement": 0.0,
|
|
541
|
+
"issues": ["Mock response - no real validation performed"],
|
|
542
|
+
"recommendations": [],
|
|
543
|
+
}
|
|
544
|
+
)
|
|
545
|
+
|
|
546
|
+
return (response, 0, 0, 0.0)
|
|
547
|
+
|
|
548
|
+
    async def execute(
        self,
        project_root: str | None = None,
        context: dict | None = None,
        **kwargs,  # Accept extra parameters from CLI
    ) -> TestCoverageBoostCrewResult:
        """Execute the test coverage boost crew.

        Runs the three agents strictly in sequence (Gap Analyzer →
        Test Generator → Test Validator), threading each agent's parsed
        output into the shared context for the next one. Progress and a
        final report are printed to stdout as a side effect.

        Args:
            project_root: Path to project root (overrides init value)
            context: Additional context for agents
            **kwargs: Additional arguments (e.g., target_coverage passed by CLI)

        Returns:
            TestCoverageBoostCrewResult with detailed outcomes
        """
        if project_root:
            self.project_root = Path(project_root).resolve()

        # Merge kwargs into context for CLI compatibility
        context = context or {}
        context.update(kwargs)

        started_at = datetime.now()

        print("\n" + "=" * 70)
        print(" TEST COVERAGE BOOST CREW")
        print("=" * 70)
        print(f"\n Project Root: {self.project_root}")
        print(f" Target Coverage: {self.target_coverage}%")
        print(f" Agents: {len(self.agents)} (sequential execution)")
        print("")

        # Initialize executor
        self._initialize_executor()

        # Get project context; caller-supplied context wins on key clashes.
        agent_context = self._get_project_context()
        agent_context.update(context)

        # Define tasks
        tasks = self.define_tasks()

        # Execute agents sequentially, passing results forward
        print(" 🚀 Executing agents sequentially...\n")

        # Agent 1: Gap Analyzer — its parsed gaps feed the generator's prompt.
        print(f" • {self.agents[0].role}")
        gap_response, _, _, _ = await self._call_llm(self.agents[0], tasks[0], agent_context)
        gap_data = self._parse_gap_analysis(gap_response)
        agent_context["gaps"] = gap_data.get("gaps", [])
        agent_context["current_coverage"] = gap_data.get("current_coverage", 0.0)

        # Agent 2: Test Generator — its tests feed the validator's prompt.
        print(f" • {self.agents[1].role}")
        gen_response, _, _, _ = await self._call_llm(self.agents[1], tasks[1], agent_context)
        test_data = self._parse_test_generation(gen_response)
        agent_context["generated_tests"] = test_data.get("tests", [])

        # Agent 3: Test Validator
        print(f" • {self.agents[2].role}")
        val_response, _, _, _ = await self._call_llm(self.agents[2], tasks[2], agent_context)
        validation_data = self._parse_validation(val_response)

        print("\n ✓ All agents completed\n")

        # Build result; if validation omitted final_coverage, improvement is 0.
        current_coverage = gap_data.get("current_coverage", 0.0)
        final_coverage = validation_data.get("final_coverage", current_coverage)
        coverage_improvement = final_coverage - current_coverage

        # Parse gaps into CoverageGap objects
        gaps_analyzed = []
        for gap in gap_data.get("gaps", [])[:5]:  # Top 5 gaps
            gaps_analyzed.append(
                CoverageGap(
                    file_path=gap.get("file_path", "unknown"),
                    function_name=gap.get("function", "unknown"),
                    line_start=gap.get("line_start", 0),
                    line_end=gap.get("line_end", 0),
                    priority=gap.get("priority", 0.5),
                    reason=gap.get("reason", "No reason provided"),
                )
            )

        # Parse generated tests into GeneratedTest objects
        generated_tests = []
        for test in test_data.get("tests", []):
            generated_tests.append(
                GeneratedTest(
                    test_name=test.get("test_name", "test_unknown"),
                    test_code=test.get("test_code", ""),
                    target_function=test.get("target_function", "unknown"),
                    target_file=test.get("target_file", "unknown"),
                    coverage_impact=test.get("coverage_impact", 0.0),
                )
            )

        # Calculate duration
        duration_ms = int((datetime.now() - started_at).total_seconds() * 1000)

        result = TestCoverageBoostCrewResult(
            success=True,
            current_coverage=current_coverage,
            target_coverage=self.target_coverage,
            final_coverage=final_coverage,
            coverage_improvement=coverage_improvement,
            gaps_found=len(gap_data.get("gaps", [])),
            tests_generated=test_data.get("total_tests", 0),
            tests_passing=validation_data.get("tests_passing", 0),
            gaps_analyzed=gaps_analyzed,
            generated_tests=generated_tests,
            agents_executed=len(self.agents),
            cost=self._total_cost,  # accumulated by _call_llm across agents
            duration_ms=duration_ms,
        )

        # Print formatted report
        print(self._format_report(result))

        return result
|
|
669
|
+
|
|
670
|
+
def _parse_gap_analysis(self, response: str) -> dict:
|
|
671
|
+
"""Parse gap analysis response."""
|
|
672
|
+
parsed = parse_xml_response(response)
|
|
673
|
+
answer = parsed["answer"]
|
|
674
|
+
|
|
675
|
+
# Clean up answer - strip ALL XML tags and code blocks
|
|
676
|
+
answer = re.sub(r"</?answer>", "", answer) # Remove all <answer> and </answer> tags
|
|
677
|
+
answer = re.sub(r"```json\s*", "", answer) # Remove ```json
|
|
678
|
+
answer = re.sub(r"```\s*", "", answer) # Remove closing ```
|
|
679
|
+
answer = answer.strip()
|
|
680
|
+
|
|
681
|
+
try:
|
|
682
|
+
data = json.loads(answer)
|
|
683
|
+
return data
|
|
684
|
+
except json.JSONDecodeError:
|
|
685
|
+
# Try regex extraction
|
|
686
|
+
gaps = []
|
|
687
|
+
current_coverage = 0.0
|
|
688
|
+
|
|
689
|
+
# Extract coverage
|
|
690
|
+
cov_match = re.search(r'"current_coverage"\s*:\s*(\d+\.?\d*)', answer)
|
|
691
|
+
if cov_match:
|
|
692
|
+
current_coverage = float(cov_match.group(1))
|
|
693
|
+
|
|
694
|
+
return {
|
|
695
|
+
"gaps": gaps,
|
|
696
|
+
"current_coverage": current_coverage,
|
|
697
|
+
"summary": "Could not parse gap analysis",
|
|
698
|
+
}
|
|
699
|
+
|
|
700
|
+
    def _parse_test_generation(self, response: str) -> dict:
        """Parse the Test Generator's response into a tests dict.

        First tries strict ``json.loads``; if the model produced malformed
        JSON, falls back to a lenient regex pass that salvages individual
        test objects field by field.

        Args:
            response: Raw LLM response text.

        Returns:
            Dict with "tests", "total_tests", and "estimated_coverage_gain".
        """
        parsed = parse_xml_response(response)
        answer = parsed["answer"]

        # Clean up answer - strip ALL XML tags and code blocks
        answer = re.sub(r"</?answer>", "", answer)  # Remove all <answer> and </answer> tags
        answer = re.sub(r"```json\s*", "", answer)  # Remove ```json
        answer = re.sub(r"```\s*", "", answer)  # Remove closing ```
        answer = answer.strip()

        try:
            data = json.loads(answer)
            return data
        except json.JSONDecodeError:
            # JSON parsing failed - attempt regex extraction
            tests = []

            # Pattern to extract test objects (handles malformed JSON)
            # More lenient pattern - looks for key fields in any order
            test_blocks = re.finditer(
                r'\{[^}]*"test_name"\s*:\s*"([^"]+)"[^}]*\}', answer, re.DOTALL
            )

            for match in test_blocks:
                block_text = match.group(0)

                # Extract test_name
                test_name_match = re.search(r'"test_name"\s*:\s*"([^"]+)"', block_text)
                test_name = test_name_match.group(1) if test_name_match else "test_unknown"

                # Extract test_code (handles escaped quotes)
                test_code_match = re.search(
                    r'"test_code"\s*:\s*"((?:[^"\\]|\\.)*?)"', block_text, re.DOTALL
                )
                test_code = test_code_match.group(1) if test_code_match else ""

                # Extract target_function
                func_match = re.search(r'"target_function"\s*:\s*"([^"]*)"', block_text)
                target_function = func_match.group(1) if func_match else "unknown"

                # Extract target_file
                file_match = re.search(r'"target_file"\s*:\s*"([^"]*)"', block_text)
                target_file = file_match.group(1) if file_match else "unknown"

                # Unescape the test code (JSON-style \n, \" and \\ sequences)
                test_code = test_code.replace("\\n", "\n").replace('\\"', '"').replace("\\\\", "\\")

                tests.append(
                    {
                        "test_name": test_name,
                        "test_code": test_code,
                        "target_function": target_function,
                        "target_file": target_file,
                        # Impact is unknown when salvaged via regex.
                        "coverage_impact": 0.0,
                    }
                )

            # Try to extract total_tests; keep whichever count is larger.
            total_tests = len(tests)
            total_match = re.search(r'"total_tests"\s*:\s*(\d+)', answer)
            if total_match:
                total_tests = max(total_tests, int(total_match.group(1)))

            # Extract estimated coverage gain
            coverage_gain = 0.0
            gain_match = re.search(r'"estimated_coverage_gain"\s*:\s*(\d+\.?\d*)', answer)
            if gain_match:
                coverage_gain = float(gain_match.group(1))

            return {
                "tests": tests,
                "total_tests": total_tests,
                "estimated_coverage_gain": coverage_gain,
            }
|
|
775
|
+
|
|
776
|
+
def _parse_validation(self, response: str) -> dict:
|
|
777
|
+
"""Parse validation response."""
|
|
778
|
+
parsed = parse_xml_response(response)
|
|
779
|
+
answer = parsed["answer"]
|
|
780
|
+
|
|
781
|
+
# Clean up answer - strip ALL XML tags and code blocks
|
|
782
|
+
answer = re.sub(r"</?answer>", "", answer) # Remove all <answer> and </answer> tags
|
|
783
|
+
answer = re.sub(r"```json\s*", "", answer) # Remove ```json
|
|
784
|
+
answer = re.sub(r"```\s*", "", answer) # Remove closing ```
|
|
785
|
+
answer = answer.strip()
|
|
786
|
+
|
|
787
|
+
try:
|
|
788
|
+
data = json.loads(answer)
|
|
789
|
+
return data
|
|
790
|
+
except json.JSONDecodeError:
|
|
791
|
+
return {
|
|
792
|
+
"tests_passing": 0,
|
|
793
|
+
"tests_failing": 0,
|
|
794
|
+
"final_coverage": 0.0,
|
|
795
|
+
"coverage_improvement": 0.0,
|
|
796
|
+
"issues": ["Could not parse validation results"],
|
|
797
|
+
"recommendations": [],
|
|
798
|
+
}
|
|
799
|
+
|
|
800
|
+
def _format_report(self, result: TestCoverageBoostCrewResult) -> str:
|
|
801
|
+
"""Format result as human-readable report."""
|
|
802
|
+
lines = []
|
|
803
|
+
|
|
804
|
+
lines.append("=" * 70)
|
|
805
|
+
lines.append("TEST COVERAGE BOOST RESULTS")
|
|
806
|
+
lines.append("=" * 70)
|
|
807
|
+
lines.append("")
|
|
808
|
+
lines.append(f"Current Coverage: {result.current_coverage:.1f}%")
|
|
809
|
+
lines.append(f"Target Coverage: {result.target_coverage:.1f}%")
|
|
810
|
+
lines.append(f"Final Coverage: {result.final_coverage:.1f}%")
|
|
811
|
+
lines.append(f"Improvement: +{result.coverage_improvement:.1f}%")
|
|
812
|
+
lines.append("")
|
|
813
|
+
lines.append(f"Gaps Found: {result.gaps_found}")
|
|
814
|
+
lines.append(f"Tests Generated: {result.tests_generated}")
|
|
815
|
+
lines.append(f"Tests Passing: {result.tests_passing}")
|
|
816
|
+
lines.append("")
|
|
817
|
+
lines.append(f"Cost: ${result.cost:.4f}")
|
|
818
|
+
lines.append(f"Duration: {result.duration_ms}ms ({result.duration_ms / 1000:.1f}s)")
|
|
819
|
+
lines.append("")
|
|
820
|
+
|
|
821
|
+
if result.gaps_analyzed:
|
|
822
|
+
lines.append("-" * 70)
|
|
823
|
+
lines.append("TOP COVERAGE GAPS")
|
|
824
|
+
lines.append("-" * 70)
|
|
825
|
+
for i, gap in enumerate(result.gaps_analyzed[:5], 1):
|
|
826
|
+
lines.append(
|
|
827
|
+
f"{i}. {gap.file_path}::{gap.function_name} (priority: {gap.priority:.2f})"
|
|
828
|
+
)
|
|
829
|
+
lines.append(f" {gap.reason}")
|
|
830
|
+
lines.append("")
|
|
831
|
+
|
|
832
|
+
if result.generated_tests:
|
|
833
|
+
lines.append("-" * 70)
|
|
834
|
+
lines.append("GENERATED TESTS")
|
|
835
|
+
lines.append("-" * 70)
|
|
836
|
+
for i, test in enumerate(result.generated_tests[:3], 1):
|
|
837
|
+
lines.append(f"{i}. {test.test_name}")
|
|
838
|
+
lines.append(f" Target: {test.target_file}::{test.target_function}")
|
|
839
|
+
lines.append(f" Impact: +{test.coverage_impact:.1f}%")
|
|
840
|
+
lines.append("")
|
|
841
|
+
|
|
842
|
+
lines.append("=" * 70)
|
|
843
|
+
if result.coverage_improvement > 0:
|
|
844
|
+
lines.append(f"✅ Coverage improved by {result.coverage_improvement:.1f}%")
|
|
845
|
+
else:
|
|
846
|
+
lines.append("⚠️ No coverage improvement achieved")
|
|
847
|
+
lines.append("=" * 70)
|
|
848
|
+
|
|
849
|
+
return "\n".join(lines)
|