attune-ai 2.0.0 (attune_ai-2.0.0-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- attune/__init__.py +358 -0
- attune/adaptive/__init__.py +13 -0
- attune/adaptive/task_complexity.py +127 -0
- attune/agent_monitoring.py +414 -0
- attune/cache/__init__.py +117 -0
- attune/cache/base.py +166 -0
- attune/cache/dependency_manager.py +256 -0
- attune/cache/hash_only.py +251 -0
- attune/cache/hybrid.py +457 -0
- attune/cache/storage.py +285 -0
- attune/cache_monitor.py +356 -0
- attune/cache_stats.py +298 -0
- attune/cli/__init__.py +152 -0
- attune/cli/__main__.py +12 -0
- attune/cli/commands/__init__.py +1 -0
- attune/cli/commands/batch.py +264 -0
- attune/cli/commands/cache.py +248 -0
- attune/cli/commands/help.py +331 -0
- attune/cli/commands/info.py +140 -0
- attune/cli/commands/inspect.py +436 -0
- attune/cli/commands/inspection.py +57 -0
- attune/cli/commands/memory.py +48 -0
- attune/cli/commands/metrics.py +92 -0
- attune/cli/commands/orchestrate.py +184 -0
- attune/cli/commands/patterns.py +207 -0
- attune/cli/commands/profiling.py +202 -0
- attune/cli/commands/provider.py +98 -0
- attune/cli/commands/routing.py +285 -0
- attune/cli/commands/setup.py +96 -0
- attune/cli/commands/status.py +235 -0
- attune/cli/commands/sync.py +166 -0
- attune/cli/commands/tier.py +121 -0
- attune/cli/commands/utilities.py +114 -0
- attune/cli/commands/workflow.py +579 -0
- attune/cli/core.py +32 -0
- attune/cli/parsers/__init__.py +68 -0
- attune/cli/parsers/batch.py +118 -0
- attune/cli/parsers/cache.py +65 -0
- attune/cli/parsers/help.py +41 -0
- attune/cli/parsers/info.py +26 -0
- attune/cli/parsers/inspect.py +66 -0
- attune/cli/parsers/metrics.py +42 -0
- attune/cli/parsers/orchestrate.py +61 -0
- attune/cli/parsers/patterns.py +54 -0
- attune/cli/parsers/provider.py +40 -0
- attune/cli/parsers/routing.py +110 -0
- attune/cli/parsers/setup.py +42 -0
- attune/cli/parsers/status.py +47 -0
- attune/cli/parsers/sync.py +31 -0
- attune/cli/parsers/tier.py +33 -0
- attune/cli/parsers/workflow.py +77 -0
- attune/cli/utils/__init__.py +1 -0
- attune/cli/utils/data.py +242 -0
- attune/cli/utils/helpers.py +68 -0
- attune/cli_legacy.py +3957 -0
- attune/cli_minimal.py +1159 -0
- attune/cli_router.py +437 -0
- attune/cli_unified.py +814 -0
- attune/config/__init__.py +66 -0
- attune/config/xml_config.py +286 -0
- attune/config.py +545 -0
- attune/coordination.py +870 -0
- attune/core.py +1511 -0
- attune/core_modules/__init__.py +15 -0
- attune/cost_tracker.py +626 -0
- attune/dashboard/__init__.py +41 -0
- attune/dashboard/app.py +512 -0
- attune/dashboard/simple_server.py +435 -0
- attune/dashboard/standalone_server.py +547 -0
- attune/discovery.py +306 -0
- attune/emergence.py +306 -0
- attune/exceptions.py +123 -0
- attune/feedback_loops.py +373 -0
- attune/hot_reload/README.md +473 -0
- attune/hot_reload/__init__.py +62 -0
- attune/hot_reload/config.py +83 -0
- attune/hot_reload/integration.py +229 -0
- attune/hot_reload/reloader.py +298 -0
- attune/hot_reload/watcher.py +183 -0
- attune/hot_reload/websocket.py +177 -0
- attune/levels.py +577 -0
- attune/leverage_points.py +441 -0
- attune/logging_config.py +261 -0
- attune/mcp/__init__.py +10 -0
- attune/mcp/server.py +506 -0
- attune/memory/__init__.py +237 -0
- attune/memory/claude_memory.py +469 -0
- attune/memory/config.py +224 -0
- attune/memory/control_panel.py +1290 -0
- attune/memory/control_panel_support.py +145 -0
- attune/memory/cross_session.py +845 -0
- attune/memory/edges.py +179 -0
- attune/memory/encryption.py +159 -0
- attune/memory/file_session.py +770 -0
- attune/memory/graph.py +570 -0
- attune/memory/long_term.py +913 -0
- attune/memory/long_term_types.py +99 -0
- attune/memory/mixins/__init__.py +25 -0
- attune/memory/mixins/backend_init_mixin.py +249 -0
- attune/memory/mixins/capabilities_mixin.py +208 -0
- attune/memory/mixins/handoff_mixin.py +208 -0
- attune/memory/mixins/lifecycle_mixin.py +49 -0
- attune/memory/mixins/long_term_mixin.py +352 -0
- attune/memory/mixins/promotion_mixin.py +109 -0
- attune/memory/mixins/short_term_mixin.py +182 -0
- attune/memory/nodes.py +179 -0
- attune/memory/redis_bootstrap.py +540 -0
- attune/memory/security/__init__.py +31 -0
- attune/memory/security/audit_logger.py +932 -0
- attune/memory/security/pii_scrubber.py +640 -0
- attune/memory/security/secrets_detector.py +678 -0
- attune/memory/short_term.py +2192 -0
- attune/memory/simple_storage.py +302 -0
- attune/memory/storage/__init__.py +15 -0
- attune/memory/storage_backend.py +167 -0
- attune/memory/summary_index.py +583 -0
- attune/memory/types.py +446 -0
- attune/memory/unified.py +182 -0
- attune/meta_workflows/__init__.py +74 -0
- attune/meta_workflows/agent_creator.py +248 -0
- attune/meta_workflows/builtin_templates.py +567 -0
- attune/meta_workflows/cli_commands/__init__.py +56 -0
- attune/meta_workflows/cli_commands/agent_commands.py +321 -0
- attune/meta_workflows/cli_commands/analytics_commands.py +442 -0
- attune/meta_workflows/cli_commands/config_commands.py +232 -0
- attune/meta_workflows/cli_commands/memory_commands.py +182 -0
- attune/meta_workflows/cli_commands/template_commands.py +354 -0
- attune/meta_workflows/cli_commands/workflow_commands.py +382 -0
- attune/meta_workflows/cli_meta_workflows.py +59 -0
- attune/meta_workflows/form_engine.py +292 -0
- attune/meta_workflows/intent_detector.py +409 -0
- attune/meta_workflows/models.py +569 -0
- attune/meta_workflows/pattern_learner.py +738 -0
- attune/meta_workflows/plan_generator.py +384 -0
- attune/meta_workflows/session_context.py +397 -0
- attune/meta_workflows/template_registry.py +229 -0
- attune/meta_workflows/workflow.py +984 -0
- attune/metrics/__init__.py +12 -0
- attune/metrics/collector.py +31 -0
- attune/metrics/prompt_metrics.py +194 -0
- attune/models/__init__.py +172 -0
- attune/models/__main__.py +13 -0
- attune/models/adaptive_routing.py +437 -0
- attune/models/auth_cli.py +444 -0
- attune/models/auth_strategy.py +450 -0
- attune/models/cli.py +655 -0
- attune/models/empathy_executor.py +354 -0
- attune/models/executor.py +257 -0
- attune/models/fallback.py +762 -0
- attune/models/provider_config.py +282 -0
- attune/models/registry.py +472 -0
- attune/models/tasks.py +359 -0
- attune/models/telemetry/__init__.py +71 -0
- attune/models/telemetry/analytics.py +594 -0
- attune/models/telemetry/backend.py +196 -0
- attune/models/telemetry/data_models.py +431 -0
- attune/models/telemetry/storage.py +489 -0
- attune/models/token_estimator.py +420 -0
- attune/models/validation.py +280 -0
- attune/monitoring/__init__.py +52 -0
- attune/monitoring/alerts.py +946 -0
- attune/monitoring/alerts_cli.py +448 -0
- attune/monitoring/multi_backend.py +271 -0
- attune/monitoring/otel_backend.py +362 -0
- attune/optimization/__init__.py +19 -0
- attune/optimization/context_optimizer.py +272 -0
- attune/orchestration/__init__.py +67 -0
- attune/orchestration/agent_templates.py +707 -0
- attune/orchestration/config_store.py +499 -0
- attune/orchestration/execution_strategies.py +2111 -0
- attune/orchestration/meta_orchestrator.py +1168 -0
- attune/orchestration/pattern_learner.py +696 -0
- attune/orchestration/real_tools.py +931 -0
- attune/pattern_cache.py +187 -0
- attune/pattern_library.py +542 -0
- attune/patterns/debugging/all_patterns.json +81 -0
- attune/patterns/debugging/workflow_20260107_1770825e.json +77 -0
- attune/patterns/refactoring_memory.json +89 -0
- attune/persistence.py +564 -0
- attune/platform_utils.py +265 -0
- attune/plugins/__init__.py +28 -0
- attune/plugins/base.py +361 -0
- attune/plugins/registry.py +268 -0
- attune/project_index/__init__.py +32 -0
- attune/project_index/cli.py +335 -0
- attune/project_index/index.py +667 -0
- attune/project_index/models.py +504 -0
- attune/project_index/reports.py +474 -0
- attune/project_index/scanner.py +777 -0
- attune/project_index/scanner_parallel.py +291 -0
- attune/prompts/__init__.py +61 -0
- attune/prompts/config.py +77 -0
- attune/prompts/context.py +177 -0
- attune/prompts/parser.py +285 -0
- attune/prompts/registry.py +313 -0
- attune/prompts/templates.py +208 -0
- attune/redis_config.py +302 -0
- attune/redis_memory.py +799 -0
- attune/resilience/__init__.py +56 -0
- attune/resilience/circuit_breaker.py +256 -0
- attune/resilience/fallback.py +179 -0
- attune/resilience/health.py +300 -0
- attune/resilience/retry.py +209 -0
- attune/resilience/timeout.py +135 -0
- attune/routing/__init__.py +43 -0
- attune/routing/chain_executor.py +433 -0
- attune/routing/classifier.py +217 -0
- attune/routing/smart_router.py +234 -0
- attune/routing/workflow_registry.py +343 -0
- attune/scaffolding/README.md +589 -0
- attune/scaffolding/__init__.py +35 -0
- attune/scaffolding/__main__.py +14 -0
- attune/scaffolding/cli.py +240 -0
- attune/scaffolding/templates/base_wizard.py.jinja2 +121 -0
- attune/scaffolding/templates/coach_wizard.py.jinja2 +321 -0
- attune/scaffolding/templates/domain_wizard.py.jinja2 +408 -0
- attune/scaffolding/templates/linear_flow_wizard.py.jinja2 +203 -0
- attune/socratic/__init__.py +256 -0
- attune/socratic/ab_testing.py +958 -0
- attune/socratic/blueprint.py +533 -0
- attune/socratic/cli.py +703 -0
- attune/socratic/collaboration.py +1114 -0
- attune/socratic/domain_templates.py +924 -0
- attune/socratic/embeddings.py +738 -0
- attune/socratic/engine.py +794 -0
- attune/socratic/explainer.py +682 -0
- attune/socratic/feedback.py +772 -0
- attune/socratic/forms.py +629 -0
- attune/socratic/generator.py +732 -0
- attune/socratic/llm_analyzer.py +637 -0
- attune/socratic/mcp_server.py +702 -0
- attune/socratic/session.py +312 -0
- attune/socratic/storage.py +667 -0
- attune/socratic/success.py +730 -0
- attune/socratic/visual_editor.py +860 -0
- attune/socratic/web_ui.py +958 -0
- attune/telemetry/__init__.py +39 -0
- attune/telemetry/agent_coordination.py +475 -0
- attune/telemetry/agent_tracking.py +367 -0
- attune/telemetry/approval_gates.py +545 -0
- attune/telemetry/cli.py +1231 -0
- attune/telemetry/commands/__init__.py +14 -0
- attune/telemetry/commands/dashboard_commands.py +696 -0
- attune/telemetry/event_streaming.py +409 -0
- attune/telemetry/feedback_loop.py +567 -0
- attune/telemetry/usage_tracker.py +591 -0
- attune/templates.py +754 -0
- attune/test_generator/__init__.py +38 -0
- attune/test_generator/__main__.py +14 -0
- attune/test_generator/cli.py +234 -0
- attune/test_generator/generator.py +355 -0
- attune/test_generator/risk_analyzer.py +216 -0
- attune/test_generator/templates/unit_test.py.jinja2 +272 -0
- attune/tier_recommender.py +384 -0
- attune/tools.py +183 -0
- attune/trust/__init__.py +28 -0
- attune/trust/circuit_breaker.py +579 -0
- attune/trust_building.py +527 -0
- attune/validation/__init__.py +19 -0
- attune/validation/xml_validator.py +281 -0
- attune/vscode_bridge.py +173 -0
- attune/workflow_commands.py +780 -0
- attune/workflow_patterns/__init__.py +33 -0
- attune/workflow_patterns/behavior.py +249 -0
- attune/workflow_patterns/core.py +76 -0
- attune/workflow_patterns/output.py +99 -0
- attune/workflow_patterns/registry.py +255 -0
- attune/workflow_patterns/structural.py +288 -0
- attune/workflows/__init__.py +539 -0
- attune/workflows/autonomous_test_gen.py +1268 -0
- attune/workflows/base.py +2667 -0
- attune/workflows/batch_processing.py +342 -0
- attune/workflows/bug_predict.py +1084 -0
- attune/workflows/builder.py +273 -0
- attune/workflows/caching.py +253 -0
- attune/workflows/code_review.py +1048 -0
- attune/workflows/code_review_adapters.py +312 -0
- attune/workflows/code_review_pipeline.py +722 -0
- attune/workflows/config.py +645 -0
- attune/workflows/dependency_check.py +644 -0
- attune/workflows/document_gen/__init__.py +25 -0
- attune/workflows/document_gen/config.py +30 -0
- attune/workflows/document_gen/report_formatter.py +162 -0
- attune/workflows/document_gen/workflow.py +1426 -0
- attune/workflows/document_manager.py +216 -0
- attune/workflows/document_manager_README.md +134 -0
- attune/workflows/documentation_orchestrator.py +1205 -0
- attune/workflows/history.py +510 -0
- attune/workflows/keyboard_shortcuts/__init__.py +39 -0
- attune/workflows/keyboard_shortcuts/generators.py +391 -0
- attune/workflows/keyboard_shortcuts/parsers.py +416 -0
- attune/workflows/keyboard_shortcuts/prompts.py +295 -0
- attune/workflows/keyboard_shortcuts/schema.py +193 -0
- attune/workflows/keyboard_shortcuts/workflow.py +509 -0
- attune/workflows/llm_base.py +363 -0
- attune/workflows/manage_docs.py +87 -0
- attune/workflows/manage_docs_README.md +134 -0
- attune/workflows/manage_documentation.py +821 -0
- attune/workflows/new_sample_workflow1.py +149 -0
- attune/workflows/new_sample_workflow1_README.md +150 -0
- attune/workflows/orchestrated_health_check.py +849 -0
- attune/workflows/orchestrated_release_prep.py +600 -0
- attune/workflows/output.py +413 -0
- attune/workflows/perf_audit.py +863 -0
- attune/workflows/pr_review.py +762 -0
- attune/workflows/progress.py +785 -0
- attune/workflows/progress_server.py +322 -0
- attune/workflows/progressive/README 2.md +454 -0
- attune/workflows/progressive/README.md +454 -0
- attune/workflows/progressive/__init__.py +82 -0
- attune/workflows/progressive/cli.py +219 -0
- attune/workflows/progressive/core.py +488 -0
- attune/workflows/progressive/orchestrator.py +723 -0
- attune/workflows/progressive/reports.py +520 -0
- attune/workflows/progressive/telemetry.py +274 -0
- attune/workflows/progressive/test_gen.py +495 -0
- attune/workflows/progressive/workflow.py +589 -0
- attune/workflows/refactor_plan.py +694 -0
- attune/workflows/release_prep.py +895 -0
- attune/workflows/release_prep_crew.py +969 -0
- attune/workflows/research_synthesis.py +404 -0
- attune/workflows/routing.py +168 -0
- attune/workflows/secure_release.py +593 -0
- attune/workflows/security_adapters.py +297 -0
- attune/workflows/security_audit.py +1329 -0
- attune/workflows/security_audit_phase3.py +355 -0
- attune/workflows/seo_optimization.py +633 -0
- attune/workflows/step_config.py +234 -0
- attune/workflows/telemetry_mixin.py +269 -0
- attune/workflows/test5.py +125 -0
- attune/workflows/test5_README.md +158 -0
- attune/workflows/test_coverage_boost_crew.py +849 -0
- attune/workflows/test_gen/__init__.py +52 -0
- attune/workflows/test_gen/ast_analyzer.py +249 -0
- attune/workflows/test_gen/config.py +88 -0
- attune/workflows/test_gen/data_models.py +38 -0
- attune/workflows/test_gen/report_formatter.py +289 -0
- attune/workflows/test_gen/test_templates.py +381 -0
- attune/workflows/test_gen/workflow.py +655 -0
- attune/workflows/test_gen.py +54 -0
- attune/workflows/test_gen_behavioral.py +477 -0
- attune/workflows/test_gen_parallel.py +341 -0
- attune/workflows/test_lifecycle.py +526 -0
- attune/workflows/test_maintenance.py +627 -0
- attune/workflows/test_maintenance_cli.py +590 -0
- attune/workflows/test_maintenance_crew.py +840 -0
- attune/workflows/test_runner.py +622 -0
- attune/workflows/tier_tracking.py +531 -0
- attune/workflows/xml_enhanced_crew.py +285 -0
- attune_ai-2.0.0.dist-info/METADATA +1026 -0
- attune_ai-2.0.0.dist-info/RECORD +457 -0
- attune_ai-2.0.0.dist-info/WHEEL +5 -0
- attune_ai-2.0.0.dist-info/entry_points.txt +26 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE +201 -0
- attune_ai-2.0.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +101 -0
- attune_ai-2.0.0.dist-info/top_level.txt +5 -0
- attune_healthcare/__init__.py +13 -0
- attune_healthcare/monitors/__init__.py +9 -0
- attune_healthcare/monitors/clinical_protocol_monitor.py +315 -0
- attune_healthcare/monitors/monitoring/__init__.py +44 -0
- attune_healthcare/monitors/monitoring/protocol_checker.py +300 -0
- attune_healthcare/monitors/monitoring/protocol_loader.py +214 -0
- attune_healthcare/monitors/monitoring/sensor_parsers.py +306 -0
- attune_healthcare/monitors/monitoring/trajectory_analyzer.py +389 -0
- attune_llm/README.md +553 -0
- attune_llm/__init__.py +28 -0
- attune_llm/agent_factory/__init__.py +53 -0
- attune_llm/agent_factory/adapters/__init__.py +85 -0
- attune_llm/agent_factory/adapters/autogen_adapter.py +312 -0
- attune_llm/agent_factory/adapters/crewai_adapter.py +483 -0
- attune_llm/agent_factory/adapters/haystack_adapter.py +298 -0
- attune_llm/agent_factory/adapters/langchain_adapter.py +362 -0
- attune_llm/agent_factory/adapters/langgraph_adapter.py +333 -0
- attune_llm/agent_factory/adapters/native.py +228 -0
- attune_llm/agent_factory/adapters/wizard_adapter.py +423 -0
- attune_llm/agent_factory/base.py +305 -0
- attune_llm/agent_factory/crews/__init__.py +67 -0
- attune_llm/agent_factory/crews/code_review.py +1113 -0
- attune_llm/agent_factory/crews/health_check.py +1262 -0
- attune_llm/agent_factory/crews/refactoring.py +1128 -0
- attune_llm/agent_factory/crews/security_audit.py +1018 -0
- attune_llm/agent_factory/decorators.py +287 -0
- attune_llm/agent_factory/factory.py +558 -0
- attune_llm/agent_factory/framework.py +193 -0
- attune_llm/agent_factory/memory_integration.py +328 -0
- attune_llm/agent_factory/resilient.py +320 -0
- attune_llm/agents_md/__init__.py +22 -0
- attune_llm/agents_md/loader.py +218 -0
- attune_llm/agents_md/parser.py +271 -0
- attune_llm/agents_md/registry.py +307 -0
- attune_llm/claude_memory.py +466 -0
- attune_llm/cli/__init__.py +8 -0
- attune_llm/cli/sync_claude.py +487 -0
- attune_llm/code_health.py +1313 -0
- attune_llm/commands/__init__.py +51 -0
- attune_llm/commands/context.py +375 -0
- attune_llm/commands/loader.py +301 -0
- attune_llm/commands/models.py +231 -0
- attune_llm/commands/parser.py +371 -0
- attune_llm/commands/registry.py +429 -0
- attune_llm/config/__init__.py +29 -0
- attune_llm/config/unified.py +291 -0
- attune_llm/context/__init__.py +22 -0
- attune_llm/context/compaction.py +455 -0
- attune_llm/context/manager.py +434 -0
- attune_llm/contextual_patterns.py +361 -0
- attune_llm/core.py +907 -0
- attune_llm/git_pattern_extractor.py +435 -0
- attune_llm/hooks/__init__.py +24 -0
- attune_llm/hooks/config.py +306 -0
- attune_llm/hooks/executor.py +289 -0
- attune_llm/hooks/registry.py +302 -0
- attune_llm/hooks/scripts/__init__.py +39 -0
- attune_llm/hooks/scripts/evaluate_session.py +201 -0
- attune_llm/hooks/scripts/first_time_init.py +285 -0
- attune_llm/hooks/scripts/pre_compact.py +207 -0
- attune_llm/hooks/scripts/session_end.py +183 -0
- attune_llm/hooks/scripts/session_start.py +163 -0
- attune_llm/hooks/scripts/suggest_compact.py +225 -0
- attune_llm/learning/__init__.py +30 -0
- attune_llm/learning/evaluator.py +438 -0
- attune_llm/learning/extractor.py +514 -0
- attune_llm/learning/storage.py +560 -0
- attune_llm/levels.py +227 -0
- attune_llm/pattern_confidence.py +414 -0
- attune_llm/pattern_resolver.py +272 -0
- attune_llm/pattern_summary.py +350 -0
- attune_llm/providers.py +967 -0
- attune_llm/routing/__init__.py +32 -0
- attune_llm/routing/model_router.py +362 -0
- attune_llm/security/IMPLEMENTATION_SUMMARY.md +413 -0
- attune_llm/security/PHASE2_COMPLETE.md +384 -0
- attune_llm/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
- attune_llm/security/QUICK_REFERENCE.md +316 -0
- attune_llm/security/README.md +262 -0
- attune_llm/security/__init__.py +62 -0
- attune_llm/security/audit_logger.py +929 -0
- attune_llm/security/audit_logger_example.py +152 -0
- attune_llm/security/pii_scrubber.py +640 -0
- attune_llm/security/secrets_detector.py +678 -0
- attune_llm/security/secrets_detector_example.py +304 -0
- attune_llm/security/secure_memdocs.py +1192 -0
- attune_llm/security/secure_memdocs_example.py +278 -0
- attune_llm/session_status.py +745 -0
- attune_llm/state.py +246 -0
- attune_llm/utils/__init__.py +5 -0
- attune_llm/utils/tokens.py +349 -0
- attune_software/SOFTWARE_PLUGIN_README.md +57 -0
- attune_software/__init__.py +13 -0
- attune_software/cli/__init__.py +120 -0
- attune_software/cli/inspect.py +362 -0
- attune_software/cli.py +574 -0
- attune_software/plugin.py +188 -0
- workflow_scaffolding/__init__.py +11 -0
- workflow_scaffolding/__main__.py +12 -0
- workflow_scaffolding/cli.py +206 -0
- workflow_scaffolding/generator.py +265 -0
attune/orchestration/real_tools.py (+931 lines)

@@ -0,0 +1,931 @@

```python
"""Real tool implementations for meta-orchestration agents.

This module provides actual tool integrations for agents to interact with
real systems instead of returning mock data.

Security:
- All file operations validated with _validate_file_path()
- Subprocess calls sanitized
- Output size limited to prevent memory issues
"""

import json
import logging
import subprocess
from dataclasses import dataclass
from pathlib import Path
from typing import Any

logger = logging.getLogger(__name__)


def _validate_file_path(path: str) -> Path:
    """Validate file path to prevent path traversal (simplified version).

    Args:
        path: File path to validate

    Returns:
        Validated Path object

    Raises:
        ValueError: If path is invalid
    """
    if not path or not isinstance(path, str):
        raise ValueError("path must be a non-empty string")

    if "\x00" in path:
        raise ValueError("path contains null bytes")

    try:
        resolved = Path(path).resolve()
    except (OSError, RuntimeError) as e:
        raise ValueError(f"Invalid path: {e}") from e

    # Block system directories
    dangerous_paths = ["/etc", "/sys", "/proc", "/dev"]
    for dangerous in dangerous_paths:
        if str(resolved).startswith(dangerous):
            raise ValueError(f"Cannot write to system directory: {dangerous}")

    return resolved


@dataclass
class CoverageReport:
    """Coverage analysis report from pytest-cov."""

    total_coverage: float
    files_analyzed: int
    uncovered_files: list[dict[str, Any]]
    missing_lines: dict[str, list[int]]
```
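Every file write below is funneled through `_validate_file_path()`. A minimal sketch of its behavior on POSIX (the paths are illustrative, not taken from the package):

```python
from attune.orchestration.real_tools import _validate_file_path

# A normal project-relative path resolves and is returned.
ok = _validate_file_path("tests/generated/test_example.py")

# Paths under blocked system directories are rejected.
try:
    _validate_file_path("/etc/passwd")
except ValueError as exc:
    print(exc)  # "Cannot write to system directory: /etc"
```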
```python
class RealCoverageAnalyzer:
    """Runs real pytest coverage analysis."""

    def __init__(self, project_root: str = "."):
        """Initialize coverage analyzer.

        Args:
            project_root: Project root directory
        """
        self.project_root = Path(project_root).resolve()

    def analyze(self, use_existing: bool = True) -> CoverageReport:
        """Run coverage analysis on all project packages.

        Analyzes coverage for: attune, empathy_llm_toolkit,
        empathy_software_plugin, empathy_healthcare_plugin

        Args:
            use_existing: Use existing coverage.json if available (default: True)

        Returns:
            CoverageReport with results

        Raises:
            RuntimeError: If coverage analysis fails
        """
        logger.info("Running coverage analysis on all packages")

        coverage_file = self.project_root / "coverage.json"

        # Check if we can use existing coverage data
        if use_existing and coverage_file.exists():
            import time

            file_age = time.time() - coverage_file.stat().st_mtime
            # Use existing file if less than 1 hour old
            if file_age < 3600:
                logger.info(f"Using existing coverage data (age: {file_age / 60:.1f} minutes)")
            else:
                logger.info("Existing coverage data is stale, regenerating")
                use_existing = False

        if not use_existing or not coverage_file.exists():
            try:
                # Run pytest with coverage on test suite
                logger.info("Running test suite to generate coverage (may take 2-5 minutes)")

                # Use actual package names (match pyproject.toml configuration)
                cov_packages = [
                    "attune",
                    "empathy_llm_toolkit",
                    "empathy_software_plugin",
                    "empathy_healthcare_plugin",
                ]

                cmd = [
                    "pytest",
                    "tests/",  # Run all tests to measure coverage
                    "--cov-report=json",
                    "--cov-report=term-missing",
                    "-q",
                    "--tb=no",
                    "--maxfail=50",  # Continue despite failures
                ]

                # Add --cov for each package
                for pkg in cov_packages:
                    cmd.append(f"--cov={pkg}")

                _result = subprocess.run(  # Result not needed, only coverage.json
                    cmd,
                    cwd=self.project_root,
                    capture_output=True,
                    text=True,
                    timeout=600,  # Increased to 10 minutes
                )

            except subprocess.TimeoutExpired:
                logger.warning("Coverage generation timed out, checking for partial results")
                # Fall through to use whatever coverage.json exists

        # Read coverage.json
        if not coverage_file.exists():
            raise RuntimeError(
                "Coverage report not found. Run 'pytest --cov=src --cov-report=json' first."
            )

        try:
            with coverage_file.open() as f:
                coverage_data = json.load(f)

            # Parse results
            total_coverage = coverage_data["totals"]["percent_covered"]
            files = coverage_data.get("files", {})

            # Identify low coverage files
            uncovered_files = []
            missing_lines = {}

            for filepath, file_data in files.items():
                file_coverage = file_data["summary"]["percent_covered"]
                if file_coverage < 80:  # Below target
                    uncovered_files.append(
                        {
                            "path": filepath,
                            "coverage": file_coverage,
                            "missing_lines": file_data["missing_lines"],
                        }
                    )
                    missing_lines[filepath] = file_data["missing_lines"]

            logger.info(
                f"Coverage analysis complete: {total_coverage:.1f}% "
                f"({len(uncovered_files)} files below 80%)"
            )

            return CoverageReport(
                total_coverage=total_coverage,
                files_analyzed=len(files),
                uncovered_files=uncovered_files,
                missing_lines=missing_lines,
            )

        except Exception as e:
            logger.error(f"Coverage analysis failed: {e}")
            raise RuntimeError(f"Coverage analysis failed: {e}") from e
```
````python
class RealTestGenerator:
    """Generates actual test code using LLM."""

    def __init__(
        self,
        project_root: str = ".",
        output_dir: str = "tests/generated",
        api_key: str | None = None,
        use_llm: bool = True,
    ):
        """Initialize test generator.

        Args:
            project_root: Project root directory
            output_dir: Directory for generated tests (relative to project_root)
            api_key: Anthropic API key (or uses env var)
            use_llm: Whether to use LLM for intelligent test generation
        """
        self.project_root = Path(project_root).resolve()
        self.output_dir = self.project_root / output_dir
        self.output_dir.mkdir(parents=True, exist_ok=True)
        self.api_key = api_key
        self.use_llm = use_llm

        # Initialize LLM client if needed
        self._llm = None
        if use_llm:
            self._initialize_llm()

    def _initialize_llm(self):
        """Initialize Anthropic LLM client."""
        try:
            import os

            from anthropic import Anthropic

            # Try to load .env file
            try:
                from dotenv import load_dotenv

                load_dotenv()
            except ImportError:
                pass  # python-dotenv not required

            api_key = self.api_key or os.environ.get("ANTHROPIC_API_KEY")
            if not api_key:
                logger.warning(
                    "No Anthropic API key found. Set ANTHROPIC_API_KEY environment variable "
                    "or create .env file with ANTHROPIC_API_KEY=your_key_here. "
                    "Falling back to basic templates."
                )
                self.use_llm = False
                return

            self._llm = Anthropic(api_key=api_key)
            logger.info("✓ LLM client initialized successfully with Claude")

        except ImportError as e:
            logger.warning(f"Required package not installed: {e}. Falling back to templates")
            self.use_llm = False
        except Exception as e:
            logger.warning(f"Failed to initialize LLM: {e}. Falling back to templates")
            self.use_llm = False

    def generate_tests_for_file(self, source_file: str, missing_lines: list[int]) -> Path:
        """Generate tests for uncovered code in a file.

        Args:
            source_file: Path to source file
            missing_lines: Line numbers without coverage

        Returns:
            Path to generated test file

        Raises:
            RuntimeError: If test generation fails
        """
        logger.info(f"Generating tests for {source_file} (lines: {missing_lines[:5]}...)")

        # Read source file
        source_path = Path(source_file)
        if not source_path.exists():
            source_path = self.project_root / source_file

        # Resolve to absolute path for relative_to() to work correctly
        source_path = source_path.resolve()

        try:
            source_code = source_path.read_text()
        except Exception as e:
            raise RuntimeError(f"Cannot read source file: {e}") from e

        # Create unique test name from full path to avoid collisions
        # Example: src/attune/telemetry/cli.py → test_src_attune_telemetry_cli_generated.py
        relative_path = str(source_path.relative_to(self.project_root))
        test_name = f"test_{relative_path.replace('/', '_').replace('.py', '')}_generated.py"
        test_path = self.output_dir / test_name

        # Generate tests using LLM or template
        if self.use_llm and self._llm:
            test_code = self._generate_llm_tests(source_file, source_code, missing_lines)
        else:
            test_code = self._generate_basic_test_template(source_file, source_code, missing_lines)

        # Write test file
        validated_path = _validate_file_path(str(test_path))
        validated_path.write_text(test_code)

        logger.info(f"Generated test file: {test_path}")
        return test_path

    def _generate_llm_tests(
        self, source_file: str, source_code: str, missing_lines: list[int]
    ) -> str:
        """Generate tests using LLM (Claude).

        Args:
            source_file: Source file path
            source_code: Source file content
            missing_lines: Uncovered line numbers

        Returns:
            Generated test code

        Raises:
            RuntimeError: If LLM generation fails
        """
        logger.info(f"Using LLM to generate intelligent tests for {source_file}")

        # Extract API signatures using AST
        api_docs = self._extract_api_docs(source_code)

        # Extract module path
        module_path = source_file.replace("/", ".").replace(".py", "")

        # Create prompt for Claude with full context
        prompt = f"""Generate comprehensive pytest tests for the following Python code.

**Source File:** `{source_file}`
**Module Path:** `{module_path}`
**Uncovered Lines:** {missing_lines[:20]}

{api_docs}

**Full Source Code:**
```python
{source_code}
```

**CRITICAL Requirements - API Accuracy:**
1. **READ THE SOURCE CODE CAREFULLY** - Extract exact API signatures from:
   - Dataclass definitions (@dataclass) - use EXACT parameter names
   - Function signatures - match parameter names and types
   - Class __init__ methods - use correct constructor arguments

2. **DO NOT GUESS** parameter names - if you see:
   ```python
   @dataclass
   class Foo:
       bar: str  # Parameter name is 'bar', NOT 'bar_name'
   ```
   Then use: `Foo(bar="value")` NOT `Foo(bar_name="value")`

3. **Computed Properties** - Do NOT pass @property values to constructors:
   - If source has `@property def total(self): return self.a + self.b`
   - Then DO NOT use `Foo(total=10)` - it's computed from `a` and `b`

**Test Requirements:**
1. Write complete, runnable pytest tests
2. Focus on covering uncovered lines: {missing_lines[:10]}
3. Include:
   - Test class with descriptive name
   - Test methods for key functions/classes
   - Proper imports from the actual module path
   - Mock external dependencies (database, API calls, etc.)
   - Edge cases (empty inputs, None, zero, negative numbers)
   - Error handling tests (invalid input, exceptions)
4. Follow pytest best practices
5. Use clear, descriptive test method names
6. Add docstrings explaining what each test validates

**Output Format:**
Return ONLY the Python test code, starting with imports. No markdown, no explanations.
"""

        try:
            # Try Sonnet models only (Capable tier) - do NOT downgrade
            models_to_try = [
                "claude-sonnet-4-5-20250929",  # Sonnet 4.5 (September 2025 - latest)
                "claude-3-5-sonnet-20241022",  # 3.5 Sonnet Oct 2024
                "claude-3-5-sonnet-20240620",  # 3.5 Sonnet Jun 2024
            ]

            response = None
            last_error = None

            for model_name in models_to_try:
                try:
                    response = self._llm.messages.create(
                        model=model_name,
                        max_tokens=12000,  # Increased to prevent truncation on large files
                        temperature=0.3,  # Lower temperature for consistent code
                        messages=[{"role": "user", "content": prompt}],
                    )
                    logger.info(f"✓ Using Sonnet model: {model_name}")
                    break
                except Exception as e:
                    last_error = e
                    logger.debug(f"Model {model_name} not available: {e}")
                    continue

            if response is None:
                error_msg = f"All Sonnet models unavailable. Last error: {last_error}"
                logger.error(error_msg)
                raise RuntimeError(error_msg)

            test_code = response.content[0].text

            # Clean up markdown if present
            if "```python" in test_code:
                test_code = test_code.split("```python")[1].split("```")[0].strip()
            elif "```" in test_code:
                test_code = test_code.split("```")[1].split("```")[0].strip()

            logger.info(f"✓ LLM generated {len(test_code)} chars of test code")
            return test_code

        except Exception as e:
            logger.error(f"LLM test generation failed: {e}, falling back to template")
            return self._generate_basic_test_template(source_file, source_code, missing_lines)

    def _extract_api_docs(self, source_code: str) -> str:
        """Extract API signatures from source code using AST.

        Args:
            source_code: Python source code

        Returns:
            Formatted API documentation for LLM prompt
        """
        try:
            import sys
            from pathlib import Path

            # Add scripts to path
            scripts_dir = Path(__file__).parent.parent.parent.parent / "scripts"
            if str(scripts_dir) not in sys.path:
                sys.path.insert(0, str(scripts_dir))

            from ast_api_extractor import extract_api_signatures, format_api_docs

            classes, functions = extract_api_signatures(source_code)
            return format_api_docs(classes, functions)
        except Exception as e:
            logger.warning(f"AST extraction failed: {e}, proceeding without API docs")
            return "# API extraction failed - use source code carefully"

    def _generate_basic_test_template(
        self, source_file: str, source_code: str, missing_lines: list[int]
    ) -> str:
        """Generate basic test template.

        Args:
            source_file: Source file path
            source_code: Source file content
            missing_lines: Uncovered line numbers

        Returns:
            Test code as string
        """
        # Extract module name
        module_path = source_file.replace("/", ".").replace(".py", "")

        template = f'''"""Auto-generated tests for {source_file}.

Coverage gaps on lines: {missing_lines[:10]}
"""

import pytest


class TestGeneratedCoverage:
    """Tests to improve coverage for {source_file}."""

    def test_module_imports(self):
        """Test that module can be imported."""
        try:
            import {module_path}
            assert True
        except ImportError as e:
            pytest.fail(f"Module import failed: {{e}}")

    def test_placeholder_for_lines_{missing_lines[0] if missing_lines else 0}(self):
        """Placeholder test for uncovered code.

        TODO: Implement actual test logic for lines {missing_lines[:5]}
        """
        # This is a placeholder - connect to LLM for real test generation
        assert True, "Placeholder test - needs implementation"
'''
        return template


class RealTestValidator:
    """Validates generated tests by running them."""

    def __init__(self, project_root: str = "."):
        """Initialize test validator.

        Args:
            project_root: Project root directory
        """
        self.project_root = Path(project_root).resolve()

    def validate_tests(self, test_files: list[Path]) -> dict[str, Any]:
        """Run tests and measure coverage improvement.

        Args:
            test_files: List of test file paths

        Returns:
            Validation results dict

        Raises:
            RuntimeError: If validation fails
        """
        logger.info(f"Validating {len(test_files)} generated test files")

        try:
            # Run tests
            test_paths = [str(t) for t in test_files]
            cmd = ["pytest"] + test_paths + ["-v", "--tb=short"]

            result = subprocess.run(
                cmd,
                cwd=self.project_root,
                capture_output=True,
                text=True,
                timeout=300,
            )

            tests_passed = result.returncode == 0
            output_lines = result.stdout.split("\n")

            # Count passed/failed
            passed = sum(1 for line in output_lines if " PASSED" in line)
            failed = sum(1 for line in output_lines if " FAILED" in line)

            logger.info(
                f"Validation complete: {passed} passed, {failed} failed, "
                f"tests_passed={tests_passed}"
            )

            return {
                "all_passed": tests_passed,
                "passed_count": passed,
                "failed_count": failed,
                "output": result.stdout[:1000],  # Limit output
            }

        except subprocess.TimeoutExpired:
            raise RuntimeError("Test validation timed out after 5 minutes")
        except Exception as e:
            logger.error(f"Test validation failed: {e}")
            raise RuntimeError(f"Test validation failed: {e}") from e
````
```python
@dataclass
class SecurityReport:
    """Security audit report from bandit."""

    total_issues: int
    critical_count: int
    high_count: int
    medium_count: int
    low_count: int
    issues_by_file: dict[str, list[dict[str, Any]]]
    passed: bool


class RealSecurityAuditor:
    """Runs real security audit using bandit."""

    def __init__(self, project_root: str = "."):
        """Initialize security auditor.

        Args:
            project_root: Project root directory
        """
        self.project_root = Path(project_root).resolve()

    def audit(self, target_path: str = "src") -> SecurityReport:
        """Run security audit on codebase.

        Args:
            target_path: Path to audit (default: src)

        Returns:
            SecurityReport with vulnerability findings

        Raises:
            RuntimeError: If security audit fails
        """
        logger.info(f"Running security audit on {target_path}")

        try:
            # Run bandit with JSON output
            cmd = [
                "bandit",
                "-r",
                target_path,
                "-f",
                "json",
                "-q",  # Quiet mode - suppress progress bar and log messages
                "-ll",  # Only report medium and above
            ]

            result = subprocess.run(
                cmd,
                cwd=self.project_root,
                capture_output=True,
                text=True,
                timeout=300,
            )

            # Parse JSON output
            try:
                bandit_data = json.loads(result.stdout)
            except json.JSONDecodeError as e:
                # Bandit might not be installed or JSON output malformed
                logger.warning(f"Bandit not available or returned invalid JSON: {e}")
                stdout = result.stdout if isinstance(result.stdout, str) else ""
                stderr = result.stderr if isinstance(result.stderr, str) else ""
                logger.debug(f"Bandit stdout: {stdout[:500]}")
                logger.debug(f"Bandit stderr: {stderr[:500]}")
                return SecurityReport(
                    total_issues=0,
                    critical_count=0,
                    high_count=0,
                    medium_count=0,
                    low_count=0,
                    issues_by_file={},
                    passed=True,
                )

            # Count issues by severity
            results = bandit_data.get("results", [])
            critical_count = sum(1 for r in results if r.get("issue_severity") == "CRITICAL")
            high_count = sum(1 for r in results if r.get("issue_severity") == "HIGH")
            medium_count = sum(1 for r in results if r.get("issue_severity") == "MEDIUM")
            low_count = sum(1 for r in results if r.get("issue_severity") == "LOW")

            # Group by file
            issues_by_file = {}
            for issue in results:
                filepath = issue.get("filename", "unknown")
                if filepath not in issues_by_file:
                    issues_by_file[filepath] = []
                issues_by_file[filepath].append(
                    {
                        "line": issue.get("line_number"),
                        "severity": issue.get("issue_severity"),
                        "confidence": issue.get("issue_confidence"),
                        "message": issue.get("issue_text"),
                        "test_id": issue.get("test_id"),
                    }
                )

            total_issues = len(results)
            passed = critical_count == 0 and high_count == 0

            logger.info(
                f"Security audit complete: {total_issues} issues "
                f"(critical={critical_count}, high={high_count}, medium={medium_count})"
            )

            return SecurityReport(
                total_issues=total_issues,
                critical_count=critical_count,
                high_count=high_count,
                medium_count=medium_count,
                low_count=low_count,
                issues_by_file=issues_by_file,
                passed=passed,
            )

        except subprocess.TimeoutExpired:
            raise RuntimeError("Security audit timed out after 5 minutes")
        except Exception as e:
            logger.error(f"Security audit failed: {e}")
            raise RuntimeError(f"Security audit failed: {e}") from e
```
```python
@dataclass
class QualityReport:
    """Code quality report from ruff and mypy."""

    quality_score: float  # 0-10
    ruff_issues: int
    mypy_issues: int
    total_files: int
    issues_by_category: dict[str, int]
    passed: bool


class RealCodeQualityAnalyzer:
    """Runs real code quality analysis using ruff and mypy."""

    def __init__(self, project_root: str = "."):
        """Initialize code quality analyzer.

        Args:
            project_root: Project root directory
        """
        self.project_root = Path(project_root).resolve()

    def analyze(self, target_path: str = "src") -> QualityReport:
        """Run code quality analysis.

        Args:
            target_path: Path to analyze (default: src)

        Returns:
            QualityReport with quality metrics

        Raises:
            RuntimeError: If quality analysis fails
        """
        logger.info(f"Running code quality analysis on {target_path}")

        try:
            # Run ruff for linting
            ruff_issues = self._run_ruff(target_path)

            # Run mypy for type checking (optional - may not be installed)
            mypy_issues = self._run_mypy(target_path)

            # Count files
            target = self.project_root / target_path
            py_files = list(target.rglob("*.py")) if target.is_dir() else [target]
            total_files = len(py_files)

            # Calculate quality score (0-10 scale)
            # Start with 10, deduct points for issues
            quality_score = 10.0
            quality_score -= min(ruff_issues * 0.01, 3.0)  # Max -3 points for ruff
            quality_score -= min(mypy_issues * 0.02, 2.0)  # Max -2 points for mypy
            quality_score = max(0.0, quality_score)  # Floor at 0

            # Passed if score >= 7.0
            passed = quality_score >= 7.0

            logger.info(
                f"Quality analysis complete: score={quality_score:.1f}/10 "
                f"(ruff={ruff_issues}, mypy={mypy_issues})"
            )

            return QualityReport(
                quality_score=quality_score,
                ruff_issues=ruff_issues,
                mypy_issues=mypy_issues,
                total_files=total_files,
                issues_by_category={"ruff": ruff_issues, "mypy": mypy_issues},
                passed=passed,
            )

        except Exception as e:
            logger.error(f"Quality analysis failed: {e}")
            raise RuntimeError(f"Quality analysis failed: {e}") from e

    def _run_ruff(self, target_path: str) -> int:
        """Run ruff linter and count issues."""
        try:
            cmd = ["ruff", "check", target_path, "--output-format=json"]

            result = subprocess.run(
                cmd,
                cwd=self.project_root,
                capture_output=True,
                text=True,
                timeout=120,
            )

            # Parse JSON output
            try:
                ruff_data = json.loads(result.stdout) if result.stdout else []
                return len(ruff_data)
            except json.JSONDecodeError:
                logger.warning("Ruff returned invalid JSON")
                return 0

        except FileNotFoundError:
            logger.warning("Ruff not installed, skipping")
            return 0
        except Exception as e:
            logger.warning(f"Ruff check failed: {e}")
            return 0

    def _run_mypy(self, target_path: str) -> int:
        """Run mypy type checker and count issues."""
        try:
            cmd = ["mypy", target_path, "--no-error-summary"]

            result = subprocess.run(
                cmd,
                cwd=self.project_root,
                capture_output=True,
                text=True,
                timeout=120,
            )

            # Count error lines
            error_count = sum(1 for line in result.stdout.split("\n") if ": error:" in line)
            return error_count

        except FileNotFoundError:
            logger.warning("Mypy not installed, skipping")
            return 0
        except Exception as e:
            logger.warning(f"Mypy check failed: {e}")
            return 0
```
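The quality score is a linear penalty with per-tool caps, so it can be checked by hand. A worked example with hypothetical issue counts:

```python
# 150 ruff findings and 40 mypy errors:
#   ruff penalty  = min(150 * 0.01, 3.0) = 1.5
#   mypy penalty  = min(40 * 0.02, 2.0)  = 0.8
#   quality_score = 10.0 - 1.5 - 0.8     = 7.7  -> passed (>= 7.0)
ruff_issues, mypy_issues = 150, 40
score = max(0.0, 10.0 - min(ruff_issues * 0.01, 3.0) - min(mypy_issues * 0.02, 2.0))
assert round(score, 1) == 7.7
```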
```python
@dataclass
class DocumentationReport:
    """Documentation completeness report."""

    completeness_percentage: float
    total_functions: int
    documented_functions: int
    total_classes: int
    documented_classes: int
    missing_docstrings: list[str]
    passed: bool


class RealDocumentationAnalyzer:
    """Analyzes documentation completeness by scanning docstrings."""

    def __init__(self, project_root: str = "."):
        """Initialize documentation analyzer.

        Args:
            project_root: Project root directory
        """
        self.project_root = Path(project_root).resolve()

    def analyze(self, target_path: str = "src") -> DocumentationReport:
        """Analyze documentation completeness.

        Args:
            target_path: Path to analyze (default: src)

        Returns:
            DocumentationReport with completeness metrics

        Raises:
            RuntimeError: If analysis fails
        """
        logger.info(f"Analyzing documentation completeness in {target_path}")

        import ast

        target = self.project_root / target_path
        py_files = list(target.rglob("*.py")) if target.is_dir() else [target]

        total_functions = 0
        documented_functions = 0
        total_classes = 0
        documented_classes = 0
        missing_docstrings = []

        for py_file in py_files:
            if py_file.name.startswith("__") and py_file.name.endswith("__.py"):
                continue  # Skip __init__.py, __main__.py

            try:
                tree = ast.parse(py_file.read_text())

                for node in ast.walk(tree):
                    if isinstance(node, ast.FunctionDef):
                        if not node.name.startswith("_"):  # Public functions
                            total_functions += 1
                            if ast.get_docstring(node):
                                documented_functions += 1
                            else:
                                missing_docstrings.append(
                                    f"{py_file.relative_to(self.project_root)}:{node.lineno} - function {node.name}"
                                )

                    elif isinstance(node, ast.ClassDef):
                        if not node.name.startswith("_"):  # Public classes
                            total_classes += 1
                            if ast.get_docstring(node):
                                documented_classes += 1
                            else:
                                missing_docstrings.append(
                                    f"{py_file.relative_to(self.project_root)}:{node.lineno} - class {node.name}"
                                )

            except Exception as e:
                logger.warning(f"Failed to parse {py_file}: {e}")
                continue

        # Calculate completeness
        total_items = total_functions + total_classes
        documented_items = documented_functions + documented_classes

        if total_items > 0:
            completeness_percentage = (documented_items / total_items) * 100
        else:
            completeness_percentage = 100.0  # No public APIs, consider complete

        passed = completeness_percentage >= 80.0

        logger.info(
            f"Documentation analysis complete: {completeness_percentage:.1f}% "
            f"({documented_items}/{total_items} items documented)"
        )

        return DocumentationReport(
            completeness_percentage=completeness_percentage,
            total_functions=total_functions,
            documented_functions=documented_functions,
            total_classes=total_classes,
            documented_classes=documented_classes,
            missing_docstrings=missing_docstrings[:10],  # Limit to first 10
            passed=passed,
        )


# Tool registry for agents
REAL_TOOLS = {
    "coverage_analyzer": RealCoverageAnalyzer,
    "test_generator": RealTestGenerator,
    "test_validator": RealTestValidator,
    "security_auditor": RealSecurityAuditor,
    "code_quality_analyzer": RealCodeQualityAnalyzer,
    "documentation_analyzer": RealDocumentationAnalyzer,
}
```
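The registry maps tool names to classes rather than instances, so callers construct a tool per project root. A minimal lookup sketch (the tool name and paths are illustrative):

```python
from attune.orchestration.real_tools import REAL_TOOLS

tool_cls = REAL_TOOLS["code_quality_analyzer"]  # -> RealCodeQualityAnalyzer
tool = tool_cls(project_root=".")
report = tool.analyze(target_path="attune")
print(report.quality_score, report.passed)
```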