empathy-framework 2.4.0__py3-none-any.whl → 3.8.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (329)
  1. coach_wizards/__init__.py +13 -12
  2. coach_wizards/accessibility_wizard.py +12 -12
  3. coach_wizards/api_wizard.py +12 -12
  4. coach_wizards/base_wizard.py +26 -20
  5. coach_wizards/cicd_wizard.py +15 -13
  6. coach_wizards/code_reviewer_README.md +60 -0
  7. coach_wizards/code_reviewer_wizard.py +180 -0
  8. coach_wizards/compliance_wizard.py +12 -12
  9. coach_wizards/database_wizard.py +12 -12
  10. coach_wizards/debugging_wizard.py +12 -12
  11. coach_wizards/documentation_wizard.py +12 -12
  12. coach_wizards/generate_wizards.py +1 -2
  13. coach_wizards/localization_wizard.py +101 -19
  14. coach_wizards/migration_wizard.py +12 -12
  15. coach_wizards/monitoring_wizard.py +12 -12
  16. coach_wizards/observability_wizard.py +12 -12
  17. coach_wizards/performance_wizard.py +12 -12
  18. coach_wizards/prompt_engineering_wizard.py +661 -0
  19. coach_wizards/refactoring_wizard.py +12 -12
  20. coach_wizards/scaling_wizard.py +12 -12
  21. coach_wizards/security_wizard.py +12 -12
  22. coach_wizards/testing_wizard.py +12 -12
  23. empathy_framework-3.8.2.dist-info/METADATA +1176 -0
  24. empathy_framework-3.8.2.dist-info/RECORD +333 -0
  25. empathy_framework-3.8.2.dist-info/entry_points.txt +22 -0
  26. {empathy_framework-2.4.0.dist-info → empathy_framework-3.8.2.dist-info}/top_level.txt +5 -1
  27. empathy_healthcare_plugin/__init__.py +1 -2
  28. empathy_healthcare_plugin/monitors/__init__.py +9 -0
  29. empathy_healthcare_plugin/monitors/clinical_protocol_monitor.py +315 -0
  30. empathy_healthcare_plugin/monitors/monitoring/__init__.py +44 -0
  31. empathy_healthcare_plugin/monitors/monitoring/protocol_checker.py +300 -0
  32. empathy_healthcare_plugin/monitors/monitoring/protocol_loader.py +214 -0
  33. empathy_healthcare_plugin/monitors/monitoring/sensor_parsers.py +306 -0
  34. empathy_healthcare_plugin/monitors/monitoring/trajectory_analyzer.py +389 -0
  35. empathy_llm_toolkit/__init__.py +7 -7
  36. empathy_llm_toolkit/agent_factory/__init__.py +53 -0
  37. empathy_llm_toolkit/agent_factory/adapters/__init__.py +85 -0
  38. empathy_llm_toolkit/agent_factory/adapters/autogen_adapter.py +312 -0
  39. empathy_llm_toolkit/agent_factory/adapters/crewai_adapter.py +454 -0
  40. empathy_llm_toolkit/agent_factory/adapters/haystack_adapter.py +298 -0
  41. empathy_llm_toolkit/agent_factory/adapters/langchain_adapter.py +362 -0
  42. empathy_llm_toolkit/agent_factory/adapters/langgraph_adapter.py +333 -0
  43. empathy_llm_toolkit/agent_factory/adapters/native.py +228 -0
  44. empathy_llm_toolkit/agent_factory/adapters/wizard_adapter.py +426 -0
  45. empathy_llm_toolkit/agent_factory/base.py +305 -0
  46. empathy_llm_toolkit/agent_factory/crews/__init__.py +67 -0
  47. empathy_llm_toolkit/agent_factory/crews/code_review.py +1113 -0
  48. empathy_llm_toolkit/agent_factory/crews/health_check.py +1246 -0
  49. empathy_llm_toolkit/agent_factory/crews/refactoring.py +1128 -0
  50. empathy_llm_toolkit/agent_factory/crews/security_audit.py +1018 -0
  51. empathy_llm_toolkit/agent_factory/decorators.py +286 -0
  52. empathy_llm_toolkit/agent_factory/factory.py +558 -0
  53. empathy_llm_toolkit/agent_factory/framework.py +192 -0
  54. empathy_llm_toolkit/agent_factory/memory_integration.py +324 -0
  55. empathy_llm_toolkit/agent_factory/resilient.py +320 -0
  56. empathy_llm_toolkit/claude_memory.py +14 -15
  57. empathy_llm_toolkit/cli/__init__.py +8 -0
  58. empathy_llm_toolkit/cli/sync_claude.py +487 -0
  59. empathy_llm_toolkit/code_health.py +186 -28
  60. empathy_llm_toolkit/config/__init__.py +29 -0
  61. empathy_llm_toolkit/config/unified.py +295 -0
  62. empathy_llm_toolkit/contextual_patterns.py +11 -12
  63. empathy_llm_toolkit/core.py +168 -53
  64. empathy_llm_toolkit/git_pattern_extractor.py +17 -13
  65. empathy_llm_toolkit/levels.py +6 -13
  66. empathy_llm_toolkit/pattern_confidence.py +14 -18
  67. empathy_llm_toolkit/pattern_resolver.py +10 -12
  68. empathy_llm_toolkit/pattern_summary.py +16 -14
  69. empathy_llm_toolkit/providers.py +194 -28
  70. empathy_llm_toolkit/routing/__init__.py +32 -0
  71. empathy_llm_toolkit/routing/model_router.py +362 -0
  72. empathy_llm_toolkit/security/IMPLEMENTATION_SUMMARY.md +413 -0
  73. empathy_llm_toolkit/security/PHASE2_COMPLETE.md +384 -0
  74. empathy_llm_toolkit/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
  75. empathy_llm_toolkit/security/QUICK_REFERENCE.md +316 -0
  76. empathy_llm_toolkit/security/README.md +262 -0
  77. empathy_llm_toolkit/security/__init__.py +62 -0
  78. empathy_llm_toolkit/security/audit_logger.py +929 -0
  79. empathy_llm_toolkit/security/audit_logger_example.py +152 -0
  80. empathy_llm_toolkit/security/pii_scrubber.py +640 -0
  81. empathy_llm_toolkit/security/secrets_detector.py +678 -0
  82. empathy_llm_toolkit/security/secrets_detector_example.py +304 -0
  83. empathy_llm_toolkit/security/secure_memdocs.py +1192 -0
  84. empathy_llm_toolkit/security/secure_memdocs_example.py +278 -0
  85. empathy_llm_toolkit/session_status.py +20 -22
  86. empathy_llm_toolkit/state.py +28 -21
  87. empathy_llm_toolkit/wizards/__init__.py +38 -0
  88. empathy_llm_toolkit/wizards/base_wizard.py +364 -0
  89. empathy_llm_toolkit/wizards/customer_support_wizard.py +190 -0
  90. empathy_llm_toolkit/wizards/healthcare_wizard.py +362 -0
  91. empathy_llm_toolkit/wizards/patient_assessment_README.md +64 -0
  92. empathy_llm_toolkit/wizards/patient_assessment_wizard.py +193 -0
  93. empathy_llm_toolkit/wizards/technology_wizard.py +194 -0
  94. empathy_os/__init__.py +125 -84
  95. empathy_os/adaptive/__init__.py +13 -0
  96. empathy_os/adaptive/task_complexity.py +127 -0
  97. empathy_os/{monitoring.py → agent_monitoring.py} +28 -28
  98. empathy_os/cache/__init__.py +117 -0
  99. empathy_os/cache/base.py +166 -0
  100. empathy_os/cache/dependency_manager.py +253 -0
  101. empathy_os/cache/hash_only.py +248 -0
  102. empathy_os/cache/hybrid.py +390 -0
  103. empathy_os/cache/storage.py +282 -0
  104. empathy_os/cli.py +1516 -70
  105. empathy_os/cli_unified.py +597 -0
  106. empathy_os/config/__init__.py +63 -0
  107. empathy_os/config/xml_config.py +239 -0
  108. empathy_os/config.py +95 -37
  109. empathy_os/coordination.py +72 -68
  110. empathy_os/core.py +94 -107
  111. empathy_os/cost_tracker.py +74 -55
  112. empathy_os/dashboard/__init__.py +15 -0
  113. empathy_os/dashboard/server.py +743 -0
  114. empathy_os/discovery.py +17 -14
  115. empathy_os/emergence.py +21 -22
  116. empathy_os/exceptions.py +18 -30
  117. empathy_os/feedback_loops.py +30 -33
  118. empathy_os/levels.py +32 -35
  119. empathy_os/leverage_points.py +31 -32
  120. empathy_os/logging_config.py +19 -16
  121. empathy_os/memory/__init__.py +195 -0
  122. empathy_os/memory/claude_memory.py +466 -0
  123. empathy_os/memory/config.py +224 -0
  124. empathy_os/memory/control_panel.py +1298 -0
  125. empathy_os/memory/edges.py +179 -0
  126. empathy_os/memory/graph.py +567 -0
  127. empathy_os/memory/long_term.py +1194 -0
  128. empathy_os/memory/nodes.py +179 -0
  129. empathy_os/memory/redis_bootstrap.py +540 -0
  130. empathy_os/memory/security/__init__.py +31 -0
  131. empathy_os/memory/security/audit_logger.py +930 -0
  132. empathy_os/memory/security/pii_scrubber.py +640 -0
  133. empathy_os/memory/security/secrets_detector.py +678 -0
  134. empathy_os/memory/short_term.py +2119 -0
  135. empathy_os/memory/storage/__init__.py +15 -0
  136. empathy_os/memory/summary_index.py +583 -0
  137. empathy_os/memory/unified.py +619 -0
  138. empathy_os/metrics/__init__.py +12 -0
  139. empathy_os/metrics/prompt_metrics.py +190 -0
  140. empathy_os/models/__init__.py +136 -0
  141. empathy_os/models/__main__.py +13 -0
  142. empathy_os/models/cli.py +655 -0
  143. empathy_os/models/empathy_executor.py +354 -0
  144. empathy_os/models/executor.py +252 -0
  145. empathy_os/models/fallback.py +671 -0
  146. empathy_os/models/provider_config.py +563 -0
  147. empathy_os/models/registry.py +382 -0
  148. empathy_os/models/tasks.py +302 -0
  149. empathy_os/models/telemetry.py +548 -0
  150. empathy_os/models/token_estimator.py +378 -0
  151. empathy_os/models/validation.py +274 -0
  152. empathy_os/monitoring/__init__.py +52 -0
  153. empathy_os/monitoring/alerts.py +23 -0
  154. empathy_os/monitoring/alerts_cli.py +268 -0
  155. empathy_os/monitoring/multi_backend.py +271 -0
  156. empathy_os/monitoring/otel_backend.py +363 -0
  157. empathy_os/optimization/__init__.py +19 -0
  158. empathy_os/optimization/context_optimizer.py +272 -0
  159. empathy_os/pattern_library.py +30 -29
  160. empathy_os/persistence.py +35 -37
  161. empathy_os/platform_utils.py +261 -0
  162. empathy_os/plugins/__init__.py +28 -0
  163. empathy_os/plugins/base.py +361 -0
  164. empathy_os/plugins/registry.py +268 -0
  165. empathy_os/project_index/__init__.py +30 -0
  166. empathy_os/project_index/cli.py +335 -0
  167. empathy_os/project_index/crew_integration.py +430 -0
  168. empathy_os/project_index/index.py +425 -0
  169. empathy_os/project_index/models.py +501 -0
  170. empathy_os/project_index/reports.py +473 -0
  171. empathy_os/project_index/scanner.py +538 -0
  172. empathy_os/prompts/__init__.py +61 -0
  173. empathy_os/prompts/config.py +77 -0
  174. empathy_os/prompts/context.py +177 -0
  175. empathy_os/prompts/parser.py +285 -0
  176. empathy_os/prompts/registry.py +313 -0
  177. empathy_os/prompts/templates.py +208 -0
  178. empathy_os/redis_config.py +144 -58
  179. empathy_os/redis_memory.py +79 -77
  180. empathy_os/resilience/__init__.py +56 -0
  181. empathy_os/resilience/circuit_breaker.py +256 -0
  182. empathy_os/resilience/fallback.py +179 -0
  183. empathy_os/resilience/health.py +300 -0
  184. empathy_os/resilience/retry.py +209 -0
  185. empathy_os/resilience/timeout.py +135 -0
  186. empathy_os/routing/__init__.py +43 -0
  187. empathy_os/routing/chain_executor.py +433 -0
  188. empathy_os/routing/classifier.py +217 -0
  189. empathy_os/routing/smart_router.py +234 -0
  190. empathy_os/routing/wizard_registry.py +307 -0
  191. empathy_os/templates.py +19 -14
  192. empathy_os/trust/__init__.py +28 -0
  193. empathy_os/trust/circuit_breaker.py +579 -0
  194. empathy_os/trust_building.py +67 -58
  195. empathy_os/validation/__init__.py +19 -0
  196. empathy_os/validation/xml_validator.py +281 -0
  197. empathy_os/wizard_factory_cli.py +170 -0
  198. empathy_os/{workflows.py → workflow_commands.py} +131 -37
  199. empathy_os/workflows/__init__.py +360 -0
  200. empathy_os/workflows/base.py +1660 -0
  201. empathy_os/workflows/bug_predict.py +962 -0
  202. empathy_os/workflows/code_review.py +960 -0
  203. empathy_os/workflows/code_review_adapters.py +310 -0
  204. empathy_os/workflows/code_review_pipeline.py +720 -0
  205. empathy_os/workflows/config.py +600 -0
  206. empathy_os/workflows/dependency_check.py +648 -0
  207. empathy_os/workflows/document_gen.py +1069 -0
  208. empathy_os/workflows/documentation_orchestrator.py +1205 -0
  209. empathy_os/workflows/health_check.py +679 -0
  210. empathy_os/workflows/keyboard_shortcuts/__init__.py +39 -0
  211. empathy_os/workflows/keyboard_shortcuts/generators.py +386 -0
  212. empathy_os/workflows/keyboard_shortcuts/parsers.py +414 -0
  213. empathy_os/workflows/keyboard_shortcuts/prompts.py +295 -0
  214. empathy_os/workflows/keyboard_shortcuts/schema.py +193 -0
  215. empathy_os/workflows/keyboard_shortcuts/workflow.py +505 -0
  216. empathy_os/workflows/manage_documentation.py +804 -0
  217. empathy_os/workflows/new_sample_workflow1.py +146 -0
  218. empathy_os/workflows/new_sample_workflow1_README.md +150 -0
  219. empathy_os/workflows/perf_audit.py +687 -0
  220. empathy_os/workflows/pr_review.py +748 -0
  221. empathy_os/workflows/progress.py +445 -0
  222. empathy_os/workflows/progress_server.py +322 -0
  223. empathy_os/workflows/refactor_plan.py +693 -0
  224. empathy_os/workflows/release_prep.py +808 -0
  225. empathy_os/workflows/research_synthesis.py +404 -0
  226. empathy_os/workflows/secure_release.py +585 -0
  227. empathy_os/workflows/security_adapters.py +297 -0
  228. empathy_os/workflows/security_audit.py +1046 -0
  229. empathy_os/workflows/step_config.py +234 -0
  230. empathy_os/workflows/test5.py +125 -0
  231. empathy_os/workflows/test5_README.md +158 -0
  232. empathy_os/workflows/test_gen.py +1855 -0
  233. empathy_os/workflows/test_lifecycle.py +526 -0
  234. empathy_os/workflows/test_maintenance.py +626 -0
  235. empathy_os/workflows/test_maintenance_cli.py +590 -0
  236. empathy_os/workflows/test_maintenance_crew.py +821 -0
  237. empathy_os/workflows/xml_enhanced_crew.py +285 -0
  238. empathy_software_plugin/__init__.py +1 -2
  239. empathy_software_plugin/cli/__init__.py +120 -0
  240. empathy_software_plugin/cli/inspect.py +362 -0
  241. empathy_software_plugin/cli.py +49 -27
  242. empathy_software_plugin/plugin.py +4 -8
  243. empathy_software_plugin/wizards/__init__.py +42 -0
  244. empathy_software_plugin/wizards/advanced_debugging_wizard.py +392 -0
  245. empathy_software_plugin/wizards/agent_orchestration_wizard.py +511 -0
  246. empathy_software_plugin/wizards/ai_collaboration_wizard.py +503 -0
  247. empathy_software_plugin/wizards/ai_context_wizard.py +441 -0
  248. empathy_software_plugin/wizards/ai_documentation_wizard.py +503 -0
  249. empathy_software_plugin/wizards/base_wizard.py +288 -0
  250. empathy_software_plugin/wizards/book_chapter_wizard.py +519 -0
  251. empathy_software_plugin/wizards/code_review_wizard.py +606 -0
  252. empathy_software_plugin/wizards/debugging/__init__.py +50 -0
  253. empathy_software_plugin/wizards/debugging/bug_risk_analyzer.py +414 -0
  254. empathy_software_plugin/wizards/debugging/config_loaders.py +442 -0
  255. empathy_software_plugin/wizards/debugging/fix_applier.py +469 -0
  256. empathy_software_plugin/wizards/debugging/language_patterns.py +383 -0
  257. empathy_software_plugin/wizards/debugging/linter_parsers.py +470 -0
  258. empathy_software_plugin/wizards/debugging/verification.py +369 -0
  259. empathy_software_plugin/wizards/enhanced_testing_wizard.py +537 -0
  260. empathy_software_plugin/wizards/memory_enhanced_debugging_wizard.py +816 -0
  261. empathy_software_plugin/wizards/multi_model_wizard.py +501 -0
  262. empathy_software_plugin/wizards/pattern_extraction_wizard.py +422 -0
  263. empathy_software_plugin/wizards/pattern_retriever_wizard.py +400 -0
  264. empathy_software_plugin/wizards/performance/__init__.py +9 -0
  265. empathy_software_plugin/wizards/performance/bottleneck_detector.py +221 -0
  266. empathy_software_plugin/wizards/performance/profiler_parsers.py +278 -0
  267. empathy_software_plugin/wizards/performance/trajectory_analyzer.py +429 -0
  268. empathy_software_plugin/wizards/performance_profiling_wizard.py +305 -0
  269. empathy_software_plugin/wizards/prompt_engineering_wizard.py +425 -0
  270. empathy_software_plugin/wizards/rag_pattern_wizard.py +461 -0
  271. empathy_software_plugin/wizards/security/__init__.py +32 -0
  272. empathy_software_plugin/wizards/security/exploit_analyzer.py +290 -0
  273. empathy_software_plugin/wizards/security/owasp_patterns.py +241 -0
  274. empathy_software_plugin/wizards/security/vulnerability_scanner.py +604 -0
  275. empathy_software_plugin/wizards/security_analysis_wizard.py +322 -0
  276. empathy_software_plugin/wizards/security_learning_wizard.py +740 -0
  277. empathy_software_plugin/wizards/tech_debt_wizard.py +726 -0
  278. empathy_software_plugin/wizards/testing/__init__.py +27 -0
  279. empathy_software_plugin/wizards/testing/coverage_analyzer.py +459 -0
  280. empathy_software_plugin/wizards/testing/quality_analyzer.py +531 -0
  281. empathy_software_plugin/wizards/testing/test_suggester.py +533 -0
  282. empathy_software_plugin/wizards/testing_wizard.py +274 -0
  283. hot_reload/README.md +473 -0
  284. hot_reload/__init__.py +62 -0
  285. hot_reload/config.py +84 -0
  286. hot_reload/integration.py +228 -0
  287. hot_reload/reloader.py +298 -0
  288. hot_reload/watcher.py +179 -0
  289. hot_reload/websocket.py +176 -0
  290. scaffolding/README.md +589 -0
  291. scaffolding/__init__.py +35 -0
  292. scaffolding/__main__.py +14 -0
  293. scaffolding/cli.py +240 -0
  294. test_generator/__init__.py +38 -0
  295. test_generator/__main__.py +14 -0
  296. test_generator/cli.py +226 -0
  297. test_generator/generator.py +325 -0
  298. test_generator/risk_analyzer.py +216 -0
  299. workflow_patterns/__init__.py +33 -0
  300. workflow_patterns/behavior.py +249 -0
  301. workflow_patterns/core.py +76 -0
  302. workflow_patterns/output.py +99 -0
  303. workflow_patterns/registry.py +255 -0
  304. workflow_patterns/structural.py +288 -0
  305. workflow_scaffolding/__init__.py +11 -0
  306. workflow_scaffolding/__main__.py +12 -0
  307. workflow_scaffolding/cli.py +206 -0
  308. workflow_scaffolding/generator.py +265 -0
  309. agents/code_inspection/patterns/inspection/recurring_B112.json +0 -18
  310. agents/code_inspection/patterns/inspection/recurring_F541.json +0 -16
  311. agents/code_inspection/patterns/inspection/recurring_FORMAT.json +0 -25
  312. agents/code_inspection/patterns/inspection/recurring_bug_20250822_def456.json +0 -16
  313. agents/code_inspection/patterns/inspection/recurring_bug_20250915_abc123.json +0 -16
  314. agents/code_inspection/patterns/inspection/recurring_bug_20251212_3c5b9951.json +0 -16
  315. agents/code_inspection/patterns/inspection/recurring_bug_20251212_97c0f72f.json +0 -16
  316. agents/code_inspection/patterns/inspection/recurring_bug_20251212_a0871d53.json +0 -16
  317. agents/code_inspection/patterns/inspection/recurring_bug_20251212_a9b6ec41.json +0 -16
  318. agents/code_inspection/patterns/inspection/recurring_bug_null_001.json +0 -16
  319. agents/code_inspection/patterns/inspection/recurring_builtin.json +0 -16
  320. agents/compliance_anticipation_agent.py +0 -1427
  321. agents/epic_integration_wizard.py +0 -541
  322. agents/trust_building_behaviors.py +0 -891
  323. empathy_framework-2.4.0.dist-info/METADATA +0 -485
  324. empathy_framework-2.4.0.dist-info/RECORD +0 -102
  325. empathy_framework-2.4.0.dist-info/entry_points.txt +0 -6
  326. empathy_llm_toolkit/htmlcov/status.json +0 -1
  327. empathy_llm_toolkit/security/htmlcov/status.json +0 -1
  328. {empathy_framework-2.4.0.dist-info → empathy_framework-3.8.2.dist-info}/WHEEL +0 -0
  329. {empathy_framework-2.4.0.dist-info → empathy_framework-3.8.2.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,821 @@
1
+ """Test Maintenance Crew - CrewAI-Based Automated Test Management
2
+
3
+ A crew of specialized agents that collaboratively manage the test lifecycle:
4
+ - Test Analyst: Analyzes coverage gaps and prioritizes work
5
+ - Test Generator: Creates new tests using LLM
6
+ - Test Validator: Verifies generated tests work correctly
7
+ - Test Reporter: Generates status reports and recommendations
8
+
9
+ The crew can operate autonomously on a schedule or be triggered by events.
10
+
11
+ Copyright 2025 Smart AI Memory, LLC
12
+ Licensed under Fair Source 0.9
13
+ """
14
+
15
+ import logging
16
+ from dataclasses import dataclass, field
17
+ from datetime import datetime
18
+ from pathlib import Path
19
+ from typing import Any
20
+
21
+ from ..project_index import ProjectIndex
22
+ from ..project_index.reports import ReportGenerator
23
+ from .test_maintenance import TestAction, TestMaintenanceWorkflow, TestPlanItem, TestPriority
24
+
25
+ logger = logging.getLogger(__name__)
26
+
27
+
28
@dataclass
class AgentResult:
    """Result from an agent's work.

    A uniform envelope returned by every agent task in this crew, carrying
    the task payload plus timing metadata for reporting.
    """

    agent: str  # Human-readable agent name, e.g. "Test Analyst"
    task: str  # Task identifier, e.g. "analyze_coverage_gaps"
    success: bool  # Whether the agent considers the task to have succeeded
    output: dict[str, Any]  # Task-specific payload (counts, file lists, results)
    duration_ms: int = 0  # Wall-clock duration of the task in milliseconds
    timestamp: datetime = field(default_factory=datetime.now)  # Creation time of this result
38
+
39
+
40
@dataclass
class CrewConfig:
    """Configuration for the test maintenance crew."""

    # Agent settings
    enable_auto_generation: bool = True  # Allow the generator agent to write test files
    enable_auto_validation: bool = True  # Allow the validator agent to run generated tests
    max_files_per_run: int = 10  # Cap on plan items processed per crew run

    # Thresholds
    min_coverage_target: float = 80.0  # Target coverage % — consumed outside this module; TODO confirm
    staleness_threshold_days: int = 7  # Days before tests count as stale — presumably used by the index; confirm
    high_impact_threshold: float = 5.0  # impact_score at or above this is treated as high impact

    # Scheduling
    auto_run_interval_hours: int = 24  # Autonomous run interval — scheduler lives outside this view; confirm
    run_on_commit: bool = True  # Whether commits should also trigger a run — confirm against caller

    # LLM settings
    test_gen_model: str = "sonnet"  # Model alias for test generation — not referenced in visible code
    validation_model: str = "haiku"  # Model alias for validation — not referenced in visible code

    # Validation settings
    validation_timeout_seconds: int = 120  # Per-file pytest timeout (seconds)
    validation_optional: bool = True  # Don't fail the crew run if validation fails
    skip_validation_on_timeout: bool = True  # Mark timed-out files as skipped and continue
66
+
67
+
68
class TestAnalystAgent:
    """Analyzes test coverage and prioritizes work.

    Responsibilities:
    - Identify files needing tests
    - Calculate priority based on impact
    - Generate maintenance plans
    - Track test health metrics
    """

    def __init__(self, index: "ProjectIndex", config: "CrewConfig"):
        """Store collaborators.

        Annotations are string literals so the class can be defined even when
        the annotated types are not importable at definition time.

        Args:
            index: Project index queried for coverage gaps and stale tests.
            config: Crew thresholds (notably ``high_impact_threshold`` and
                ``max_files_per_run``).
        """
        self.index = index
        self.config = config
        self.name = "Test Analyst"

    async def analyze_coverage_gaps(self) -> "AgentResult":
        """Identify files with coverage gaps.

        Returns:
            AgentResult whose output holds gap totals, the ten highest-impact
            files, and a human-readable recommendation.
        """
        start = datetime.now()

        files_needing_tests = self.index.get_files_needing_tests()
        high_impact = [
            f for f in files_needing_tests if f.impact_score >= self.config.high_impact_threshold
        ]

        output = {
            "total_gaps": len(files_needing_tests),
            "high_impact_gaps": len(high_impact),
            "priority_files": [
                {
                    "path": f.path,
                    "impact": f.impact_score,
                    "loc": f.lines_of_code,
                }
                # reverse=True instead of negating the key: clearer, and it
                # does not assume impact_score supports unary minus.
                for f in sorted(high_impact, key=lambda x: x.impact_score, reverse=True)[:10]
            ],
            "recommendation": self._generate_recommendation(files_needing_tests, high_impact),
        }

        duration = int((datetime.now() - start).total_seconds() * 1000)

        return AgentResult(
            agent=self.name,
            task="analyze_coverage_gaps",
            success=True,
            output=output,
            duration_ms=duration,
        )

    async def analyze_staleness(self) -> "AgentResult":
        """Identify files with stale tests.

        Returns:
            AgentResult whose output holds the stale-file count, the average
            staleness in days (0 when there are no stale files), and the ten
            stalest files.
        """
        start = datetime.now()

        stale_files = self.index.get_stale_files()

        output = {
            "stale_count": len(stale_files),
            "avg_staleness_days": (
                sum(f.staleness_days for f in stale_files) / len(stale_files) if stale_files else 0
            ),
            "stale_files": [
                {
                    "path": f.path,
                    "staleness_days": f.staleness_days,
                    "test_file": f.test_file_path,
                }
                for f in sorted(stale_files, key=lambda x: x.staleness_days, reverse=True)[:10]
            ],
        }

        duration = int((datetime.now() - start).total_seconds() * 1000)

        return AgentResult(
            agent=self.name,
            task="analyze_staleness",
            success=True,
            output=output,
            duration_ms=duration,
        )

    async def generate_plan(self) -> "AgentResult":
        """Generate a prioritized maintenance plan.

        Delegates to TestMaintenanceWorkflow in "analyze" mode, capped at
        ``config.max_files_per_run`` items; the workflow's raw result dict is
        passed through as the agent output.
        """
        start = datetime.now()

        workflow = TestMaintenanceWorkflow(str(self.index.project_root), self.index)
        result = await workflow.run(
            {
                "mode": "analyze",
                "max_items": self.config.max_files_per_run,
            },
        )

        duration = int((datetime.now() - start).total_seconds() * 1000)

        return AgentResult(
            agent=self.name,
            task="generate_plan",
            success=True,
            output=result,
            duration_ms=duration,
        )

    def _generate_recommendation(self, all_gaps: list, high_impact: list) -> str:
        """Generate actionable recommendation.

        Args:
            all_gaps: Every file lacking tests.
            high_impact: The subset of ``all_gaps`` at/above the impact
                threshold (so ``len(all_gaps) >= len(high_impact)``).
        """
        if len(high_impact) > 5:
            return f"URGENT: {len(high_impact)} high-impact files need tests. Start with the top 5."
        if len(high_impact) > 0:
            return f"Prioritize {len(high_impact)} high-impact files before addressing remaining {len(all_gaps) - len(high_impact)} gaps."
        if len(all_gaps) > 20:
            return f"Consider batch test generation for {len(all_gaps)} files."
        if len(all_gaps) > 0:
            return f"Address {len(all_gaps)} remaining test gaps to improve coverage."
        return "Excellent! All files requiring tests have coverage."
180
+
181
+
182
class TestGeneratorAgent:
    """Generates tests for source files.

    Responsibilities:
    - Read source file and understand its structure
    - Generate appropriate test cases
    - Follow project testing patterns
    - Write test files to correct location
    """

    def __init__(self, project_root: Path, index: "ProjectIndex", config: "CrewConfig"):
        """Store collaborators.

        Args:
            project_root: Base directory that plan-item paths are relative to.
            index: Project index, updated after each successful generation.
            config: Crew settings (per-run caps, model choice).
        """
        self.project_root = project_root
        self.index = index
        self.config = config
        self.name = "Test Generator"

    async def generate_tests(self, plan_items: list["TestPlanItem"]) -> "AgentResult":
        """Generate tests for files in the plan.

        Only items whose action is TestAction.CREATE are processed; all other
        items are ignored. A per-item exception is logged and counted as a
        failure rather than aborting the batch.
        """
        start = datetime.now()

        results = []
        succeeded = 0
        failed = 0

        for item in plan_items:
            if item.action != TestAction.CREATE:
                continue

            try:
                result = await self._generate_test_for_file(item)
                results.append(result)
                if result["success"]:
                    succeeded += 1
                else:
                    failed += 1
            except Exception as e:
                logger.error(f"Failed to generate tests for {item.file_path}: {e}")
                failed += 1
                results.append(
                    {
                        "file": item.file_path,
                        "success": False,
                        "error": str(e),
                    },
                )

        duration = int((datetime.now() - start).total_seconds() * 1000)

        return AgentResult(
            agent=self.name,
            task="generate_tests",
            success=failed == 0,
            output={
                "processed": len(results),
                "succeeded": succeeded,
                "failed": failed,
                "results": results,
            },
            duration_ms=duration,
        )

    async def _generate_test_for_file(self, item: "TestPlanItem") -> dict[str, Any]:
        """Generate tests for a single file.

        Returns:
            A result dict with at least "file" and "success". Failures carry
            "error"; successful generations carry "test_file" and
            "lines_generated"; pre-existing real tests are reported with
            "skipped": True.
        """
        source_path = self.project_root / item.file_path

        if not source_path.exists():
            return {
                "file": item.file_path,
                "success": False,
                "error": "Source file not found",
            }

        # Determine test file path
        test_file_path = self._determine_test_path(item.file_path)
        full_test_path = self.project_root / test_file_path

        # Skip if a real (non-placeholder) test file already exists. The
        # "assert True # Replace" sentinel must match the template emitted by
        # _generate_test_code byte-for-byte, or placeholders stop being
        # recognized as such.
        if full_test_path.exists():
            existing_content = full_test_path.read_text(encoding="utf-8")
            # Only skip if file has real tests (not just placeholder)
            if "def test_" in existing_content and "assert True # Replace" not in existing_content:
                return {
                    "file": item.file_path,
                    "test_file": test_file_path,
                    "success": True,
                    "skipped": True,
                    "reason": "Test file already exists with real tests",
                }

        # Read source file
        try:
            source_code = source_path.read_text(encoding="utf-8")
        except Exception as e:
            return {
                "file": item.file_path,
                "success": False,
                "error": f"Failed to read source: {e}",
            }

        # Generate test code (placeholder - would use LLM)
        test_code = self._generate_test_code(item.file_path, source_code, item.metadata)

        # Write test file
        try:
            full_test_path.parent.mkdir(parents=True, exist_ok=True)
            full_test_path.write_text(test_code, encoding="utf-8")
        except Exception as e:
            return {
                "file": item.file_path,
                "success": False,
                "error": f"Failed to write test file: {e}",
            }

        # Record the new test in the index so subsequent analyses see it.
        self.index.update_file(
            item.file_path,
            tests_exist=True,
            test_file_path=test_file_path,
            tests_last_modified=datetime.now(),
            is_stale=False,
            staleness_days=0,
        )

        return {
            "file": item.file_path,
            "test_file": test_file_path,
            "success": True,
            "lines_generated": len(test_code.split("\n")),
        }

    def _determine_test_path(self, source_path: str) -> str:
        """Determine the test file path for a source file.

        Both "src/<pkg>/<file>.py" and "<pkg>/<file>.py" map to
        "tests/test_<file>.py". The original implementation had two
        byte-identical branches for these cases; they are collapsed here with
        no behavior change.

        NOTE(review): files with the same stem in different packages collide
        on the same test path — confirm this is acceptable upstream.
        """
        return f"tests/test_{Path(source_path).stem}.py"

    def _generate_test_code(
        self,
        source_path: str,
        source_code: str,
        metadata: dict[str, Any],
    ) -> str:
        """Generate test code for a source file.

        Args:
            source_path: Project-relative path of the file under test.
            source_code: The file's contents. Unused by this placeholder;
                kept for the future LLM-backed implementation.
            metadata: Plan-item metadata; "lines_of_code" and "complexity"
                are echoed into the template when present.
        """
        # This is a placeholder - would integrate with LLM for real generation
        module_name = Path(source_path).stem
        class_name = "".join(word.capitalize() for word in module_name.split("_"))

        return f'''"""
Tests for {source_path}

Auto-generated by Test Maintenance Crew.
Review and enhance as needed.
"""

import pytest

# TODO: Import the module being tested
# from {module_name} import ...


class Test{class_name}:
    """Tests for {module_name} module."""

    def test_placeholder(self):
        """Placeholder test - implement actual tests."""
        # TODO: Implement actual tests
        # Source file has {metadata.get("lines_of_code", "unknown")} lines
        # Complexity score: {metadata.get("complexity", "unknown")}
        assert True # Replace with actual assertions


# TODO: Add more test cases based on the source code
'''
362
+
363
+
364
+ class TestValidatorAgent:
365
+ """Validates generated tests.
366
+
367
+ Responsibilities:
368
+ - Run generated tests to verify they pass
369
+ - Check test coverage
370
+ - Identify issues with generated tests
371
+ - Suggest improvements
372
+ """
373
+
374
    def __init__(self, project_root: Path, config: CrewConfig):
        """Initialize the validator.

        Args:
            project_root: Project base directory; pytest runs are executed
                with this as the working directory.
            config: Crew settings (per-file timeout, optional-validation and
                skip-on-timeout flags).
        """
        self.project_root = project_root
        self.config = config
        self.name = "Test Validator"
378
+
379
+ async def validate_tests(self, test_files: list[str]) -> AgentResult:
380
+ """Validate that tests run correctly."""
381
+ start = datetime.now()
382
+
383
+ results = []
384
+ passed = 0
385
+ failed = 0
386
+ skipped = 0
387
+
388
+ for test_file in test_files:
389
+ try:
390
+ result = await self._run_test_file(test_file)
391
+ results.append(result)
392
+ if result.get("skipped"):
393
+ skipped += 1
394
+ elif result["passed"]:
395
+ passed += 1
396
+ else:
397
+ failed += 1
398
+ except Exception as e:
399
+ logger.error(f"Validation error for {test_file}: {e}")
400
+ results.append(
401
+ {
402
+ "file": test_file,
403
+ "passed": False,
404
+ "error": str(e),
405
+ },
406
+ )
407
+ failed += 1
408
+
409
+ duration = int((datetime.now() - start).total_seconds() * 1000)
410
+
411
+ # Success depends on config - if validation is optional, we succeed even with failures
412
+ success = (failed == 0) or self.config.validation_optional
413
+
414
+ return AgentResult(
415
+ agent=self.name,
416
+ task="validate_tests",
417
+ success=success,
418
+ output={
419
+ "total": len(test_files),
420
+ "passed": passed,
421
+ "failed": failed,
422
+ "skipped": skipped,
423
+ "results": results,
424
+ "validation_optional": self.config.validation_optional,
425
+ },
426
+ duration_ms=duration,
427
+ )
428
+
429
+ async def validate_single(self, test_file: str) -> AgentResult:
430
+ """Validate a single test file (for validate-only mode)."""
431
+ start = datetime.now()
432
+ result = await self._run_test_file(test_file)
433
+ duration = int((datetime.now() - start).total_seconds() * 1000)
434
+
435
+ return AgentResult(
436
+ agent=self.name,
437
+ task="validate_single",
438
+ success=result["passed"],
439
+ output=result,
440
+ duration_ms=duration,
441
+ )
442
+
443
+ async def _run_test_file(self, test_file: str) -> dict[str, Any]:
444
+ """Run a single test file."""
445
+ import subprocess
446
+
447
+ full_path = self.project_root / test_file
448
+
449
+ if not full_path.exists():
450
+ return {
451
+ "file": test_file,
452
+ "passed": False,
453
+ "error": "Test file not found",
454
+ }
455
+
456
+ timeout = self.config.validation_timeout_seconds
457
+
458
+ try:
459
+ # Run pytest without coverage to avoid coverage threshold failures
460
+ result = subprocess.run(
461
+ ["python", "-m", "pytest", str(full_path), "-v", "--tb=short", "-x", "--no-cov"],
462
+ check=False,
463
+ capture_output=True,
464
+ text=True,
465
+ timeout=timeout,
466
+ cwd=str(self.project_root),
467
+ )
468
+
469
+ # Check if tests passed (look for "passed" in output even if returncode != 0)
470
+ tests_passed = result.returncode == 0
471
+ if not tests_passed and result.stdout:
472
+ # pytest may return non-zero for coverage issues even when tests pass
473
+ if "passed" in result.stdout and "failed" not in result.stdout.lower():
474
+ tests_passed = True
475
+
476
+ return {
477
+ "file": test_file,
478
+ "passed": tests_passed,
479
+ "output": result.stdout[-1000:] if result.stdout else "",
480
+ "errors": result.stderr[-500:] if result.stderr else "",
481
+ }
482
+
483
+ except subprocess.TimeoutExpired:
484
+ logger.warning(f"Test timeout for {test_file} after {timeout}s")
485
+ if self.config.skip_validation_on_timeout:
486
+ return {
487
+ "file": test_file,
488
+ "passed": False,
489
+ "skipped": True,
490
+ "error": f"Test timeout after {timeout}s - skipped",
491
+ }
492
+ return {
493
+ "file": test_file,
494
+ "passed": False,
495
+ "error": f"Test timeout after {timeout}s",
496
+ }
497
+ except Exception as e:
498
+ logger.error(f"Validation error for {test_file}: {e}")
499
+ return {
500
+ "file": test_file,
501
+ "passed": False,
502
+ "error": str(e),
503
+ }
504
+
505
+
506
class TestReporterAgent:
    """Produces status reports and maintenance-run summaries.

    Responsibilities:
    - Generate test health reports
    - Track progress over time
    - Provide actionable recommendations
    - Format output for different consumers
    """

    def __init__(self, index: ProjectIndex, config: CrewConfig):
        self.name = "Test Reporter"
        self.index = index
        self.config = config

    async def generate_status_report(self) -> AgentResult:
        """Build the full status report: health, gaps, staleness, and advice."""
        started = datetime.now()

        project_summary = self.index.get_summary()
        report_builder = ReportGenerator(project_summary, self.index.get_all_files())

        report = {
            "health": report_builder.health_report(),
            "test_gaps": report_builder.test_gap_report(),
            "staleness": report_builder.staleness_report(),
            "recommendations": self._generate_recommendations(project_summary),
        }

        elapsed_ms = int((datetime.now() - started).total_seconds() * 1000)

        return AgentResult(
            agent=self.name,
            task="generate_status_report",
            success=True,
            output=report,
            duration_ms=elapsed_ms,
        )

    async def generate_maintenance_summary(
        self,
        crew_results: list[AgentResult],
    ) -> AgentResult:
        """Condense one maintenance run's per-agent results into a summary."""
        started = datetime.now()

        succeeded = sum(1 for item in crew_results if item.success)
        combined_ms = sum(item.duration_ms for item in crew_results)

        summary_payload = {
            "run_timestamp": datetime.now().isoformat(),
            "agents_executed": len(crew_results),
            "agents_succeeded": succeeded,
            "total_duration_ms": combined_ms,
            "agent_results": [
                {
                    "agent": item.agent,
                    "task": item.task,
                    "success": item.success,
                    "duration_ms": item.duration_ms,
                }
                for item in crew_results
            ],
            "overall_success": succeeded == len(crew_results),
        }

        elapsed_ms = int((datetime.now() - started).total_seconds() * 1000)

        return AgentResult(
            agent=self.name,
            task="generate_maintenance_summary",
            success=True,
            output=summary_payload,
            duration_ms=elapsed_ms,
        )

    def _generate_recommendations(self, summary) -> list[str]:
        """Translate index summary metrics into actionable advice strings."""
        advice: list[str] = []
        target = self.config.min_coverage_target
        coverage = summary.test_coverage_avg

        # Coverage recommendations
        if coverage < 50:
            advice.append(
                f"CRITICAL: Test coverage is {coverage:.1f}%. "
                f"Target is {target}%. Prioritize test creation.",
            )
        elif coverage < target:
            advice.append(
                f"Coverage is {coverage:.1f}%, "
                f"below target of {target}%.",
            )

        # Test gap recommendations
        untested = summary.files_without_tests
        if untested > 20:
            advice.append(
                f"Large test gap: {untested} files need tests. "
                "Consider batch generation.",
            )
        elif untested > 0:
            advice.append(f"{untested} files still need tests.")

        # Staleness recommendations
        stale = summary.stale_file_count
        if stale > 10:
            advice.append(
                f"{stale} files have stale tests. Run test update workflow.",
            )
        elif stale > 0:
            advice.append(f"{stale} files have stale tests.")

        # Critical files
        if summary.critical_untested_files:
            advice.append(
                f"PRIORITY: {len(summary.critical_untested_files)} high-impact files "
                "lack tests. Address immediately.",
            )

        if not advice:
            advice.append("Test health is good. Maintain current coverage.")

        return advice
626
+
627
+
628
class TestMaintenanceCrew:
    """Coordinates the test maintenance agents.

    The crew can run different types of maintenance operations:
    - full: Run all agents in sequence
    - analyze: Only run analysis (no generation)
    - generate: Run analysis and generation
    - validate: Run analysis, generation, and validation
    - validate-only: Only validate explicitly supplied test files
    - report: Only generate reports
    """

    def __init__(
        self,
        project_root: str,
        index: ProjectIndex | None = None,
        config: CrewConfig | None = None,
    ):
        self.project_root = Path(project_root)
        # A caller-supplied index/config wins; otherwise build defaults.
        self.index = index or ProjectIndex(str(project_root))
        self.config = config or CrewConfig()

        # Initialize agents — all four share the same index/config instances.
        self.analyst = TestAnalystAgent(self.index, self.config)
        self.generator = TestGeneratorAgent(self.project_root, self.index, self.config)
        self.validator = TestValidatorAgent(self.project_root, self.config)
        self.reporter = TestReporterAgent(self.index, self.config)

        # Results tracking — one dict appended per run() call (in-memory only).
        self._run_history: list[dict[str, Any]] = []

    async def run(self, mode: str = "full", test_files: list[str] | None = None) -> dict[str, Any]:
        """Run the crew with specified mode.

        Modes:
        - full: Complete maintenance cycle
        - analyze: Only analysis
        - generate: Analysis + generation
        - validate: Analysis + generation + validation
        - validate-only: Only validate specified test files (pass test_files param)
        - report: Only reporting

        Returns a dict with the mode, a timestamp, every agent's raw output,
        the maintenance summary, and an overall ``success`` flag.
        """
        logger.info(f"Starting test maintenance crew in {mode} mode")

        results: list[AgentResult] = []
        plan = None

        # Handle validate-only mode separately — it skips the index refresh
        # and all analysis/generation phases, and returns early.
        if mode == "validate-only":
            if not test_files:
                return {
                    "mode": mode,
                    "success": False,
                    "error": "validate-only mode requires test_files parameter",
                }

            for test_file in test_files:
                val_result = await self.validator.validate_single(test_file)
                results.append(val_result)

            summary_result = await self.reporter.generate_maintenance_summary(results)
            results.append(summary_result)

            return {
                "mode": mode,
                "timestamp": datetime.now().isoformat(),
                "results": [r.output for r in results],
                "summary": summary_result.output,
                # The summary result is excluded from the success check so a
                # successful summary cannot mask failed validations.
                "success": all(
                    r.success for r in results if r.task != "generate_maintenance_summary"
                ),
            }

        # Ensure index is fresh
        self.index.refresh()

        # Phase 1: Analysis (always run except for report-only)
        if mode != "report":
            coverage_result = await self.analyst.analyze_coverage_gaps()
            results.append(coverage_result)

            staleness_result = await self.analyst.analyze_staleness()
            results.append(staleness_result)

            plan_result = await self.analyst.generate_plan()
            results.append(plan_result)
            # Plan is a plain dict payload; defaults to {} when absent.
            plan = plan_result.output.get("plan", {})

        # Phase 2: Generation (for generate, validate, full modes)
        if mode in ["generate", "validate", "full"] and plan:
            # Rehydrate plan dicts into typed items; only "create" actions
            # are generated here (updates/deletes are not handled by this phase).
            plan_items = [
                TestPlanItem(
                    file_path=item["file_path"],
                    action=TestAction(item["action"]),
                    priority=TestPriority(item["priority"]),
                    reason=item.get("reason", ""),
                    metadata=item.get("metadata", {}),
                )
                for item in plan.get("items", [])
                if item["action"] == "create"
            ]

            if plan_items:
                gen_result = await self.generator.generate_tests(plan_items)
                results.append(gen_result)

        # Phase 3: Validation (for validate, full modes)
        if mode in ["validate", "full"] and self.config.enable_auto_validation:
            # Get test files from generation results
            generated_test_files = []
            for result in results:
                if result.agent == "Test Generator":
                    for item in result.output.get("results", []):
                        if item.get("success") and item.get("test_file"):
                            generated_test_files.append(item["test_file"])

            if generated_test_files:
                try:
                    val_result = await self.validator.validate_tests(generated_test_files)
                    results.append(val_result)
                except Exception as e:
                    logger.error(f"Validation failed with error: {e}")
                    if not self.config.validation_optional:
                        raise
                    # Log but continue if validation is optional
                    results.append(
                        AgentResult(
                            agent="Test Validator",
                            task="validate_tests",
                            success=True,  # Mark as success since validation is optional
                            output={
                                "error": str(e),
                                "validation_skipped": True,
                                "validation_optional": True,
                            },
                            duration_ms=0,
                        ),
                    )

        # Phase 4: Reporting (always run)
        status_result = await self.reporter.generate_status_report()
        results.append(status_result)

        summary_result = await self.reporter.generate_maintenance_summary(results)
        results.append(summary_result)

        # Compile final output
        output = {
            "mode": mode,
            "timestamp": datetime.now().isoformat(),
            "results": [r.output for r in results],
            "summary": summary_result.output,
            # NOTE: unlike validate-only mode, this includes the summary
            # result itself in the success check.
            "success": all(r.success for r in results),
        }

        # Save to history
        self._run_history.append(output)

        return output

    def get_run_history(self, limit: int = 10) -> list[dict[str, Any]]:
        """Get recent run history (the most recent ``limit`` run outputs)."""
        return self._run_history[-limit:]

    def get_crew_status(self) -> dict[str, Any]:
        """Get current crew status: config highlights, index stats, run count."""
        # NOTE(review): self.index.get_summary() is called twice below;
        # consider binding it to a local if it is expensive — confirm cost.
        return {
            "project_root": str(self.project_root),
            "config": {
                "auto_generation": self.config.enable_auto_generation,
                "auto_validation": self.config.enable_auto_validation,
                "max_files_per_run": self.config.max_files_per_run,
            },
            "index_status": {
                "total_files": self.index.get_summary().total_files,
                "files_needing_tests": self.index.get_summary().files_without_tests,
            },
            "run_count": len(self._run_history),
        }
806
+
807
+
808
def create_crew_config_from_dict(config_dict: dict[str, Any]) -> CrewConfig:
    """Create CrewConfig from dictionary.

    Missing keys fall back to the defaults below. Validation-related keys
    (``validation_optional``, ``validation_timeout_seconds``,
    ``skip_validation_on_timeout``) are forwarded only when present in
    ``config_dict`` so that CrewConfig's own defaults stay authoritative
    when they are absent.

    Args:
        config_dict: Mapping of configuration option names to values.

    Returns:
        A populated CrewConfig instance.
    """
    # These settings are read by the validator/crew at runtime; previously
    # they could not be set through this factory at all.
    validation_keys = (
        "validation_optional",
        "validation_timeout_seconds",
        "skip_validation_on_timeout",
    )
    validation_overrides = {
        key: config_dict[key] for key in validation_keys if key in config_dict
    }

    return CrewConfig(
        enable_auto_generation=config_dict.get("enable_auto_generation", True),
        enable_auto_validation=config_dict.get("enable_auto_validation", True),
        max_files_per_run=config_dict.get("max_files_per_run", 10),
        min_coverage_target=config_dict.get("min_coverage_target", 80.0),
        staleness_threshold_days=config_dict.get("staleness_threshold_days", 7),
        high_impact_threshold=config_dict.get("high_impact_threshold", 5.0),
        auto_run_interval_hours=config_dict.get("auto_run_interval_hours", 24),
        run_on_commit=config_dict.get("run_on_commit", True),
        test_gen_model=config_dict.get("test_gen_model", "sonnet"),
        validation_model=config_dict.get("validation_model", "haiku"),
        **validation_overrides,
    )