empathy-framework 5.1.1__py3-none-any.whl → 5.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (106) hide show
  1. {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/METADATA +79 -6
  2. {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/RECORD +83 -64
  3. empathy_os/__init__.py +1 -1
  4. empathy_os/cache/hybrid.py +5 -1
  5. empathy_os/cli/commands/batch.py +8 -0
  6. empathy_os/cli/commands/profiling.py +4 -0
  7. empathy_os/cli/commands/workflow.py +8 -4
  8. empathy_os/cli_router.py +9 -0
  9. empathy_os/config.py +15 -2
  10. empathy_os/core_modules/__init__.py +15 -0
  11. empathy_os/dashboard/simple_server.py +62 -30
  12. empathy_os/mcp/__init__.py +10 -0
  13. empathy_os/mcp/server.py +506 -0
  14. empathy_os/memory/control_panel.py +1 -131
  15. empathy_os/memory/control_panel_support.py +145 -0
  16. empathy_os/memory/encryption.py +159 -0
  17. empathy_os/memory/long_term.py +46 -631
  18. empathy_os/memory/long_term_types.py +99 -0
  19. empathy_os/memory/mixins/__init__.py +25 -0
  20. empathy_os/memory/mixins/backend_init_mixin.py +249 -0
  21. empathy_os/memory/mixins/capabilities_mixin.py +208 -0
  22. empathy_os/memory/mixins/handoff_mixin.py +208 -0
  23. empathy_os/memory/mixins/lifecycle_mixin.py +49 -0
  24. empathy_os/memory/mixins/long_term_mixin.py +352 -0
  25. empathy_os/memory/mixins/promotion_mixin.py +109 -0
  26. empathy_os/memory/mixins/short_term_mixin.py +182 -0
  27. empathy_os/memory/short_term.py +61 -12
  28. empathy_os/memory/simple_storage.py +302 -0
  29. empathy_os/memory/storage_backend.py +167 -0
  30. empathy_os/memory/types.py +8 -3
  31. empathy_os/memory/unified.py +21 -1120
  32. empathy_os/meta_workflows/cli_commands/__init__.py +56 -0
  33. empathy_os/meta_workflows/cli_commands/agent_commands.py +321 -0
  34. empathy_os/meta_workflows/cli_commands/analytics_commands.py +442 -0
  35. empathy_os/meta_workflows/cli_commands/config_commands.py +232 -0
  36. empathy_os/meta_workflows/cli_commands/memory_commands.py +182 -0
  37. empathy_os/meta_workflows/cli_commands/template_commands.py +354 -0
  38. empathy_os/meta_workflows/cli_commands/workflow_commands.py +382 -0
  39. empathy_os/meta_workflows/cli_meta_workflows.py +52 -1802
  40. empathy_os/models/telemetry/__init__.py +71 -0
  41. empathy_os/models/telemetry/analytics.py +594 -0
  42. empathy_os/models/telemetry/backend.py +196 -0
  43. empathy_os/models/telemetry/data_models.py +431 -0
  44. empathy_os/models/telemetry/storage.py +489 -0
  45. empathy_os/orchestration/__init__.py +35 -0
  46. empathy_os/orchestration/execution_strategies.py +481 -0
  47. empathy_os/orchestration/meta_orchestrator.py +488 -1
  48. empathy_os/routing/workflow_registry.py +36 -0
  49. empathy_os/telemetry/agent_coordination.py +2 -3
  50. empathy_os/telemetry/agent_tracking.py +26 -7
  51. empathy_os/telemetry/approval_gates.py +18 -24
  52. empathy_os/telemetry/cli.py +19 -724
  53. empathy_os/telemetry/commands/__init__.py +14 -0
  54. empathy_os/telemetry/commands/dashboard_commands.py +696 -0
  55. empathy_os/telemetry/event_streaming.py +7 -3
  56. empathy_os/telemetry/feedback_loop.py +28 -15
  57. empathy_os/tools.py +183 -0
  58. empathy_os/workflows/__init__.py +5 -0
  59. empathy_os/workflows/autonomous_test_gen.py +860 -161
  60. empathy_os/workflows/base.py +6 -2
  61. empathy_os/workflows/code_review.py +4 -1
  62. empathy_os/workflows/document_gen/__init__.py +25 -0
  63. empathy_os/workflows/document_gen/config.py +30 -0
  64. empathy_os/workflows/document_gen/report_formatter.py +162 -0
  65. empathy_os/workflows/{document_gen.py → document_gen/workflow.py} +5 -184
  66. empathy_os/workflows/output.py +4 -1
  67. empathy_os/workflows/progress.py +8 -2
  68. empathy_os/workflows/security_audit.py +2 -2
  69. empathy_os/workflows/security_audit_phase3.py +7 -4
  70. empathy_os/workflows/seo_optimization.py +633 -0
  71. empathy_os/workflows/test_gen/__init__.py +52 -0
  72. empathy_os/workflows/test_gen/ast_analyzer.py +249 -0
  73. empathy_os/workflows/test_gen/config.py +88 -0
  74. empathy_os/workflows/test_gen/data_models.py +38 -0
  75. empathy_os/workflows/test_gen/report_formatter.py +289 -0
  76. empathy_os/workflows/test_gen/test_templates.py +381 -0
  77. empathy_os/workflows/test_gen/workflow.py +655 -0
  78. empathy_os/workflows/test_gen.py +42 -1905
  79. empathy_os/cli/parsers/cache 2.py +0 -65
  80. empathy_os/cli_router 2.py +0 -416
  81. empathy_os/dashboard/app 2.py +0 -512
  82. empathy_os/dashboard/simple_server 2.py +0 -403
  83. empathy_os/dashboard/standalone_server 2.py +0 -536
  84. empathy_os/memory/types 2.py +0 -441
  85. empathy_os/models/adaptive_routing 2.py +0 -437
  86. empathy_os/models/telemetry.py +0 -1660
  87. empathy_os/project_index/scanner_parallel 2.py +0 -291
  88. empathy_os/telemetry/agent_coordination 2.py +0 -478
  89. empathy_os/telemetry/agent_tracking 2.py +0 -350
  90. empathy_os/telemetry/approval_gates 2.py +0 -563
  91. empathy_os/telemetry/event_streaming 2.py +0 -405
  92. empathy_os/telemetry/feedback_loop 2.py +0 -557
  93. empathy_os/vscode_bridge 2.py +0 -173
  94. empathy_os/workflows/progressive/__init__ 2.py +0 -92
  95. empathy_os/workflows/progressive/cli 2.py +0 -242
  96. empathy_os/workflows/progressive/core 2.py +0 -488
  97. empathy_os/workflows/progressive/orchestrator 2.py +0 -701
  98. empathy_os/workflows/progressive/reports 2.py +0 -528
  99. empathy_os/workflows/progressive/telemetry 2.py +0 -280
  100. empathy_os/workflows/progressive/test_gen 2.py +0 -514
  101. empathy_os/workflows/progressive/workflow 2.py +0 -628
  102. {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/WHEEL +0 -0
  103. {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/entry_points.txt +0 -0
  104. {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/licenses/LICENSE +0 -0
  105. {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +0 -0
  106. {empathy_framework-5.1.1.dist-info → empathy_framework-5.3.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,249 @@
1
+ """AST-based Function and Class Analyzer.
2
+
3
+ Extracts function signatures, exception types, side effects, and complexity
4
+ from Python source code using AST parsing.
5
+
6
+ Copyright 2025 Smart-AI-Memory
7
+ Licensed under Fair Source License 0.9
8
+ """
9
+
10
+ import ast
11
+
12
+ from .data_models import ClassSignature, FunctionSignature
13
+
14
+
15
class ASTFunctionAnalyzer(ast.NodeVisitor):
    """AST-based function analyzer for accurate test generation.

    Extracts:
    - Function signatures with parameter types and defaults
    - Exception types raised (names collected from ``raise`` statements)
    - Side effects detection (name-based heuristic)
    - Complexity estimation (simplified cyclomatic count)

    Parse errors are tracked in the `last_error` attribute for debugging;
    `analyze()` never raises on bad input, it returns empty lists instead.
    """

    def __init__(self):
        # Results of the most recent analyze() call.
        self.functions: list[FunctionSignature] = []
        self.classes: list[ClassSignature] = []
        # Name of the class currently being visited (None at module level);
        # used to distinguish top-level functions from methods.
        self._current_class: str | None = None
        self.last_error: str | None = None  # Track parse errors for debugging

    def analyze(
        self,
        code: str,
        file_path: str = "",
    ) -> tuple[list[FunctionSignature], list[ClassSignature]]:
        """Analyze code and extract function/class signatures.

        Args:
            code: Python source code to analyze
            file_path: Optional file path for error reporting

        Returns:
            Tuple of (functions, classes) lists. If parsing fails,
            returns empty lists and sets self.last_error with details.

        """
        self.last_error = None
        try:
            tree = ast.parse(code)
            # Reset accumulated results so the analyzer instance can be
            # reused across multiple analyze() calls.
            self.functions = []
            self.classes = []
            self.visit(tree)
            return self.functions, self.classes
        except SyntaxError as e:
            # Track the error for debugging instead of silent failure
            location = f" at line {e.lineno}" if e.lineno else ""
            file_info = f" in {file_path}" if file_path else ""
            self.last_error = f"SyntaxError{file_info}{location}: {e.msg}"
            return [], []

    def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
        """Extract signature for a module-level function."""
        if self._current_class is None:  # Only top-level functions
            sig = self._extract_function_signature(node)
            self.functions.append(sig)
            # Don't visit nested functions - we only want top-level
        else:
            # Inside a class - this is a method, visit it
            # NOTE(review): visit_ClassDef consumes class bodies directly and
            # never calls generic_visit, so this branch looks unreachable in
            # the current traversal — confirm before relying on it.
            self.generic_visit(node)

    def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> None:
        """Extract signature for a module-level ``async def`` function."""
        if self._current_class is None:
            sig = self._extract_function_signature(node, is_async=True)
            self.functions.append(sig)
            # Don't visit nested functions - we only want top-level
        else:
            # Inside a class - this is a method, visit it
            # NOTE(review): see visit_FunctionDef — likely unreachable.
            self.generic_visit(node)

    def visit_ClassDef(self, node: ast.ClassDef) -> None:
        """Extract class signature with methods.

        Processes the class body directly (no generic_visit) so methods are
        captured exactly once, into the ClassSignature rather than the
        module-level functions list.
        """
        self._current_class = node.name
        methods = []
        init_params: list[tuple[str, str, str | None]] = []

        # Extract base classes (simple names and dotted attributes only)
        base_classes = []
        for base in node.bases:
            if isinstance(base, ast.Name):
                base_classes.append(base.id)
            elif isinstance(base, ast.Attribute):
                base_classes.append(ast.unparse(base))

        # Detect if this is an Enum by checking its base-class names.
        # NOTE(review): "auto" in this set looks spurious — `auto` is an enum
        # value helper, never a base class; harmless but worth confirming.
        enum_bases = {"Enum", "IntEnum", "StrEnum", "Flag", "IntFlag", "auto"}
        is_enum = any(b in enum_bases for b in base_classes)

        # Detect if this is a dataclass: matches both bare @dataclass and
        # parameterized @dataclass(...) decorator forms.
        is_dataclass = False
        for decorator in node.decorator_list:
            if isinstance(decorator, ast.Name) and decorator.id == "dataclass":
                is_dataclass = True
            elif isinstance(decorator, ast.Call):
                if isinstance(decorator.func, ast.Name) and decorator.func.id == "dataclass":
                    is_dataclass = True

        # Process methods (direct children of the class body only)
        for item in node.body:
            if isinstance(item, ast.FunctionDef | ast.AsyncFunctionDef):
                method_sig = self._extract_function_signature(
                    item,
                    is_async=isinstance(item, ast.AsyncFunctionDef),
                )
                methods.append(method_sig)

                # Extract __init__ params for constructor analysis
                if item.name == "__init__":
                    init_params = method_sig.params[1:]  # Skip 'self'

        # Count required init params (those without defaults)
        required_init_params = sum(1 for p in init_params if p[2] is None)

        self.classes.append(
            ClassSignature(
                name=node.name,
                methods=methods,
                init_params=init_params,
                base_classes=base_classes,
                docstring=ast.get_docstring(node),
                is_enum=is_enum,
                is_dataclass=is_dataclass,
                required_init_params=required_init_params,
            ),
        )

        # NOTE(review): a nested class would reset this to None rather than
        # restoring the enclosing class name — confirm nested classes are
        # out of scope for this analyzer.
        self._current_class = None
        # Don't call generic_visit to avoid processing methods again

    def _extract_function_signature(
        self,
        node: ast.FunctionDef | ast.AsyncFunctionDef,
        is_async: bool = False,
    ) -> FunctionSignature:
        """Extract detailed signature from function node.

        Only regular positional-or-keyword parameters (node.args.args) are
        captured; positional-only, keyword-only, *args and **kwargs are
        ignored by design.
        """
        # Extract parameters with types and defaults
        params = []
        defaults = list(node.args.defaults)
        num_defaults = len(defaults)
        num_args = len(node.args.args)

        for i, arg in enumerate(node.args.args):
            param_name = arg.arg
            param_type = ast.unparse(arg.annotation) if arg.annotation else "Any"

            # Defaults align to the tail of the parameter list, so index i
            # maps to defaults[i - (num_args - num_defaults)] when >= 0.
            default_idx = i - (num_args - num_defaults)
            default_val = None
            if default_idx >= 0:
                try:
                    default_val = ast.unparse(defaults[default_idx])
                except Exception:
                    # Fall back to a placeholder if the default can't be
                    # rendered back to source text.
                    default_val = "..."

            params.append((param_name, param_type, default_val))

        # Extract return type (unparsed annotation, or None if absent)
        return_type = ast.unparse(node.returns) if node.returns else None

        # Find raised exceptions: collect the exception name from every
        # `raise` in the body, whether called (raise E(...)) or bare (raise E).
        raises: set[str] = set()
        for child in ast.walk(node):
            if isinstance(child, ast.Raise) and child.exc:
                if isinstance(child.exc, ast.Call):
                    if isinstance(child.exc.func, ast.Name):
                        raises.add(child.exc.func.id)
                    elif isinstance(child.exc.func, ast.Attribute):
                        raises.add(child.exc.func.attr)
                elif isinstance(child.exc, ast.Name):
                    raises.add(child.exc.id)

        # Detect side effects (simple heuristic)
        has_side_effects = self._detect_side_effects(node)

        # Estimate complexity
        complexity = self._estimate_complexity(node)

        # Extract decorators: bare names, dotted names, and called decorators
        # (only the called function's simple name is kept for ast.Call).
        decorators = []
        for dec in node.decorator_list:
            if isinstance(dec, ast.Name):
                decorators.append(dec.id)
            elif isinstance(dec, ast.Attribute):
                decorators.append(ast.unparse(dec))
            elif isinstance(dec, ast.Call):
                if isinstance(dec.func, ast.Name):
                    decorators.append(dec.func.id)

        return FunctionSignature(
            name=node.name,
            params=params,
            return_type=return_type,
            is_async=is_async or isinstance(node, ast.AsyncFunctionDef),
            raises=raises,
            has_side_effects=has_side_effects,
            docstring=ast.get_docstring(node),
            complexity=complexity,
            decorators=decorators,
        )

    def _detect_side_effects(self, node: ast.FunctionDef | ast.AsyncFunctionDef) -> bool:
        """Detect if function has side effects (writes to files, global state, etc.).

        Purely name-based: any call whose function or attribute name
        (case-insensitive) matches the set below counts. This yields false
        positives (e.g. dict.update) — acceptable for test prioritization.
        """
        side_effect_names = {
            "print",
            "write",
            "open",
            "save",
            "delete",
            "remove",
            "update",
            "insert",
            "execute",
            "send",
            "post",
            "put",
            "patch",
        }

        for child in ast.walk(node):
            if isinstance(child, ast.Call):
                if isinstance(child.func, ast.Name):
                    if child.func.id.lower() in side_effect_names:
                        return True
                elif isinstance(child.func, ast.Attribute):
                    if child.func.attr.lower() in side_effect_names:
                        return True
        return False

    def _estimate_complexity(self, node: ast.FunctionDef | ast.AsyncFunctionDef) -> int:
        """Estimate cyclomatic complexity (simplified).

        Starts at 1 and adds one per branch point (if/while/for/except) and
        one per extra operand in each boolean expression.
        """
        complexity = 1
        for child in ast.walk(node):
            if isinstance(child, ast.If | ast.While | ast.For | ast.ExceptHandler):
                complexity += 1
            elif isinstance(child, ast.BoolOp):
                # `a and b and c` adds 2: one per additional operand
                complexity += len(child.values) - 1
        return complexity
@@ -0,0 +1,88 @@
1
+ """Test Generation Configuration.
2
+
3
+ Default patterns and step configurations for test generation workflow.
4
+
5
+ Copyright 2025 Smart-AI-Memory
6
+ Licensed under Fair Source License 0.9
7
+ """
8
+
9
+ from ..step_config import WorkflowStepConfig
10
+
11
+ # =============================================================================
12
+ # Default Configuration
13
+ # =============================================================================
14
+
15
# Directory names excluded from file scanning by default.
# Callers may override the set at runtime via input_data["skip_patterns"].
DEFAULT_SKIP_PATTERNS = [
    ".git", ".hg", ".svn",                                # version control
    "node_modules", "bower_components", "vendor",         # dependencies
    "__pycache__", ".mypy_cache", ".pytest_cache",        # Python caches
    ".ruff_cache", ".hypothesis",
    "venv", ".venv", "env", ".env",                       # virtual environments
    "virtualenv", ".virtualenv",
    ".tox", ".nox",                                       # build tools
    "build", "dist", "eggs", ".eggs", "site-packages",    # build outputs
    ".idea", ".vscode",                                   # IDE
    "migrations", "alembic",                              # framework-specific
    "_build", "docs/_build",                              # documentation
]
57
+
58
# Per-step execution specs: (name, task_type, tier_hint, description, max_tokens).
# Tier hints route cheap triage work to small models and final review to premium.
_TEST_GEN_STEP_SPECS = [
    ("identify", "triage", "cheap", "Identify files needing tests", 2000),
    ("analyze", "code_analysis", "capable", "Analyze code structure for test generation", 3000),
    ("generate", "code_generation", "capable", "Generate test cases", 4000),
    ("review", "final_review", "premium", "Review and improve generated test suite", 3000),
]

# Step configurations for executor-based execution, keyed by step name.
TEST_GEN_STEPS = {
    step_name: WorkflowStepConfig(
        name=step_name,
        task_type=task_type,
        tier_hint=tier_hint,
        description=description,
        max_tokens=max_tokens,
    )
    for step_name, task_type, tier_hint, description, max_tokens in _TEST_GEN_STEP_SPECS
}
@@ -0,0 +1,38 @@
1
+ """Test Generation Data Models.
2
+
3
+ Dataclass definitions for function and class signatures.
4
+
5
+ Copyright 2025 Smart-AI-Memory
6
+ Licensed under Fair Source License 0.9
7
+ """
8
+
9
+ from dataclasses import dataclass, field
10
+
11
+
12
@dataclass
class FunctionSignature:
    """Detailed function analysis for test generation.

    Produced by the AST analyzer; fields mirror information read directly
    from a function's AST node.
    """

    name: str
    # Positional parameters as (name, type_hint, default) tuples; the
    # analyzer uses "Any" for unannotated params and None for no default.
    params: list[tuple[str, str, str | None]]
    return_type: str | None  # Unparsed return annotation, None if absent
    is_async: bool  # True for `async def`
    raises: set[str]  # Exception names found in `raise` statements
    has_side_effects: bool  # Heuristic flag (print/write/open/... calls seen)
    docstring: str | None
    complexity: int = 1  # Rough cyclomatic complexity estimate
    decorators: list[str] = field(default_factory=list)  # Decorator names
25
+
26
+
27
@dataclass
class ClassSignature:
    """Detailed class analysis for test generation.

    Produced by the AST analyzer; aggregates per-method signatures plus
    construction info needed to instantiate the class in generated tests.
    """

    name: str
    methods: list[FunctionSignature]
    # Constructor params as (name, type_hint, default) tuples, 'self' excluded
    init_params: list[tuple[str, str, str | None]]
    base_classes: list[str]  # Base-class names (dotted names unparsed)
    docstring: str | None
    is_enum: bool = False  # True if class inherits from Enum
    is_dataclass: bool = False  # True if class has @dataclass decorator
    required_init_params: int = 0  # Number of __init__ params without defaults
@@ -0,0 +1,289 @@
1
+ """Test Generation Report Formatter.
2
+
3
+ Format test generation output as human-readable reports.
4
+
5
+ Copyright 2025 Smart-AI-Memory
6
+ Licensed under Fair Source License 0.9
7
+ """
8
+
9
+ import re
10
+
11
+
12
def format_test_gen_report(result: dict, input_data: dict) -> str:
    """Format test generation output as a human-readable report.

    The report is a sequence of 60-column sections: header, summary stats,
    scope notice, reviewer quality feedback (parsed from XML), generated /
    written test listings, next-step recommendations, and a footer.

    Args:
        result: The review stage result (totals, review feedback, model tier)
        input_data: Input data accumulated from previous stages

    Returns:
        Formatted report string

    """
    lines: list[str] = []

    total_tests = result.get("total_tests", 0)
    files_covered = result.get("files_covered", 0)

    # Header
    lines.append("=" * 60)
    lines.append("TEST GAP ANALYSIS REPORT")
    lines.append("=" * 60)
    lines.append("")

    # Summary stats
    total_candidates = input_data.get("total_candidates", 0)
    hotspot_count = input_data.get("hotspot_count", 0)
    untested_count = input_data.get("untested_count", 0)

    lines.append("-" * 60)
    lines.append("SUMMARY")
    lines.append("-" * 60)
    lines.append(f"Tests Generated: {total_tests}")
    lines.append(f"Files Covered: {files_covered}")
    lines.append(f"Total Candidates: {total_candidates}")
    lines.append(f"Bug Hotspots Found: {hotspot_count}")
    lines.append(f"Untested Files: {untested_count}")
    lines.append("")

    # Status indicator keyed off the number of generated tests
    if total_tests == 0:
        lines.append("⚠️ No tests were generated")
    elif total_tests < 5:
        lines.append(f"🟡 Generated {total_tests} test(s) - consider adding more coverage")
    elif total_tests < 20:
        lines.append(f"🟢 Generated {total_tests} tests - good coverage")
    else:
        lines.append(f"✅ Generated {total_tests} tests - excellent coverage")
    lines.append("")

    # Scope notice for enterprise clarity
    lines.extend(_format_scope_notice(input_data, files_covered))

    # Parse structured XML review feedback, if the reviewer produced any
    review = result.get("review_feedback", "")
    xml_summary, coverage_improvement, xml_findings, xml_tests = _parse_review_xml(review)

    # Quality assessment: word-wrapped reviewer summary
    if xml_summary:
        lines.append("-" * 60)
        lines.append("QUALITY ASSESSMENT")
        lines.append("-" * 60)
        lines.extend(_wrap_text(xml_summary, 58))
        lines.append("")

    if coverage_improvement:
        lines.append(f"📈 {coverage_improvement}")
        lines.append("")

    # Findings and suggested tests parsed from the XML feedback
    lines.extend(_format_findings(xml_findings))
    lines.extend(_format_suggested_tests(xml_tests))

    # Generated tests breakdown (only when no XML findings were available)
    generated_tests = input_data.get("generated_tests", [])
    if generated_tests and not xml_findings:
        lines.append("-" * 60)
        lines.append("GENERATED TESTS BY FILE")
        lines.append("-" * 60)
        for test_file in generated_tests[:10]:  # Limit display
            source = _shorten_path(test_file.get("source_file", "unknown"), 50)
            test_count = test_file.get("test_count", 0)
            lines.append(f" 📁 {source}")
            lines.append(
                f" └─ {test_count} test(s) → {test_file.get('test_file', 'test_*.py')}",
            )
        if len(generated_tests) > 10:
            lines.append(f" ... and {len(generated_tests) - 10} more files")
        lines.append("")

    # Written files section
    written_files = input_data.get("written_files", [])
    if written_files:
        lines.append("-" * 60)
        lines.append("TESTS WRITTEN TO DISK")
        lines.append("-" * 60)
        for file_path in written_files[:10]:
            lines.append(f" ✅ {_shorten_path(file_path, 55)}")
        if len(written_files) > 10:
            lines.append(f" ... and {len(written_files) - 10} more files")
        lines.append("")
        lines.append(" Run: pytest <file> to execute these tests")
        lines.append("")
    elif input_data.get("tests_written") is False and total_tests > 0:
        lines.append("-" * 60)
        lines.append("GENERATED TESTS (NOT WRITTEN)")
        lines.append("-" * 60)
        lines.append(" ⚠️ Tests were generated but not written to disk.")
        lines.append(" To write tests, run with: write_tests=True")
        lines.append("")

    # Recommendations
    lines.extend(
        _format_next_steps(xml_findings, xml_tests, hotspot_count, untested_count),
    )

    # Footer
    lines.append("=" * 60)
    model_tier = result.get("model_tier_used", "unknown")
    lines.append(f"Review completed using {model_tier} tier model")
    lines.append("=" * 60)

    return "\n".join(lines)


def _shorten_path(path: str, limit: int) -> str:
    """Shorten a path for display, keeping the more informative tail."""
    if len(path) > limit:
        return "..." + path[-(limit - 3):]
    return path


def _truncate(text: str, limit: int) -> str:
    """Truncate text to at most `limit` characters with a trailing ellipsis."""
    if len(text) > limit:
        return text[: limit - 3] + "..."
    return text


def _wrap_text(text: str, width: int) -> list[str]:
    """Greedy word-wrap `text` into lines no longer than `width` characters."""
    wrapped: list[str] = []
    current = ""
    for word in text.split():
        if len(current) + len(word) + 1 <= width:
            current += (" " if current else "") + word
        else:
            wrapped.append(current)
            current = word
    if current:
        wrapped.append(current)
    return wrapped


def _parse_review_xml(review: str) -> tuple[str, str, list[dict], list[dict]]:
    """Parse the reviewer's XML feedback into structured pieces.

    Args:
        review: Raw review feedback text (may be empty or non-XML)

    Returns:
        (summary, coverage_improvement, findings, suggested_tests) — all
        empty when the feedback is absent or lacks a <response> envelope.

    """
    summary = ""
    coverage_improvement = ""
    findings: list[dict] = []
    tests: list[dict] = []

    if not review or "<response>" not in review:
        return summary, coverage_improvement, findings, tests

    # Extract summary
    summary_match = re.search(r"<summary>(.*?)</summary>", review, re.DOTALL)
    if summary_match:
        summary = summary_match.group(1).strip()

    # Extract coverage improvement
    coverage_match = re.search(
        r"<coverage-improvement>(.*?)</coverage-improvement>",
        review,
        re.DOTALL,
    )
    if coverage_match:
        coverage_improvement = coverage_match.group(1).strip()

    # Extract findings (severity attribute plus nested title/location/fix)
    for finding_match in re.finditer(
        r'<finding severity="(\w+)">(.*?)</finding>',
        review,
        re.DOTALL,
    ):
        content = finding_match.group(2)
        title_match = re.search(r"<title>(.*?)</title>", content, re.DOTALL)
        location_match = re.search(r"<location>(.*?)</location>", content, re.DOTALL)
        fix_match = re.search(r"<fix>(.*?)</fix>", content, re.DOTALL)
        findings.append(
            {
                "severity": finding_match.group(1),
                "title": title_match.group(1).strip() if title_match else "Unknown",
                "location": location_match.group(1).strip() if location_match else "",
                "fix": fix_match.group(1).strip() if fix_match else "",
            },
        )

    # Extract suggested tests (target attribute plus nested type/description)
    for test_match in re.finditer(
        r'<test target="([^"]+)">(.*?)</test>',
        review,
        re.DOTALL,
    ):
        content = test_match.group(2)
        type_match = re.search(r"<type>(.*?)</type>", content, re.DOTALL)
        desc_match = re.search(r"<description>(.*?)</description>", content, re.DOTALL)
        tests.append(
            {
                "target": test_match.group(1),
                "type": type_match.group(1).strip() if type_match else "unit",
                "description": desc_match.group(1).strip() if desc_match else "",
            },
        )

    return summary, coverage_improvement, findings, tests


def _format_scope_notice(input_data: dict, files_covered: int) -> list[str]:
    """Render the SCOPE NOTICE section, or nothing when no scan stats exist."""
    total_source = input_data.get("total_source_files", 0)
    existing_tests = input_data.get("existing_test_files", 0)
    coverage_pct = input_data.get("analysis_coverage_percent", 100)
    large_project = input_data.get("large_project_warning", False)

    if total_source <= 0 and existing_tests <= 0:
        return []

    section = ["-" * 60, "SCOPE NOTICE", "-" * 60]

    if large_project:
        section.append("⚠️ LARGE PROJECT: Only high-priority files analyzed")
        section.append(f" Coverage: {coverage_pct:.0f}% of candidate files")
        section.append("")

    section.append(f"Source Files Found: {total_source}")
    section.append(f"Existing Test Files: {existing_tests}")
    section.append(f"Files Analyzed: {files_covered}")

    if existing_tests > 0:
        section.append("")
        section.append("Note: This report identifies gaps in untested files.")
        section.append("Run 'pytest --co -q' for full test suite statistics.")
    section.append("")
    return section


def _format_findings(findings: list[dict]) -> list[str]:
    """Render QUALITY FINDINGS sorted by severity (high first)."""
    if not findings:
        return []

    severity_emoji = {"high": "🔴", "medium": "🟠", "low": "🟡", "info": "🔵"}
    severity_order = {"high": 0, "medium": 1, "low": 2, "info": 3}

    section = ["-" * 60, "QUALITY FINDINGS", "-" * 60]
    for finding in sorted(findings, key=lambda f: severity_order.get(f["severity"], 4)):
        emoji = severity_emoji.get(finding["severity"], "⚪")
        section.append(f"{emoji} [{finding['severity'].upper()}] {finding['title']}")
        if finding["location"]:
            section.append(f" Location: {finding['location']}")
        if finding["fix"]:
            # Truncate long fix recommendations
            section.append(f" Fix: {_truncate(finding['fix'], 70)}")
    section.append("")
    return section


def _format_suggested_tests(tests: list[dict]) -> list[str]:
    """Render SUGGESTED TESTS TO ADD (first five, with an overflow count)."""
    if not tests:
        return []

    section = ["-" * 60, "SUGGESTED TESTS TO ADD", "-" * 60]
    for i, test in enumerate(tests[:5], 1):  # Limit to 5
        section.append(f"{i}. {test['target']} ({test['type']})")
        if test["description"]:
            section.append(f" {_truncate(test['description'], 55)}")
    section.append("")

    if len(tests) > 5:
        section.append(f" ... and {len(tests) - 5} more suggested tests")
        section.append("")
    return section


def _format_next_steps(
    findings: list[dict],
    suggested_tests: list[dict],
    hotspot_count: int,
    untested_count: int,
) -> list[str]:
    """Render NEXT STEPS recommendations based on what was found."""
    section = ["-" * 60, "NEXT STEPS", "-" * 60]

    high_findings = sum(1 for f in findings if f["severity"] == "high")
    medium_findings = sum(1 for f in findings if f["severity"] == "medium")

    if high_findings > 0:
        section.append(f" 🔴 Address {high_findings} high-priority finding(s) first")
    if medium_findings > 0:
        section.append(f" 🟠 Review {medium_findings} medium-priority finding(s)")
    if suggested_tests:
        section.append(f" 📝 Consider adding {len(suggested_tests)} suggested test(s)")
    if hotspot_count > 0:
        section.append(f" 🔥 {hotspot_count} bug hotspot file(s) need priority testing")
    if untested_count > 0:
        section.append(f" 📁 {untested_count} file(s) have no existing tests")
    if not any([high_findings, medium_findings, suggested_tests, hotspot_count, untested_count]):
        section.append(" ✅ Test suite is in good shape!")
    section.append("")
    return section