empathy-framework 4.8.0-py3-none-any.whl → 4.9.1-py3-none-any.whl
This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- {empathy_framework-4.8.0.dist-info → empathy_framework-4.9.1.dist-info}/METADATA +64 -25
- {empathy_framework-4.8.0.dist-info → empathy_framework-4.9.1.dist-info}/RECORD +28 -39
- empathy_os/__init__.py +2 -2
- empathy_os/cache/hash_only.py +3 -6
- empathy_os/cache/hybrid.py +3 -6
- empathy_os/cli_legacy.py +1 -27
- empathy_os/cli_unified.py +0 -25
- empathy_os/memory/__init__.py +5 -19
- empathy_os/memory/short_term.py +132 -10
- empathy_os/memory/types.py +4 -0
- empathy_os/models/registry.py +4 -4
- empathy_os/project_index/scanner.py +3 -2
- empathy_os/socratic/ab_testing.py +1 -1
- empathy_os/workflow_commands.py +9 -9
- empathy_os/workflows/__init__.py +4 -4
- empathy_os/workflows/base.py +8 -54
- empathy_os/workflows/bug_predict.py +2 -2
- empathy_os/workflows/history.py +5 -3
- empathy_os/workflows/perf_audit.py +4 -4
- empathy_os/workflows/progress.py +22 -324
- empathy_os/workflows/routing.py +0 -5
- empathy_os/workflows/security_audit.py +0 -189
- empathy_os/workflows/security_audit_phase3.py +26 -2
- empathy_os/workflows/test_gen.py +7 -7
- empathy_os/vscode_bridge 2.py +0 -173
- empathy_os/workflows/output.py +0 -410
- empathy_os/workflows/progressive/README 2.md +0 -454
- empathy_os/workflows/progressive/__init__ 2.py +0 -92
- empathy_os/workflows/progressive/cli 2.py +0 -242
- empathy_os/workflows/progressive/core 2.py +0 -488
- empathy_os/workflows/progressive/orchestrator 2.py +0 -701
- empathy_os/workflows/progressive/reports 2.py +0 -528
- empathy_os/workflows/progressive/telemetry 2.py +0 -280
- empathy_os/workflows/progressive/test_gen 2.py +0 -514
- empathy_os/workflows/progressive/workflow 2.py +0 -628
- {empathy_framework-4.8.0.dist-info → empathy_framework-4.9.1.dist-info}/WHEEL +0 -0
- {empathy_framework-4.8.0.dist-info → empathy_framework-4.9.1.dist-info}/entry_points.txt +0 -0
- {empathy_framework-4.8.0.dist-info → empathy_framework-4.9.1.dist-info}/licenses/LICENSE +0 -0
- {empathy_framework-4.8.0.dist-info → empathy_framework-4.9.1.dist-info}/top_level.txt +0 -0
empathy_os/workflows/test_gen.py
CHANGED

@@ -597,8 +597,8 @@ class TestGenerationWorkflow(BaseWorkflow):
         {
             "candidates": candidates[:max_candidates],
             "total_candidates": len(candidates),
-            "hotspot_count":
-            "untested_count":
+            "hotspot_count": sum(1 for c in candidates if c["is_hotspot"]),
+            "untested_count": sum(1 for c in candidates if not c["has_tests"]),
             # Scope awareness fields for enterprise reporting
             "total_source_files": total_source_files,
             "existing_test_files": existing_test_files,

@@ -1503,13 +1503,13 @@ END OF REQUIRED FORMAT - output nothing after recommendations."""
     lines.append(f"| **Total Test Functions** | {total_test_count} |")
     lines.append(f"| **Files Covered** | {len(generated_tests)} |")

-    # Count classes and functions
+    # Count classes and functions (generator expressions for memory efficiency)
     total_classes = sum(
-
+        sum(1 for t in item.get("tests", []) if t.get("type") == "class")
         for item in generated_tests
     )
     total_functions = sum(
-
+        sum(1 for t in item.get("tests", []) if t.get("type") == "function")
         for item in generated_tests
     )
     lines.append(f"| **Classes Tested** | {total_classes} |")

@@ -1799,8 +1799,8 @@ def format_test_gen_report(result: dict, input_data: dict) -> str:
     lines.append("NEXT STEPS")
     lines.append("-" * 60)

-    high_findings =
-    medium_findings =
+    high_findings = sum(1 for f in xml_findings if f["severity"] == "high")
+    medium_findings = sum(1 for f in xml_findings if f["severity"] == "medium")

     if high_findings > 0:
         lines.append(f" 🔴 Address {high_findings} high-priority finding(s) first")
empathy_os/vscode_bridge 2.py
DELETED

@@ -1,173 +0,0 @@
-"""VS Code Extension Bridge
-
-Provides functions to write data that the VS Code extension can pick up.
-Enables Claude Code CLI output to appear in VS Code webview panels.
-
-Copyright 2026 Smart-AI-Memory
-Licensed under Fair Source License 0.9
-"""
-
-import json
-from dataclasses import asdict, dataclass
-from datetime import datetime
-from pathlib import Path
-from typing import Any
-
-
-@dataclass
-class ReviewFinding:
-    """A code review finding."""
-
-    id: str
-    file: str
-    line: int
-    severity: str  # 'critical' | 'high' | 'medium' | 'low' | 'info'
-    category: str  # 'security' | 'performance' | 'maintainability' | 'style' | 'correctness'
-    message: str
-    column: int = 1
-    details: str | None = None
-    recommendation: str | None = None
-
-
-@dataclass
-class CodeReviewResult:
-    """Code review results for VS Code bridge."""
-
-    findings: list[dict[str, Any]]
-    summary: dict[str, Any]
-    verdict: str  # 'approve' | 'approve_with_suggestions' | 'request_changes' | 'reject'
-    security_score: int
-    formatted_report: str
-    model_tier_used: str
-    timestamp: str
-
-
-def get_empathy_dir() -> Path:
-    """Get the .empathy directory, creating if needed."""
-    empathy_dir = Path(".empathy")
-    empathy_dir.mkdir(exist_ok=True)
-    return empathy_dir
-
-
-def write_code_review_results(
-    findings: list[dict[str, Any]] | None = None,
-    summary: dict[str, Any] | None = None,
-    verdict: str = "approve_with_suggestions",
-    security_score: int = 85,
-    formatted_report: str = "",
-    model_tier_used: str = "capable",
-) -> Path:
-    """Write code review results for VS Code extension to pick up.
-
-    Args:
-        findings: List of finding dicts with keys: id, file, line, severity, category, message
-        summary: Summary dict with keys: total_findings, by_severity, by_category, files_affected
-        verdict: One of 'approve', 'approve_with_suggestions', 'request_changes', 'reject'
-        security_score: 0-100 score
-        formatted_report: Markdown formatted report
-        model_tier_used: 'cheap', 'capable', or 'premium'
-
-    Returns:
-        Path to the written file
-    """
-    findings = findings or []
-
-    # Build summary if not provided
-    if summary is None:
-        by_severity: dict[str, int] = {}
-        by_category: dict[str, int] = {}
-        files_affected: set[str] = set()
-
-        for f in findings:
-            sev = f.get("severity", "info")
-            cat = f.get("category", "correctness")
-            by_severity[sev] = by_severity.get(sev, 0) + 1
-            by_category[cat] = by_category.get(cat, 0) + 1
-            if f.get("file"):
-                files_affected.add(f["file"])
-
-        summary = {
-            "total_findings": len(findings),
-            "by_severity": by_severity,
-            "by_category": by_category,
-            "files_affected": list(files_affected),
-        }
-
-    result = CodeReviewResult(
-        findings=findings,
-        summary=summary,
-        verdict=verdict,
-        security_score=security_score,
-        formatted_report=formatted_report,
-        model_tier_used=model_tier_used,
-        timestamp=datetime.now().isoformat(),
-    )
-
-    output_path = get_empathy_dir() / "code-review-results.json"
-
-    with open(output_path, "w") as f:
-        json.dump(asdict(result), f, indent=2)
-
-    return output_path
-
-
-def write_pr_review_results(
-    pr_number: int | str,
-    title: str,
-    findings: list[dict[str, Any]],
-    verdict: str = "approve_with_suggestions",
-    summary_text: str = "",
-) -> Path:
-    """Write PR review results for VS Code extension.
-
-    Convenience wrapper for PR reviews from GitHub.
-
-    Args:
-        pr_number: The PR number
-        title: PR title
-        findings: List of review findings
-        verdict: Review verdict
-        summary_text: Summary of the review
-
-    Returns:
-        Path to the written file
-    """
-    formatted_report = f"""## PR #{pr_number}: {title}
-
-{summary_text}
-
-### Findings ({len(findings)})
-
-"""
-    for f in findings:
-        formatted_report += f"- **{f.get('severity', 'info').upper()}** [{f.get('file', 'unknown')}:{f.get('line', 0)}]: {f.get('message', '')}\n"
-
-    return write_code_review_results(
-        findings=findings,
-        verdict=verdict,
-        formatted_report=formatted_report,
-        model_tier_used="capable",
-    )
-
-
-# Quick helper for Claude Code to call
-def send_to_vscode(
-    message: str,
-    findings: list[dict[str, Any]] | None = None,
-    verdict: str = "approve_with_suggestions",
-) -> str:
-    """Quick helper to send review results to VS Code.
-
-    Usage in Claude Code:
-        from empathy_os.vscode_bridge import send_to_vscode
-        send_to_vscode("Review complete", findings=[...])
-
-    Returns:
-        Confirmation message
-    """
-    path = write_code_review_results(
-        findings=findings or [],
-        formatted_report=message,
-        verdict=verdict,
-    )
-    return f"Results written to {path} - VS Code will update automatically"
empathy_os/workflows/output.py
DELETED

@@ -1,410 +0,0 @@
-"""Unified output formatting for workflows.
-
-Provides consistent Rich-based output components for workflow results:
-- WorkflowReport: Main report container with sections
-- FindingsTable: Render findings as Rich Table or plain text
-- MetricsPanel: Color-coded score display
-- ReportSection: Individual report sections
-
-Supports graceful fallback to plain text when Rich is unavailable.
-
-Copyright 2025 Smart-AI-Memory
-Licensed under Fair Source License 0.9
-"""
-
-from __future__ import annotations
-
-from dataclasses import dataclass, field
-from typing import TYPE_CHECKING, Any
-
-# Rich imports with fallback
-try:
-    from rich.console import Console
-    from rich.panel import Panel
-    from rich.table import Table
-    from rich.text import Text
-
-    RICH_AVAILABLE = True
-except ImportError:
-    RICH_AVAILABLE = False
-    Console = None  # type: ignore
-    Panel = None  # type: ignore
-    Table = None  # type: ignore
-    Text = None  # type: ignore
-
-if TYPE_CHECKING:
-    from rich.console import Console as ConsoleType
-
-
-# =============================================================================
-# DATA CLASSES
-# =============================================================================
-
-
-@dataclass
-class Finding:
-    """Individual finding from a workflow."""
-
-    severity: str  # "high", "medium", "low", "info"
-    file: str
-    line: int | None = None
-    message: str = ""
-    code: str | None = None
-
-    @property
-    def severity_icon(self) -> str:
-        """Get icon for severity level."""
-        icons = {
-            "high": "[red]:x:[/red]" if RICH_AVAILABLE else "X",
-            "medium": "[yellow]:warning:[/yellow]" if RICH_AVAILABLE else "!",
-            "low": "[blue]:information:[/blue]" if RICH_AVAILABLE else "i",
-            "info": "[dim]o[/dim]" if RICH_AVAILABLE else "o",
-        }
-        return icons.get(self.severity.lower(), "o")
-
-    @property
-    def location(self) -> str:
-        """Get file:line location string."""
-        if self.line:
-            return f"{self.file}:{self.line}"
-        return self.file
-
-
-@dataclass
-class ReportSection:
-    """Individual section of a workflow report."""
-
-    title: str
-    content: Any  # str, list[Finding], dict, or Rich renderable
-    collapsed: bool = False
-    style: str = "default"  # "default", "success", "warning", "error"
-
-
-@dataclass
-class WorkflowReport:
-    """Main workflow report container."""
-
-    title: str
-    summary: str = ""
-    sections: list[ReportSection] = field(default_factory=list)
-    score: int | None = None
-    level: str = "info"  # "info", "success", "warning", "error"
-    metadata: dict[str, Any] = field(default_factory=dict)
-
-    def add_section(
-        self,
-        title: str,
-        content: Any,
-        collapsed: bool = False,
-        style: str = "default",
-    ) -> None:
-        """Add a section to the report."""
-        self.sections.append(
-            ReportSection(title=title, content=content, collapsed=collapsed, style=style)
-        )
-
-    def render(self, console: ConsoleType | None = None, use_rich: bool = True) -> str:
-        """Render the report.
-
-        Args:
-            console: Rich Console instance (optional)
-            use_rich: Whether to use Rich formatting
-
-        Returns:
-            Rendered report as string (for plain text) or prints to console (for Rich)
-        """
-        if use_rich and RICH_AVAILABLE and console is not None:
-            self._render_rich(console)
-            return ""
-        return self._render_plain()
-
-    def _render_rich(self, console: ConsoleType) -> None:
-        """Render report using Rich."""
-        # Header with score
-        header_parts = [f"[bold]{self.title}[/bold]"]
-        if self.score is not None:
-            score_panel = MetricsPanel.render_score(self.score)
-            console.print(score_panel)
-
-        if self.summary:
-            console.print(f"\n{self.summary}\n")
-
-        # Sections
-        for section in self.sections:
-            self._render_section_rich(console, section)
-
-    def _render_section_rich(self, console: ConsoleType, section: ReportSection) -> None:
-        """Render a single section using Rich."""
-        border_style = {
-            "success": "green",
-            "warning": "yellow",
-            "error": "red",
-            "default": "blue",
-        }.get(section.style, "blue")
-
-        if isinstance(section.content, str):
-            console.print(
-                Panel(section.content, title=section.title, border_style=border_style)
-            )
-        elif isinstance(section.content, list) and all(
-            isinstance(f, Finding) for f in section.content
-        ):
-            table = FindingsTable(section.content).to_rich_table()
-            console.print(Panel(table, title=section.title, border_style=border_style))
-        elif isinstance(section.content, dict):
-            # Render dict as key-value table
-            table = Table(show_header=False, box=None)
-            table.add_column("Key", style="cyan")
-            table.add_column("Value")
-            for key, value in section.content.items():
-                table.add_row(str(key), str(value))
-            console.print(Panel(table, title=section.title, border_style=border_style))
-        else:
-            # Try to print directly (might be a Rich renderable)
-            try:
-                console.print(
-                    Panel(section.content, title=section.title, border_style=border_style)
-                )
-            except Exception:  # noqa: BLE001
-                # INTENTIONAL: Graceful fallback for unknown content types
-                console.print(f"\n[bold]{section.title}[/bold]")
-                console.print(str(section.content))
-
-    def _render_plain(self) -> str:
-        """Render report as plain text."""
-        lines = []
-        separator = "=" * 60
-
-        # Header
-        lines.append(separator)
-        lines.append(self.title.upper())
-        lines.append(separator)
-
-        if self.score is not None:
-            level = MetricsPanel.get_level(self.score)
-            lines.append(f"Score: {self.score}/100 ({level.upper()})")
-            lines.append("")
-
-        if self.summary:
-            lines.append(self.summary)
-            lines.append("")
-
-        # Sections
-        for section in self.sections:
-            lines.append("-" * 60)
-            lines.append(section.title.upper())
-            lines.append("-" * 60)
-
-            if isinstance(section.content, str):
-                lines.append(section.content)
-            elif isinstance(section.content, list) and all(
-                isinstance(f, Finding) for f in section.content
-            ):
-                lines.append(FindingsTable(section.content).to_plain())
-            elif isinstance(section.content, dict):
-                for key, value in section.content.items():
-                    lines.append(f"  {key}: {value}")
-            else:
-                lines.append(str(section.content))
-
-            lines.append("")
-
-        lines.append(separator)
-        return "\n".join(lines)
-
-
-# =============================================================================
-# FINDINGS TABLE
-# =============================================================================
-
-
-class FindingsTable:
-    """Render findings as Rich Table or plain text."""
-
-    def __init__(self, findings: list[Finding]) -> None:
-        """Initialize with list of findings."""
-        self.findings = findings
-
-    def to_rich_table(self) -> Table:
-        """Convert findings to Rich Table."""
-        table = Table(show_header=True, header_style="bold magenta")
-        table.add_column("Severity", style="bold", width=8)
-        table.add_column("Location", style="cyan")
-        table.add_column("Message")
-
-        for finding in self.findings:
-            severity_style = {
-                "high": "red",
-                "medium": "yellow",
-                "low": "blue",
-                "info": "dim",
-            }.get(finding.severity.lower(), "white")
-
-            table.add_row(
-                Text(finding.severity.upper(), style=severity_style),
-                finding.location,
-                finding.message,
-            )
-
-        return table
-
-    def to_plain(self) -> str:
-        """Convert findings to plain text."""
-        if not self.findings:
-            return "  No findings."
-
-        lines = []
-        for finding in self.findings:
-            lines.append(f"  [{finding.severity.upper()}] {finding.location}")
-            if finding.message:
-                lines.append(f"      {finding.message}")
-
-        return "\n".join(lines)
-
-
-# =============================================================================
-# METRICS PANEL
-# =============================================================================
-
-
-class MetricsPanel:
-    """Display score with color-coded indicator."""
-
-    @staticmethod
-    def get_level(score: int) -> str:
-        """Get level name for score."""
-        if score >= 85:
-            return "excellent"
-        elif score >= 70:
-            return "good"
-        elif score >= 50:
-            return "needs work"
-        return "critical"
-
-    @staticmethod
-    def get_style(score: int) -> str:
-        """Get Rich style for score."""
-        if score >= 85:
-            return "green"
-        elif score >= 70:
-            return "yellow"
-        elif score >= 50:
-            return "orange1"
-        return "red"
-
-    @staticmethod
-    def get_icon(score: int) -> str:
-        """Get icon for score."""
-        if score >= 85:
-            return "[green]:heavy_check_mark:[/green]"
-        elif score >= 70:
-            return "[yellow]:large_yellow_circle:[/yellow]"
-        elif score >= 50:
-            return "[orange1]:warning:[/orange1]"
-        return "[red]:x:[/red]"
-
-    @staticmethod
-    def get_plain_icon(score: int) -> str:
-        """Get plain text icon for score."""
-        if score >= 85:
-            return "[OK]"
-        elif score >= 70:
-            return "[--]"
-        elif score >= 50:
-            return "[!!]"
-        return "[XX]"
-
-    @classmethod
-    def render_score(cls, score: int, label: str = "Score") -> Panel:
-        """Render score as Rich Panel.
-
-        Args:
-            score: Score value (0-100)
-            label: Label for the score
-
-        Returns:
-            Rich Panel with formatted score
-        """
-        if not RICH_AVAILABLE or Panel is None:
-            raise RuntimeError("Rich is not available")
-
-        style = cls.get_style(score)
-        icon = cls.get_icon(score)
-        level = cls.get_level(score)
-
-        content = f"{icon} [bold]{score}[/bold]/100 ({level.upper()})"
-        return Panel(content, title=f"[bold]{label}[/bold]", border_style=style)
-
-    @classmethod
-    def render_plain(cls, score: int, label: str = "Score") -> str:
-        """Render score as plain text.
-
-        Args:
-            score: Score value (0-100)
-            label: Label for the score
-
-        Returns:
-            Plain text score display
-        """
-        icon = cls.get_plain_icon(score)
-        level = cls.get_level(score)
-        return f"{label}: {icon} {score}/100 ({level.upper()})"
-
-
-# =============================================================================
-# HELPER FUNCTIONS
-# =============================================================================
-
-
-def format_workflow_result(
-    title: str,
-    summary: str = "",
-    findings: list[dict] | None = None,
-    score: int | None = None,
-    recommendations: str = "",
-    metadata: dict[str, Any] | None = None,
-) -> WorkflowReport:
-    """Create a standardized workflow report.
-
-    Args:
-        title: Report title
-        summary: Brief summary text
-        findings: List of finding dicts with severity, file, line, message
-        score: Overall score (0-100)
-        recommendations: Recommendations text
-        metadata: Additional metadata
-
-    Returns:
-        WorkflowReport instance
-    """
-    report = WorkflowReport(
-        title=title,
-        summary=summary,
-        score=score,
-        metadata=metadata or {},
-    )
-
-    if findings:
-        finding_objs = [
-            Finding(
-                severity=f.get("severity", "info"),
-                file=f.get("file", "unknown"),
-                line=f.get("line"),
-                message=f.get("message", ""),
-                code=f.get("code"),
-            )
-            for f in findings
-        ]
-        report.add_section("Findings", finding_objs)
-
-    if recommendations:
-        report.add_section("Recommendations", recommendations)
-
-    return report
-
-
-def get_console() -> Console | None:
-    """Get Rich Console if available."""
-    if RICH_AVAILABLE and Console is not None:
-        return Console()
-    return None