reporails-cli 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of reporails-cli might be problematic. Click here for more details.
- reporails_cli/.env.example +1 -0
- reporails_cli/__init__.py +24 -0
- reporails_cli/bundled/.semgrepignore +51 -0
- reporails_cli/bundled/__init__.py +31 -0
- reporails_cli/bundled/capability-patterns.yml +54 -0
- reporails_cli/bundled/levels.yml +99 -0
- reporails_cli/core/__init__.py +35 -0
- reporails_cli/core/agents.py +147 -0
- reporails_cli/core/applicability.py +150 -0
- reporails_cli/core/bootstrap.py +147 -0
- reporails_cli/core/cache.py +352 -0
- reporails_cli/core/capability.py +245 -0
- reporails_cli/core/discover.py +362 -0
- reporails_cli/core/engine.py +177 -0
- reporails_cli/core/init.py +309 -0
- reporails_cli/core/levels.py +177 -0
- reporails_cli/core/models.py +329 -0
- reporails_cli/core/opengrep/__init__.py +34 -0
- reporails_cli/core/opengrep/runner.py +203 -0
- reporails_cli/core/opengrep/semgrepignore.py +39 -0
- reporails_cli/core/opengrep/templates.py +138 -0
- reporails_cli/core/registry.py +155 -0
- reporails_cli/core/sarif.py +181 -0
- reporails_cli/core/scorer.py +178 -0
- reporails_cli/core/semantic.py +193 -0
- reporails_cli/core/utils.py +139 -0
- reporails_cli/formatters/__init__.py +19 -0
- reporails_cli/formatters/json.py +137 -0
- reporails_cli/formatters/mcp.py +68 -0
- reporails_cli/formatters/text/__init__.py +32 -0
- reporails_cli/formatters/text/box.py +89 -0
- reporails_cli/formatters/text/chars.py +42 -0
- reporails_cli/formatters/text/compact.py +119 -0
- reporails_cli/formatters/text/components.py +117 -0
- reporails_cli/formatters/text/full.py +135 -0
- reporails_cli/formatters/text/rules.py +50 -0
- reporails_cli/formatters/text/violations.py +92 -0
- reporails_cli/interfaces/__init__.py +1 -0
- reporails_cli/interfaces/cli/__init__.py +7 -0
- reporails_cli/interfaces/cli/main.py +352 -0
- reporails_cli/interfaces/mcp/__init__.py +5 -0
- reporails_cli/interfaces/mcp/server.py +194 -0
- reporails_cli/interfaces/mcp/tools.py +136 -0
- reporails_cli/py.typed +0 -0
- reporails_cli/templates/__init__.py +65 -0
- reporails_cli/templates/cli_box.txt +10 -0
- reporails_cli/templates/cli_cta.txt +4 -0
- reporails_cli/templates/cli_delta.txt +1 -0
- reporails_cli/templates/cli_file_header.txt +1 -0
- reporails_cli/templates/cli_legend.txt +1 -0
- reporails_cli/templates/cli_pending.txt +3 -0
- reporails_cli/templates/cli_violation.txt +1 -0
- reporails_cli/templates/cli_working.txt +2 -0
- reporails_cli-0.0.1.dist-info/METADATA +108 -0
- reporails_cli-0.0.1.dist-info/RECORD +58 -0
- reporails_cli-0.0.1.dist-info/WHEEL +4 -0
- reporails_cli-0.0.1.dist-info/entry_points.txt +3 -0
- reporails_cli-0.0.1.dist-info/licenses/LICENSE +201 -0
|
@@ -0,0 +1,193 @@
|
|
|
1
|
+
"""Semantic rule request building - creates JudgmentRequests for LLM evaluation.
|
|
2
|
+
|
|
3
|
+
Semantic rules require OpenGrep pattern matches before LLM evaluation.
|
|
4
|
+
No match = rule passes (nothing to evaluate).
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
from typing import Any
|
|
11
|
+
|
|
12
|
+
from reporails_cli.core.models import JudgmentRequest, Rule, RuleType, Severity
|
|
13
|
+
from reporails_cli.core.sarif import extract_rule_id, get_location
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def build_semantic_requests(
    sarif: dict[str, Any],
    rules: dict[str, Rule],
    target: Path,
) -> list[JudgmentRequest]:
    """Build JudgmentRequests for every SARIF match that hits a semantic rule.

    Semantic rules only fire on OpenGrep pattern matches: SARIF results whose
    rule is not semantic, or that yield no usable snippet, are skipped. An
    empty SARIF therefore means every semantic rule passes.

    Args:
        sarif: Parsed SARIF document (expects a top-level "runs" list)
        rules: All known rules keyed by rule ID
        target: Repository root used to resolve file paths for snippets

    Returns:
        One JudgmentRequest per evaluable semantic match (possibly empty)
    """
    requests: list[JudgmentRequest] = []
    # Only semantic rules are sent to the LLM; other rule types are
    # evaluated elsewhere.
    semantic_rules = {k: v for k, v in rules.items() if v.type == RuleType.SEMANTIC}

    if not semantic_rules:
        return requests

    for run in sarif.get("runs", []):
        for result in run.get("results", []):
            sarif_rule_id = result.get("ruleId", "")
            rule_id = extract_rule_id(sarif_rule_id)
            location = get_location(result)

            rule = semantic_rules.get(rule_id)
            if not rule:
                # Match belongs to a non-semantic rule — nothing to judge.
                continue

            # Extract snippet from SARIF (not whole file!)
            snippet = extract_snippet(result, target)
            if not snippet:
                continue

            request = build_request(rule, snippet, location)
            if request:
                requests.append(request)

    return requests
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
def extract_snippet(result: dict[str, Any], target: Path) -> str | None:
    """Return the matched source text for one SARIF result.

    Prefers the snippet embedded directly in the SARIF region. When absent,
    falls back to reading a 5-line window from disk (2 lines before the
    match, the match line, 2 after).

    Args:
        result: A single SARIF result object
        target: Repository root for resolving relative file paths

    Returns:
        Snippet text, or None when no usable content can be obtained
    """
    # Preferred path: SARIF already carries the matched text inline.
    for loc in result.get("locations", [])[:1]:
        region = loc.get("physicalLocation", {}).get("region", {})
        embedded = region.get("snippet", {}).get("text")
        if embedded:
            return embedded

    # Fallback: derive "path:line" from the result and read context lines.
    location = get_location(result)
    if ":" not in location:
        return None

    rel_path, _, line_text = location.rpartition(":")
    try:
        match_line = int(line_text)
    except ValueError:
        return None

    source_file = target / rel_path
    try:
        all_lines = source_file.read_text(encoding="utf-8").splitlines()
    except (OSError, UnicodeDecodeError):
        return None

    # 5-line window centred on the (1-indexed) match line, clamped to file.
    window_start = max(0, match_line - 3)
    window_end = min(len(all_lines), match_line + 2)
    return "\n".join(all_lines[window_start:window_end])
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
def build_request(
    rule: Rule,
    content: str,
    location: str,
) -> JudgmentRequest | None:
    """Assemble a JudgmentRequest for one semantic rule match.

    Args:
        rule: Semantic rule definition
        content: Matched file content to evaluate
        location: Location string for reporting

    Returns:
        JudgmentRequest, or None when the rule has no question to ask
    """
    # A rule without a question gives the LLM nothing to evaluate.
    if not rule.question:
        return None

    # Normalise optional rule fields into the shapes JudgmentRequest expects.
    criteria = _parse_criteria(rule.criteria)
    choices = _parse_choices(rule.choices)
    examples = (
        rule.examples
        if rule.examples and isinstance(rule.examples, dict)
        else {"good": [], "bad": []}
    )
    pass_value = rule.pass_value or "pass"
    # Severity comes from the rule's first check when one exists.
    severity = rule.checks[0].severity if rule.checks else Severity.MEDIUM

    return JudgmentRequest(
        rule_id=rule.id,
        rule_title=rule.title,
        content=content,
        location=location,
        question=rule.question,
        criteria=criteria,
        examples=examples,
        choices=choices,
        pass_value=pass_value,
        severity=severity,
        points_if_fail=-10,  # Default penalty
    )
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
def _parse_criteria(criteria: list[dict[str, str]] | str | None) -> dict[str, str]:
    """Normalise the criteria field into a key -> check-description dict.

    Accepts a list of {key, check} dicts, a bare string, or None; anything
    unrecognised collapses to a generic pass condition.

    Args:
        criteria: Criteria in various formats

    Returns:
        Dict mapping criterion key to check description
    """
    fallback = {"pass_condition": "Evaluate based on context"}

    if isinstance(criteria, str):
        return {"pass_condition": criteria}

    if not isinstance(criteria, list):
        # Covers None and any unexpected type.
        return fallback

    parsed: dict[str, str] = {}
    for entry in criteria:
        if not isinstance(entry, dict):
            continue
        # Entries without an explicit key get auto-numbered.
        key = entry.get("key", f"criterion_{len(parsed)}")
        parsed[key] = entry.get("check", str(entry))
    return parsed if parsed else fallback
|
|
168
|
+
|
|
169
|
+
|
|
170
|
+
def _parse_choices(choices: list[dict[str, str]] | list[str] | None) -> list[str]:
    """Parse choices field to list format.

    Accepts a list of {"value": ...} dicts, a list of plain values, or
    None/empty (which default to a binary pass/fail choice).

    Args:
        choices: Choices in various formats

    Returns:
        List of choice values (never empty)
    """
    # None and [] both mean "no explicit choices"; a single falsiness check
    # covers both (the previous separate `is None` branch was redundant).
    if not choices:
        return ["pass", "fail"]

    result: list[str] = []
    for choice in choices:
        if isinstance(choice, dict):
            # Dict entries carry the value under "value"; fall back to repr.
            result.append(str(choice.get("value", str(choice))))
        else:
            result.append(str(choice))

    # Defensive: keep the guaranteed-non-empty contract.
    return result if result else ["pass", "fail"]
|
|
@@ -0,0 +1,139 @@
|
|
|
1
|
+
"""Shared utility functions used across modules.
|
|
2
|
+
|
|
3
|
+
All functions are pure (no I/O) except where noted.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from __future__ import annotations
|
|
7
|
+
|
|
8
|
+
import hashlib
|
|
9
|
+
import re
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
from typing import Any
|
|
12
|
+
|
|
13
|
+
import yaml
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def parse_frontmatter(content: str) -> dict[str, Any]:
    """Parse YAML frontmatter from markdown content.

    Pure function — no I/O.

    Args:
        content: Markdown file content

    Returns:
        Parsed frontmatter dict (empty dict when the frontmatter is blank)

    Raises:
        ValueError: If frontmatter missing or invalid
    """
    # Frontmatter is the YAML block between two leading --- delimiters.
    delimited = re.match(r"^---\s*\n(.*?)---\s*\n", content, re.DOTALL)
    if delimited is None:
        raise ValueError("No frontmatter found")

    try:
        parsed = yaml.safe_load(delimited.group(1))
    except yaml.YAMLError as e:
        raise ValueError(f"Invalid YAML in frontmatter: {e}") from e
    # safe_load returns None for an empty document; normalise to {}.
    return parsed or {}
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def compute_content_hash(file_path: Path) -> str:
    """Hash a file's bytes, e.g. for change detection.

    I/O function — reads the file from disk.

    Args:
        file_path: Path to file

    Returns:
        Hash string in format "sha256:{hash16}" (first 16 hex digits)
    """
    digest = hashlib.sha256(file_path.read_bytes()).hexdigest()
    # Only the 16-character prefix is kept — short but collision-resistant
    # enough for content-change detection.
    return f"sha256:{digest[:16]}"
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
def is_valid_path_reference(path: str) -> bool:
    """Check if a string looks like a valid file path reference.

    Pure function.

    Args:
        path: Potential path string

    Returns:
        True if it looks like a valid path reference
    """
    # Must have at least one slash or dot to resemble a path.
    if "/" not in path and "." not in path:
        return False

    # Filter out URLs.
    if path.startswith(("http://", "https://")):
        return False

    # Reject path traversal attempts (../../../etc)
    if path.count("..") > 2:
        return False

    # Reject absolute paths outside the project.
    # (The previous extra `and not path.startswith("./")` clause was dead:
    # a string starting with "/" can never start with "./".)
    if path.startswith("/"):
        return False

    # Filter out common false positives (abbreviations and version tags).
    false_positives = {"e.g.", "i.e.", "etc.", "vs.", "v1", "v2"}
    return path.lower() not in false_positives
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
def relative_to_safe(path: Path, base: Path) -> str:
    """Render *path* relative to *base*, falling back to the path as-is.

    Pure function.

    Args:
        path: Path to convert
        base: Base directory

    Returns:
        Relative path string, or the original path when not under base
    """
    try:
        relative = path.relative_to(base)
    except ValueError:
        # Path lives outside base — report it unchanged.
        return str(path)
    return str(relative)
|
|
109
|
+
|
|
110
|
+
|
|
111
|
+
def normalize_rule_id(rule_id: str) -> str:
    """Canonicalise a rule ID by upper-casing it.

    Pure function.

    Args:
        rule_id: Raw rule ID

    Returns:
        Uppercase rule ID
    """
    normalized = rule_id.upper()
    return normalized
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
def extract_body_content(markdown: str) -> str:
    """Extract content after frontmatter from markdown.

    Pure function.

    Args:
        markdown: Full markdown content

    Returns:
        Content after frontmatter (or full content if no frontmatter)
    """
    # Need two "---" delimiters for frontmatter to exist; the body is
    # everything after the second one.
    _, first_sep, remainder = markdown.partition("---")
    if first_sep:
        _, second_sep, body = remainder.partition("---")
        if second_sep:
            return body.strip()
    return markdown.strip()
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
"""Output formatters for reporails.
|
|
2
|
+
|
|
3
|
+
Each formatter implements the same interface:
|
|
4
|
+
- format_result(result: ValidationResult) -> T
|
|
5
|
+
- format_score(result: ValidationResult) -> T
|
|
6
|
+
- format_rule(rule_id: str, rule_data: dict) -> T
|
|
7
|
+
|
|
8
|
+
Where T is dict for json/mcp, str for text.
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
from __future__ import annotations
|
|
12
|
+
|
|
13
|
+
from reporails_cli.formatters import json, mcp, text
|
|
14
|
+
|
|
15
|
+
__all__ = [
|
|
16
|
+
"json",
|
|
17
|
+
"mcp",
|
|
18
|
+
"text",
|
|
19
|
+
]
|
|
@@ -0,0 +1,137 @@
|
|
|
1
|
+
"""Canonical JSON serialization for ValidationResult.
|
|
2
|
+
|
|
3
|
+
This is the single source of truth for serializing validation results.
|
|
4
|
+
All other formatters consume this format internally.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
from typing import Any
|
|
10
|
+
|
|
11
|
+
from reporails_cli.core.models import PendingSemantic, ScanDelta, ValidationResult
|
|
12
|
+
from reporails_cli.core.scorer import LEVEL_LABELS
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def _format_pending_semantic(pending: PendingSemantic | None) -> dict[str, Any] | None:
    """Serialise pending-semantic info for JSON output (None passes through)."""
    if pending is None:
        return None
    return {
        "rule_count": pending.rule_count,
        "file_count": pending.file_count,
        "rules": [*pending.rules],
    }
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def format_result(
    result: ValidationResult,
    delta: ScanDelta | None = None,
) -> dict[str, Any]:
    """Convert ValidationResult to the canonical dict format.

    Single source of truth for result serialization; other formatters
    consume this shape internally.

    Args:
        result: ValidationResult from engine
        delta: Optional ScanDelta for comparison with previous run

    Returns:
        Canonical dict representation
    """

    def _violation(v: Any) -> dict[str, Any]:
        # One serialised violation entry.
        return {
            "rule_id": v.rule_id,
            "rule_title": v.rule_title,
            "location": v.location,
            "message": v.message,
            "severity": v.severity.value,
            "check_id": v.check_id,
        }

    def _judgment(jr: Any) -> dict[str, Any]:
        # One serialised judgment-request entry.
        return {
            "rule_id": jr.rule_id,
            "rule_title": jr.rule_title,
            "question": jr.question,
            "location": jr.location,
            "criteria": jr.criteria,
            "examples": jr.examples,
            "choices": jr.choices,
            "pass_value": jr.pass_value,
        }

    partial = result.is_partial
    data: dict[str, Any] = {
        "score": result.score,
        "level": result.level.value,
        "capability": LEVEL_LABELS.get(result.level, "Unknown"),
        "feature_summary": result.feature_summary,
        "summary": {
            "rules_checked": result.rules_checked,
            "rules_passed": result.rules_passed,
            "rules_failed": result.rules_failed,
        },
        "violations": [_violation(v) for v in result.violations],
        "judgment_requests": [_judgment(jr) for jr in result.judgment_requests],
        "friction": result.friction.level if result.friction else "none",
        # Evaluation completeness
        "evaluation": "partial" if partial else "complete",
        "is_partial": partial,
        "pending_semantic": _format_pending_semantic(result.pending_semantic),
    }

    # Delta fields are always present; null when there is no previous run.
    if delta is None:
        data.update(
            score_delta=None,
            level_previous=None,
            level_improved=None,
            violations_delta=None,
        )
    else:
        data.update(
            score_delta=delta.score_delta,
            level_previous=delta.level_previous,
            level_improved=delta.level_improved,
            violations_delta=delta.violations_delta,
        )

    return data
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
def format_score(result: ValidationResult) -> dict[str, Any]:
    """Reduce a ValidationResult to a minimal score summary.

    Args:
        result: ValidationResult from engine

    Returns:
        Simplified dict with just score info
    """
    has_critical = any(v.severity.value == "critical" for v in result.violations)
    return {
        "score": result.score,
        "level": result.level.value,
        "capability": LEVEL_LABELS.get(result.level, "Unknown"),
        "feature_summary": result.feature_summary,
        "rules_checked": result.rules_checked,
        "violations_count": len(result.violations),
        "has_critical": has_critical,
        "friction": result.friction.level if result.friction else "none",
    }
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
def format_rule(rule_id: str, rule_data: dict[str, Any]) -> dict[str, Any]:
    """Format rule explanation as dict.

    Args:
        rule_id: Rule identifier
        rule_data: Rule metadata

    Returns:
        Dict with rule details
    """
    get = rule_data.get
    return {
        "id": rule_id,
        "title": get("title", ""),
        "category": get("category", ""),
        "type": get("type", ""),
        "level": get("level", ""),
        "description": get("description", ""),
        # Fall back to "antipatterns" when "checks" is absent.
        "checks": get("checks", get("antipatterns", [])),
        "see_also": get("see_also", []),
    }
|
|
@@ -0,0 +1,68 @@
|
|
|
1
|
+
"""MCP output formatter.
|
|
2
|
+
|
|
3
|
+
Wraps canonical JSON format with MCP-specific transformations if needed.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from __future__ import annotations
|
|
7
|
+
|
|
8
|
+
from typing import Any
|
|
9
|
+
|
|
10
|
+
from reporails_cli.core.models import ScanDelta, ValidationResult
|
|
11
|
+
from reporails_cli.formatters import json as json_formatter
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
def format_result(
    result: ValidationResult,
    delta: ScanDelta | None = None,
) -> dict[str, Any]:
    """Format validation result for MCP response.

    Adds instructions for Claude to evaluate JudgmentRequests inline.

    Args:
        result: ValidationResult from engine
        delta: Optional ScanDelta for comparison with previous run

    Returns:
        Dict suitable for MCP tool response
    """
    payload = json_formatter.format_result(result, delta)

    # Only attach evaluation instructions when there is something to judge.
    if payload.get("judgment_requests"):
        payload["_instructions"] = (
            "SEMANTIC RULE EVALUATION REQUIRED: "
            "For each judgment_request, read the file content and evaluate "
            "against the question and criteria provided. Report violations "
            "where criteria are not met. Include these in your response to the user."
        )

    return payload
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
def format_score(result: ValidationResult) -> dict[str, Any]:
    """Format quick score response for MCP.

    MCP needs no extra decoration here, so this delegates straight to the
    canonical JSON formatter.

    Args:
        result: ValidationResult from engine

    Returns:
        Simplified dict with just score info
    """
    score_payload = json_formatter.format_score(result)
    return score_payload
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
def format_rule(rule_id: str, rule_data: dict[str, Any]) -> dict[str, Any]:
    """Format rule explanation for MCP.

    MCP uses the canonical JSON shape unchanged, so this delegates directly.

    Args:
        rule_id: Rule identifier
        rule_data: Rule metadata

    Returns:
        Dict with rule details
    """
    rule_payload = json_formatter.format_rule(rule_id, rule_data)
    return rule_payload
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
"""Terminal text output formatters.
|
|
2
|
+
|
|
3
|
+
Public API for formatting validation results as terminal text.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from reporails_cli.formatters.text.compact import format_compact, format_score
|
|
7
|
+
from reporails_cli.formatters.text.components import format_legend
|
|
8
|
+
|
|
9
|
+
# Re-export internal functions used by tests
|
|
10
|
+
from reporails_cli.formatters.text.components import (
|
|
11
|
+
format_level_delta as _format_level_delta,
|
|
12
|
+
)
|
|
13
|
+
from reporails_cli.formatters.text.components import (
|
|
14
|
+
format_score_delta as _format_score_delta,
|
|
15
|
+
)
|
|
16
|
+
from reporails_cli.formatters.text.components import (
|
|
17
|
+
format_violations_delta as _format_violations_delta,
|
|
18
|
+
)
|
|
19
|
+
from reporails_cli.formatters.text.full import format_result
|
|
20
|
+
from reporails_cli.formatters.text.rules import format_rule
|
|
21
|
+
|
|
22
|
+
__all__ = [
|
|
23
|
+
"format_result",
|
|
24
|
+
"format_compact",
|
|
25
|
+
"format_score",
|
|
26
|
+
"format_rule",
|
|
27
|
+
"format_legend",
|
|
28
|
+
# Internal helpers exposed for tests
|
|
29
|
+
"_format_score_delta",
|
|
30
|
+
"_format_level_delta",
|
|
31
|
+
"_format_violations_delta",
|
|
32
|
+
]
|
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
"""Assessment box formatting.
|
|
2
|
+
|
|
3
|
+
Handles the main score/capability box at the top of output.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from __future__ import annotations
|
|
7
|
+
|
|
8
|
+
from typing import Any
|
|
9
|
+
|
|
10
|
+
from reporails_cli.core.models import ScanDelta
|
|
11
|
+
from reporails_cli.core.scorer import LEVEL_LABELS
|
|
12
|
+
from reporails_cli.formatters.text.chars import get_chars
|
|
13
|
+
from reporails_cli.formatters.text.components import (
|
|
14
|
+
build_score_bar,
|
|
15
|
+
format_level_delta,
|
|
16
|
+
format_score_delta,
|
|
17
|
+
format_violations_delta,
|
|
18
|
+
pad_line,
|
|
19
|
+
)
|
|
20
|
+
from reporails_cli.templates import render
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def format_assessment_box(
    data: dict[str, Any],
    ascii_mode: bool | None = None,
    delta: ScanDelta | None = None,
) -> str:
    """Format the visual assessment box using templates.

    Args:
        data: Canonical result dict (score, level, summary, violations, ...)
        ascii_mode: Force ASCII / Unicode box characters; None lets
            get_chars decide
        delta: Optional comparison against the previous run, rendered as
            inline delta markers

    Returns:
        The fully rendered multi-line box string
    """
    chars = get_chars(ascii_mode)
    # Interior width of the box in characters (borders excluded).
    box_width = 62

    score = data.get("score", 0.0)
    level = data.get("level", "L1")
    feature_summary = data.get("feature_summary", "")
    summary_info = data.get("summary", {})
    rules_checked = summary_info.get("rules_checked", 0)
    violations = data.get("violations", [])
    # Defaults to True: an unmarked result is treated as partial.
    is_partial = data.get("is_partial", True)

    # Human-readable capability label; falls back to the raw level string.
    level_label = LEVEL_LABELS.get(level, level)

    # Delta indicators
    score_delta_str = format_score_delta(delta, ascii_mode)
    level_delta_str = format_level_delta(delta, ascii_mode)
    violations_delta_str = format_violations_delta(delta, ascii_mode)

    # Partial marker
    partial_marker = "(partial)" if is_partial else ""

    # Build individual lines
    top_border = chars["tl"] + chars["h"] * box_width + chars["tr"]
    bottom_border = chars["bl"] + chars["h"] * box_width + chars["br"]
    empty_line = chars["v"] + " " * box_width + chars["v"]

    # Score line with capability and delta
    score_text = f"SCORE: {score:.1f} / 10 {partial_marker}{score_delta_str} | CAPABILITY: {level_label} ({level}){level_delta_str}"
    score_line = pad_line(score_text, box_width, chars["v"])

    # Progress bar
    bar = build_score_bar(score, ascii_mode)
    bar_line = pad_line(bar, box_width, chars["v"])

    # Setup line
    setup_text = f"Setup: {feature_summary}"
    setup_line = pad_line(setup_text, box_width, chars["v"])

    # Summary line - count deduplicated violations.
    # Dedupe key is (file, rule): repeated matches of the same rule in one
    # file count once; the line number is stripped from "path:line".
    seen_violations: set[tuple[str, str]] = set()
    for v in violations:
        location = v.get("location", "")
        file_path = location.rsplit(":", 1)[0] if ":" in location else location
        rule_id = v.get("rule_id", "")
        seen_violations.add((file_path, rule_id))
    violation_count = len(seen_violations)
    if violation_count == 0:
        summary = f"No violations · {rules_checked} rules checked"
    else:
        summary = f"{violation_count} violation(s){violations_delta_str} · {rules_checked} rules checked"
    summary_line = pad_line(summary, box_width, chars["v"])

    # Hand every prepared line to the template for final assembly.
    return render("cli_box.txt",
        top_border=top_border,
        bottom_border=bottom_border,
        empty_line=empty_line,
        score_line=score_line,
        bar_line=bar_line,
        setup_line=setup_line,
        summary_line=summary_line,
    )
|