reporails-cli 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of reporails-cli might be problematic. Click here for more details.
- reporails_cli/.env.example +1 -0
- reporails_cli/__init__.py +24 -0
- reporails_cli/bundled/.semgrepignore +51 -0
- reporails_cli/bundled/__init__.py +31 -0
- reporails_cli/bundled/capability-patterns.yml +54 -0
- reporails_cli/bundled/levels.yml +99 -0
- reporails_cli/core/__init__.py +35 -0
- reporails_cli/core/agents.py +147 -0
- reporails_cli/core/applicability.py +150 -0
- reporails_cli/core/bootstrap.py +147 -0
- reporails_cli/core/cache.py +352 -0
- reporails_cli/core/capability.py +245 -0
- reporails_cli/core/discover.py +362 -0
- reporails_cli/core/engine.py +177 -0
- reporails_cli/core/init.py +309 -0
- reporails_cli/core/levels.py +177 -0
- reporails_cli/core/models.py +329 -0
- reporails_cli/core/opengrep/__init__.py +34 -0
- reporails_cli/core/opengrep/runner.py +203 -0
- reporails_cli/core/opengrep/semgrepignore.py +39 -0
- reporails_cli/core/opengrep/templates.py +138 -0
- reporails_cli/core/registry.py +155 -0
- reporails_cli/core/sarif.py +181 -0
- reporails_cli/core/scorer.py +178 -0
- reporails_cli/core/semantic.py +193 -0
- reporails_cli/core/utils.py +139 -0
- reporails_cli/formatters/__init__.py +19 -0
- reporails_cli/formatters/json.py +137 -0
- reporails_cli/formatters/mcp.py +68 -0
- reporails_cli/formatters/text/__init__.py +32 -0
- reporails_cli/formatters/text/box.py +89 -0
- reporails_cli/formatters/text/chars.py +42 -0
- reporails_cli/formatters/text/compact.py +119 -0
- reporails_cli/formatters/text/components.py +117 -0
- reporails_cli/formatters/text/full.py +135 -0
- reporails_cli/formatters/text/rules.py +50 -0
- reporails_cli/formatters/text/violations.py +92 -0
- reporails_cli/interfaces/__init__.py +1 -0
- reporails_cli/interfaces/cli/__init__.py +7 -0
- reporails_cli/interfaces/cli/main.py +352 -0
- reporails_cli/interfaces/mcp/__init__.py +5 -0
- reporails_cli/interfaces/mcp/server.py +194 -0
- reporails_cli/interfaces/mcp/tools.py +136 -0
- reporails_cli/py.typed +0 -0
- reporails_cli/templates/__init__.py +65 -0
- reporails_cli/templates/cli_box.txt +10 -0
- reporails_cli/templates/cli_cta.txt +4 -0
- reporails_cli/templates/cli_delta.txt +1 -0
- reporails_cli/templates/cli_file_header.txt +1 -0
- reporails_cli/templates/cli_legend.txt +1 -0
- reporails_cli/templates/cli_pending.txt +3 -0
- reporails_cli/templates/cli_violation.txt +1 -0
- reporails_cli/templates/cli_working.txt +2 -0
- reporails_cli-0.0.1.dist-info/METADATA +108 -0
- reporails_cli-0.0.1.dist-info/RECORD +58 -0
- reporails_cli-0.0.1.dist-info/WHEEL +4 -0
- reporails_cli-0.0.1.dist-info/entry_points.txt +3 -0
- reporails_cli-0.0.1.dist-info/licenses/LICENSE +201 -0
|
@@ -0,0 +1,352 @@
|
|
|
1
|
+
"""Typer CLI for reporails - lint and score AI instruction files."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
import os
|
|
7
|
+
import sys
|
|
8
|
+
import time
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
|
|
11
|
+
import typer
|
|
12
|
+
from rich.console import Console
|
|
13
|
+
|
|
14
|
+
from reporails_cli.core.agents import get_all_instruction_files
|
|
15
|
+
from reporails_cli.core.cache import get_previous_scan, record_scan
|
|
16
|
+
from reporails_cli.core.discover import generate_backbone_yaml, run_discovery, save_backbone
|
|
17
|
+
from reporails_cli.core.engine import run_validation_sync
|
|
18
|
+
from reporails_cli.core.models import ScanDelta
|
|
19
|
+
from reporails_cli.core.opengrep import set_debug_timing
|
|
20
|
+
from reporails_cli.core.registry import load_rules
|
|
21
|
+
from reporails_cli.formatters import json as json_formatter
|
|
22
|
+
from reporails_cli.formatters import text as text_formatter
|
|
23
|
+
|
|
24
|
+
# Typer application that exposes the `ails` command suite.
app = typer.Typer(
    name="ails",
    help="Lint and score CLAUDE.md files - what ails your repo?",
    no_args_is_help=True,  # bare `ails` prints help instead of erroring
)
# Shared Rich console used for all terminal output in this module.
console = Console()
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def _is_ci() -> bool:
|
|
33
|
+
"""Check if running in CI environment."""
|
|
34
|
+
ci_vars = ("CI", "GITHUB_ACTIONS", "GITLAB_CI", "JENKINS_URL", "CIRCLECI")
|
|
35
|
+
return any(os.environ.get(var) for var in ci_vars)
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def _default_format() -> str:
    """Pick an output format from the runtime environment.

    JSON in CI, compact when stdout is piped, rich text on a terminal.
    """
    if _is_ci():
        return "json"
    return "text" if sys.stdout.isatty() else "compact"
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
@app.command()
def check(
    path: str = typer.Argument(".", help="Directory to validate"),
    format: str = typer.Option(
        None,
        "--format",
        "-f",
        help="Output format: text, json (auto-detects: text for terminal, json for pipes/CI)",
    ),
    rules_dir: str = typer.Option(
        None,
        "--rules-dir",
        "-r",
        help="Directory containing rules (defaults to ~/.reporails/rules/)",
    ),
    refresh: bool = typer.Option(
        False,
        "--refresh",
        help="Refresh file map cache (re-scan for CLAUDE.md files)",
    ),
    ascii: bool = typer.Option(
        False,
        "--ascii",
        "-a",
        help="Use ASCII characters only (no Unicode box drawing)",
    ),
    strict: bool = typer.Option(
        False,
        "--strict",
        help="Exit with code 1 if violations found (for CI pipelines)",
    ),
    quiet_semantic: bool = typer.Option(
        False,
        "--quiet-semantic",
        "-q",
        help="Suppress 'semantic rules skipped' message (for agent/MCP contexts)",
    ),
    legend: bool = typer.Option(
        False,
        "--legend",
        "-l",
        help="Show severity legend only",
    ),
    agent: str = typer.Option(
        "claude",
        "--agent",
        help="Agent identifier for template vars (claude, cursor, etc.)",
    ),
    debug: bool = typer.Option(
        False,
        "--debug",
        help="Show timing info for performance debugging",
    ),
) -> None:
    """Validate CLAUDE.md files against reporails rules.

    Runs discovery + validation on *path*, prints the result in the chosen
    (or auto-detected) format, records the scan for delta tracking, and in
    --strict mode exits non-zero when violations exist.
    """
    from contextlib import nullcontext

    # Enable debug timing if requested
    if debug:
        set_debug_timing(True)

    # Legend-only mode short-circuits before any filesystem work.
    if legend:
        legend_text = text_formatter.format_legend(ascii_mode=ascii)
        print(f"Severity Legend: {legend_text}")
        return

    target = Path(path).resolve()

    if not target.exists():
        console.print(f"[red]Error:[/red] Path not found: {target}")
        raise typer.Exit(1)

    # Resolve rules directory (None -> registry default, ~/.reporails/rules/)
    rules_path = Path(rules_dir).resolve() if rules_dir else None

    # Early exit when there is nothing to validate at all.
    instruction_files = get_all_instruction_files(target)
    if not instruction_files:
        console.print("No instruction files found.")
        console.print("Level: L1 (Absent)")
        console.print()
        console.print("[dim]Create a CLAUDE.md to get started.[/dim]")
        return

    # Read the previous scan BEFORE recording this one, so the delta is
    # computed against the prior run rather than against ourselves.
    previous_scan = get_previous_scan(target)

    # Show a spinner only for interactive, human-oriented output.
    show_spinner = sys.stdout.isatty() and format not in ("json", "brief", "compact")

    # Run validation with timing. nullcontext() collapses the previous
    # duplicated call sites (spinner vs. no spinner) into one code path.
    start_time = time.perf_counter()
    status_cm = (
        console.status("[bold]Scanning instruction files...[/bold]")
        if show_spinner
        else nullcontext()
    )
    try:
        with status_cm:
            result = run_validation_sync(target, rules_dir=rules_path, use_cache=not refresh, agent=agent)
    except FileNotFoundError as e:
        console.print(f"[red]Error:[/red] {e}")
        raise typer.Exit(1) from None
    elapsed_ms = (time.perf_counter() - start_time) * 1000

    # Compute delta from previous scan
    delta = ScanDelta.compute(
        current_score=result.score,
        current_level=result.level.value,
        current_violations=len(result.violations),
        previous=previous_scan,
    )

    # Auto-detect format if not specified
    output_format = format if format else _default_format()

    # Format output
    if output_format == "json":
        data = json_formatter.format_result(result, delta)
        data["elapsed_ms"] = round(elapsed_ms, 1)
        print(json.dumps(data, indent=2))
    elif output_format == "compact":
        output = text_formatter.format_compact(result, ascii_mode=ascii, delta=delta)
        print(output)
    elif output_format == "brief":
        data = json_formatter.format_result(result, delta)
        score = data.get("score", 0)
        level = data.get("level", "?")
        violations = len(data.get("violations", []))
        check_mark = "ok" if ascii else "✓"
        cross_mark = "x" if ascii else "✗"
        status = check_mark if violations == 0 else f"{cross_mark} {violations} violations"
        print(f"ails: {score:.1f}/10 ({level}) {status}")
    else:
        output = text_formatter.format_result(result, ascii_mode=ascii, quiet_semantic=quiet_semantic, delta=delta)
        console.print(output)
        console.print(f"\n[dim]Completed in {elapsed_ms:.0f}ms[/dim]")

    # Record scan in analytics (after display, so previous_scan was accurate)
    record_scan(
        target=target,
        score=result.score,
        level=result.level.value,
        violations_count=len(result.violations),
        rules_checked=result.rules_checked,
        elapsed_ms=elapsed_ms,
        instruction_files=result.rules_checked,  # Approximation
    )

    # Exit with error only in strict mode
    if strict and result.violations:
        raise typer.Exit(1)
|
|
196
|
+
|
|
197
|
+
|
|
198
|
+
@app.command()
def explain(
    rule_id: str = typer.Argument(..., help="Rule ID (e.g., S1, C2)"),
    rules_dir: str = typer.Option(
        None,
        "--rules-dir",
        "-r",
        help="Directory containing rules (defaults to ~/.reporails/rules/)",
    ),
) -> None:
    """Show detailed information about a specific rule."""
    rules = load_rules(Path(rules_dir).resolve() if rules_dir else None)
    normalized = rule_id.upper()

    if normalized not in rules:
        console.print(f"[red]Error:[/red] Unknown rule: {rule_id}")
        console.print(f"Available rules: {', '.join(sorted(rules.keys()))}")
        raise typer.Exit(1)

    rule = rules[normalized]
    rule_data = {
        "title": rule.title,
        "category": rule.category.value,
        "type": rule.type.value,
        "level": rule.level,
        "scoring": rule.scoring,
        "detection": rule.detection,
        "checks": [
            {"id": c.id, "name": c.name, "severity": c.severity.value} for c in rule.checks
        ],
        "see_also": rule.see_also,
    }

    # Pull the long-form description from the rule's markdown body (the
    # part after the YAML frontmatter), truncated to 500 characters.
    if rule.md_path and rule.md_path.exists():
        text = rule.md_path.read_text(encoding="utf-8")
        sections = text.split("---", 2)
        if len(sections) >= 3:
            rule_data["description"] = sections[2].strip()[:500]

    console.print(text_formatter.format_rule(normalized, rule_data))
|
|
243
|
+
|
|
244
|
+
|
|
245
|
+
@app.command()
def map(
    path: str = typer.Argument(".", help="Project root to analyze"),
    output: str = typer.Option(
        "text",
        "--output",
        "-o",
        help="Output format: text, yaml, json",
    ),
    save: bool = typer.Option(
        False,
        "--save",
        "-s",
        help="Save backbone.yml to .reporails/ directory",
    ),
) -> None:
    """Map project structure - find instruction files and components.

    Runs discovery on *path* and prints the resulting backbone either as
    YAML, JSON, or a human-readable summary; with --save, also writes
    backbone.yml into the project's .reporails/ directory.
    """
    # NOTE: the command name shadows the builtin `map`; it is the CLI
    # subcommand name and cannot be renamed without breaking the interface.
    target = Path(path).resolve()

    if not target.exists():
        console.print(f"[red]Error:[/red] Path not found: {target}")
        raise typer.Exit(1)

    # Time only the discovery pass; formatting is excluded.
    start_time = time.perf_counter()
    result = run_discovery(target)
    elapsed_ms = (time.perf_counter() - start_time) * 1000

    # YAML is the canonical representation; JSON output below is derived
    # from it by a parse/re-serialize round trip.
    backbone_yaml = generate_backbone_yaml(result)

    if output == "yaml":
        console.print(backbone_yaml)
    elif output == "json":
        # Local import: yaml is only needed for this output mode.
        import yaml as yaml_lib
        data = yaml_lib.safe_load(backbone_yaml)
        console.print(json.dumps(data, indent=2))
    else:
        # Default: human-readable text summary.
        console.print(f"[bold]Discovery Results[/bold] - {target.name}")
        console.print("=" * 60)
        console.print()

        console.print(f"[bold]Agents detected:[/bold] {len(result.agents)}")
        for agent in result.agents:
            console.print(
                f" - {agent.agent_type.name}: {len(agent.instruction_files)} instruction file(s)"
            )

        console.print()

        # Components are indented by their dotted-id depth.
        console.print(f"[bold]Components:[/bold] {len(result.components)}")
        for comp_id, comp in sorted(result.components.items()):
            indent = " " * comp_id.count(".")
            files = len(comp.instruction_files)
            imports = len(comp.imports)
            console.print(f" {indent}{comp_id}: {files} file(s), {imports} import(s)")

        console.print()

        # Show at most 10 shared files to keep the summary readable.
        if result.shared_files:
            console.print(f"[bold]Shared files:[/bold] {len(result.shared_files)}")
            for sf in result.shared_files[:10]:
                console.print(f" - {sf}")
            if len(result.shared_files) > 10:
                console.print(f" ... and {len(result.shared_files) - 10} more")

        console.print()
        console.print(f"[dim]Total instruction files: {result.total_instruction_files}[/dim]")
        console.print(f"[dim]Total references: {result.total_references}[/dim]")
        console.print(f"[dim]Completed in {elapsed_ms:.0f}ms[/dim]")

    # --save works with any output format.
    if save:
        backbone_path = save_backbone(target, backbone_yaml)
        console.print()
        console.print(f"[green]Saved:[/green] {backbone_path}")
|
|
318
|
+
|
|
319
|
+
|
|
320
|
+
@app.command()
def sync(
    rules_dir: str = typer.Argument(
        "checks",
        help="Local rules directory to sync .md files to",
    ),
) -> None:
    """Sync rule definitions from framework repo (dev command)."""
    # Local import keeps the heavier init module off the common CLI path.
    from reporails_cli.core.init import sync_rules_to_local

    target = Path(rules_dir).resolve()

    if not target.exists():
        console.print(f"[red]Error:[/red] Directory not found: {target}")
        raise typer.Exit(1)

    console.print(f"Syncing .md files from framework repo to {target}...")

    try:
        count = sync_rules_to_local(target)
        console.print(f"[green]Synced {count} rule definition(s)[/green]")
    except RuntimeError as err:
        console.print(f"[red]Error:[/red] {err}")
        raise typer.Exit(1) from None
|
|
344
|
+
|
|
345
|
+
|
|
346
|
+
def main() -> None:
    """Console-script entry point: delegate to the Typer application."""
    app()
|
|
349
|
+
|
|
350
|
+
|
|
351
|
+
# Allow direct execution (python path/to/main.py) in addition to the
# installed console-script entry point.
if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,194 @@
|
|
|
1
|
+
"""MCP server for reporails - exposes validation tools to Claude Code."""
|
|
2
|
+
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
from typing import Any
|
|
5
|
+
|
|
6
|
+
from mcp.server import Server
|
|
7
|
+
from mcp.server.stdio import stdio_server
|
|
8
|
+
from mcp.types import TextContent, Tool
|
|
9
|
+
|
|
10
|
+
from reporails_cli.core.bootstrap import is_initialized
|
|
11
|
+
from reporails_cli.core.cache import get_previous_scan
|
|
12
|
+
from reporails_cli.core.engine import run_validation
|
|
13
|
+
from reporails_cli.core.models import ScanDelta
|
|
14
|
+
from reporails_cli.formatters import mcp as mcp_formatter
|
|
15
|
+
from reporails_cli.formatters import text as text_formatter
|
|
16
|
+
from reporails_cli.interfaces.mcp.tools import (
|
|
17
|
+
explain_tool,
|
|
18
|
+
score_tool,
|
|
19
|
+
)
|
|
20
|
+
|
|
21
|
+
# Create MCP server
|
|
22
|
+
server = Server("ails")
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
@server.list_tools()  # type: ignore[no-untyped-call,untyped-decorator]
async def list_tools() -> list[Tool]:
    """List available tools.

    Advertises three tools to the MCP client: `validate` (full lint with
    semantic judgment requests), `score` (quick summary), and `explain`
    (rule lookup). Dispatch happens in `call_tool` below.
    """
    return [
        # Full validation: the only entry point that returns semantic
        # candidates for the calling agent to evaluate inline.
        Tool(
            name="validate",
            description=(
                "Lint and score AI coding agent instruction files (CLAUDE.md, .cursorrules, copilot-instructions.md, etc). "
                "Returns violations, score (0-10), capability level (L1-L6), and semantic rules for you to evaluate inline. "
                "Use when user asks: 'what ails', 'check instructions', 'lint setup', 'score my config', 'validate agent files'. "
                "Prefer this over running 'ails' via bash — only this tool returns semantic candidates for your evaluation."
            ),
            inputSchema={
                "type": "object",
                "properties": {
                    "path": {
                        "type": "string",
                        "description": "Directory to validate (default: current directory)",
                        "default": ".",
                    }
                },
            },
        ),
        # Lightweight score-only variant of validate.
        Tool(
            name="score",
            description="Quick score check for CLAUDE.md files without full violation details.",
            inputSchema={
                "type": "object",
                "properties": {
                    "path": {
                        "type": "string",
                        "description": "Directory to score (default: current directory)",
                        "default": ".",
                    }
                },
            },
        ),
        # Rule documentation lookup; rule_id is the only required argument
        # across all three tools.
        Tool(
            name="explain",
            description=(
                "Explain a specific rule. Use when user asks about a rule ID like S1, C2, E3, etc."
            ),
            inputSchema={
                "type": "object",
                "properties": {
                    "rule_id": {
                        "type": "string",
                        "description": "Rule ID to explain (e.g., S1, C2)",
                    }
                },
                "required": ["rule_id"],
            },
        ),
    ]
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
@server.call_tool()  # type: ignore[untyped-decorator]
async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
    """Handle tool calls.

    Dispatches on *name* to one of the tools advertised in `list_tools`.
    All error paths return a TextContent payload rather than raising, so
    the MCP client always receives a well-formed response.
    """
    import json

    if name == "validate":
        path = arguments.get("path", ".")
        target = Path(path).resolve()

        # Check initialization
        if not is_initialized():
            return [TextContent(type="text", text="Error: Reporails not initialized. Run 'ails init' first.")]

        if not target.exists():
            return [TextContent(type="text", text=f"Error: Path not found: {target}")]

        # Get previous scan BEFORE validation (for delta comparison)
        previous_scan = get_previous_scan(target)

        # Run validation once
        try:
            result = run_validation(target, agent="claude")
        except FileNotFoundError as e:
            return [TextContent(type="text", text=f"Error: {e}")]

        # Compute delta
        delta = ScanDelta.compute(
            current_score=result.score,
            current_level=result.level.value,
            current_violations=len(result.violations),
            previous=previous_scan,
        )

        # Format text result with delta (suppress MCP CTA since we're already in MCP)
        text_result = text_formatter.format_result(result, ascii_mode=True, delta=delta, show_mcp_cta=False)

        # Get structured data for judgment requests
        dict_result = mcp_formatter.format_result(result, delta=delta)

        # Build response with text report
        response_parts = [text_result]

        # Add judgment requests for Claude to evaluate inline
        judgment_requests = dict_result.get("judgment_requests", [])
        if judgment_requests:
            response_parts.append("\n" + "=" * 60)
            response_parts.append("ACTION REQUIRED - EVALUATE THESE SEMANTIC RULES:")
            response_parts.append("=" * 60)
            response_parts.append(
                "You MUST evaluate each rule below by reading the file content. "
                "This is not optional. Do NOT ask the user for permission. "
                "For each item: read the file, apply the criteria. "
                "Report ONLY violations (where criteria are NOT met). "
                "Do not list items that pass — just state how many passed at the end."
            )
            for i, jr in enumerate(judgment_requests, 1):
                response_parts.append(f"[{i}] Rule {jr.get('rule_id', '?')}: {jr.get('question', '')}")
                response_parts.append(f" File: {jr.get('location', '?')}")

                # Format criteria properly (not as raw Python dict)
                criteria = jr.get("criteria")
                if criteria:
                    if isinstance(criteria, dict):
                        # Extract pass_condition or format dict nicely
                        criteria_text = criteria.get("pass_condition", str(criteria))
                    elif isinstance(criteria, list):
                        criteria_text = "; ".join(str(c) for c in criteria)
                    else:
                        criteria_text = str(criteria)
                    response_parts.append(f" Criteria: {criteria_text}")

                if jr.get("content"):
                    # Show snippet if available (truncated to 200 chars)
                    snippet = jr.get("content", "")[:200]
                    if len(jr.get("content", "")) > 200:
                        snippet += "..."
                    response_parts.append(f" Context: {snippet}")
                response_parts.append("")

            # Add guidance for final score reporting
            response_parts.append(
                "After evaluating semantic rules above, report the FINAL score. "
                "If all semantic rules pass, the score remains the same but is now COMPLETE (not partial). "
                "If any semantic rules fail, add those violations and recalculate."
            )

        return [TextContent(type="text", text="\n".join(response_parts))]
    elif name == "score":
        path = arguments.get("path", ".")
        score_result = score_tool(path)
        return [TextContent(type="text", text=json.dumps(score_result, indent=2))]
    elif name == "explain":
        rule_id = arguments.get("rule_id", "")
        explain_result = explain_tool(rule_id)
        return [TextContent(type="text", text=json.dumps(explain_result, indent=2))]
    else:
        # Unknown tool name: report as JSON error payload, never raise.
        return [TextContent(type="text", text=json.dumps({"error": f"Unknown tool: {name}"}, indent=2))]
|
|
178
|
+
|
|
179
|
+
|
|
180
|
+
async def run_server() -> None:
    """Serve MCP requests over stdio until the streams close."""
    async with stdio_server() as (reader, writer):
        options = server.create_initialization_options()
        await server.run(reader, writer, options)
|
|
184
|
+
|
|
185
|
+
|
|
186
|
+
def main() -> None:
    """Synchronous entry point: run the async MCP server to completion."""
    import asyncio

    asyncio.run(run_server())
|
|
191
|
+
|
|
192
|
+
|
|
193
|
+
# Allow running the server module directly as well as via the entry point.
if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,136 @@
|
|
|
1
|
+
"""MCP tool implementations for reporails."""
|
|
2
|
+
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
from typing import Any
|
|
5
|
+
|
|
6
|
+
from reporails_cli.core.bootstrap import is_initialized
|
|
7
|
+
from reporails_cli.core.engine import run_validation
|
|
8
|
+
from reporails_cli.core.registry import load_rules
|
|
9
|
+
from reporails_cli.formatters import mcp as mcp_formatter
|
|
10
|
+
from reporails_cli.formatters import text as text_formatter
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def validate_tool(path: str = ".") -> dict[str, Any]:
    """
    Validate CLAUDE.md files at path.

    Returns violations, score, level, and JudgmentRequests for semantic rules.
    Error conditions are reported via an "error" key rather than raised.

    Args:
        path: Directory to validate (default: current directory)

    Returns:
        Validation result dict
    """
    if not is_initialized():
        return {"error": "Reporails not initialized. Run 'ails init' first."}

    target = Path(path).resolve()
    if not target.exists():
        return {"error": f"Path not found: {target}"}

    try:
        return mcp_formatter.format_result(run_validation(target, agent="claude"))
    except FileNotFoundError as exc:
        return {"error": str(exc)}
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def validate_tool_text(path: str = ".") -> str:
    """
    Validate CLAUDE.md files at path, returning text format.

    Returns human-readable text report with score, violations, and friction.
    Error conditions are returned as "Error: ..." strings, never raised.

    Args:
        path: Directory to validate (default: current directory)

    Returns:
        Text-formatted validation report
    """
    if not is_initialized():
        return "Error: Reporails not initialized. Run 'ails init' first."

    target = Path(path).resolve()
    if not target.exists():
        return f"Error: Path not found: {target}"

    try:
        return text_formatter.format_result(run_validation(target, agent="claude"), ascii_mode=True)
    except FileNotFoundError as exc:
        return f"Error: {exc}"
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
def score_tool(path: str = ".") -> dict[str, Any]:
    """
    Quick score check for CLAUDE.md files.

    Runs a full validation but returns only the score summary; errors are
    reported via an "error" key rather than raised.

    Args:
        path: Directory to score (default: current directory)

    Returns:
        Score summary dict
    """
    if not is_initialized():
        return {"error": "Reporails not initialized. Run 'ails init' first."}

    target = Path(path).resolve()
    if not target.exists():
        return {"error": f"Path not found: {target}"}

    try:
        return mcp_formatter.format_score(run_validation(target, agent="claude"))
    except FileNotFoundError as exc:
        return {"error": str(exc)}
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
def explain_tool(rule_id: str) -> dict[str, Any]:
    """
    Get detailed info about a specific rule.

    Args:
        rule_id: Rule identifier (e.g., S1, C2)

    Returns:
        Rule details dict, or an "error" dict listing available rules when
        the id is unknown.
    """
    rules = load_rules()

    # Rule ids are stored uppercase; accept any casing from the caller.
    key = rule_id.upper()
    if key not in rules:
        return {
            "error": f"Unknown rule: {rule_id}",
            "available_rules": sorted(rules.keys()),
        }

    rule = rules[key]
    rule_data = {
        "title": rule.title,
        "category": rule.category.value,
        "type": rule.type.value,
        "level": rule.level,
        "scoring": rule.scoring,
        "detection": rule.detection,
        "checks": [
            {"id": c.id, "name": c.name, "severity": c.severity.value} for c in rule.checks
        ],
        "see_also": rule.see_also,
    }

    # Pull the long-form description from the rule's markdown body (text
    # after the YAML frontmatter), truncated to 500 characters.
    if rule.md_path and rule.md_path.exists():
        sections = rule.md_path.read_text(encoding="utf-8").split("---", 2)
        if len(sections) >= 3:
            rule_data["description"] = sections[2].strip()[:500]

    return mcp_formatter.format_rule(key, rule_data)
|
reporails_cli/py.typed
ADDED
|
File without changes
|