crucible-mcp 0.5.0__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46) hide show
  1. crucible/cli.py +109 -2
  2. crucible/enforcement/bundled/error-handling.yaml +84 -0
  3. crucible/enforcement/bundled/security.yaml +123 -0
  4. crucible/enforcement/bundled/smart-contract.yaml +110 -0
  5. crucible/hooks/claudecode.py +388 -0
  6. crucible/hooks/precommit.py +117 -25
  7. crucible/knowledge/loader.py +186 -0
  8. crucible/knowledge/principles/API_DESIGN.md +176 -0
  9. crucible/knowledge/principles/COMMITS.md +127 -0
  10. crucible/knowledge/principles/DATABASE.md +138 -0
  11. crucible/knowledge/principles/DOCUMENTATION.md +201 -0
  12. crucible/knowledge/principles/ERROR_HANDLING.md +157 -0
  13. crucible/knowledge/principles/FP.md +162 -0
  14. crucible/knowledge/principles/GITIGNORE.md +218 -0
  15. crucible/knowledge/principles/OBSERVABILITY.md +147 -0
  16. crucible/knowledge/principles/PRECOMMIT.md +201 -0
  17. crucible/knowledge/principles/SECURITY.md +136 -0
  18. crucible/knowledge/principles/SMART_CONTRACT.md +153 -0
  19. crucible/knowledge/principles/SYSTEM_DESIGN.md +153 -0
  20. crucible/knowledge/principles/TESTING.md +129 -0
  21. crucible/knowledge/principles/TYPE_SAFETY.md +170 -0
  22. crucible/skills/accessibility-engineer/SKILL.md +71 -0
  23. crucible/skills/backend-engineer/SKILL.md +69 -0
  24. crucible/skills/customer-success/SKILL.md +69 -0
  25. crucible/skills/data-engineer/SKILL.md +70 -0
  26. crucible/skills/devops-engineer/SKILL.md +69 -0
  27. crucible/skills/fde-engineer/SKILL.md +69 -0
  28. crucible/skills/formal-verification/SKILL.md +86 -0
  29. crucible/skills/gas-optimizer/SKILL.md +89 -0
  30. crucible/skills/incident-responder/SKILL.md +91 -0
  31. crucible/skills/mev-researcher/SKILL.md +87 -0
  32. crucible/skills/mobile-engineer/SKILL.md +70 -0
  33. crucible/skills/performance-engineer/SKILL.md +68 -0
  34. crucible/skills/product-engineer/SKILL.md +68 -0
  35. crucible/skills/protocol-architect/SKILL.md +83 -0
  36. crucible/skills/security-engineer/SKILL.md +63 -0
  37. crucible/skills/tech-lead/SKILL.md +92 -0
  38. crucible/skills/uiux-engineer/SKILL.md +70 -0
  39. crucible/skills/web3-engineer/SKILL.md +79 -0
  40. crucible_mcp-1.0.0.dist-info/METADATA +198 -0
  41. crucible_mcp-1.0.0.dist-info/RECORD +66 -0
  42. crucible_mcp-0.5.0.dist-info/METADATA +0 -161
  43. crucible_mcp-0.5.0.dist-info/RECORD +0 -30
  44. {crucible_mcp-0.5.0.dist-info → crucible_mcp-1.0.0.dist-info}/WHEEL +0 -0
  45. {crucible_mcp-0.5.0.dist-info → crucible_mcp-1.0.0.dist-info}/entry_points.txt +0 -0
  46. {crucible_mcp-0.5.0.dist-info → crucible_mcp-1.0.0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,388 @@
1
+ """Claude Code hooks integration.
2
+
3
+ Provides PreToolUse and PostToolUse hooks for Claude Code to enforce
4
+ code quality via Crucible reviews on file writes/edits.
5
+
6
+ Usage:
7
+ crucible hooks claudecode init # Generate .claude/settings.json
8
+ crucible hooks claudecode hook # Run as hook (receives JSON on stdin)
9
+
10
+ The hook receives JSON on stdin from Claude Code:
11
+ {"tool_name": "Write", "tool_input": {"file_path": "...", "content": "..."}}
12
+
13
+ Exit codes:
14
+ 0 = allow (optionally with JSON for structured control)
15
+ 2 = deny (stderr shown to Claude as feedback)
16
+ """
17
+
18
+ import json
19
+ import os
20
+ import sys
21
+ from dataclasses import dataclass
22
+ from pathlib import Path
23
+
24
+ import yaml
25
+
26
+ from crucible.enforcement.patterns import run_pattern_assertions
27
+ from crucible.enforcement.assertions import load_assertions
28
+
29
# Per-repo hook configuration file (optional; defaults apply when absent).
CONFIG_FILE = Path(".crucible") / "claudecode.yaml"
# Claude Code project settings file where the PostToolUse hook is registered.
CLAUDE_SETTINGS_FILE = Path(".claude") / "settings.json"
32
+
33
+
34
@dataclass(frozen=True)
class ClaudeCodeHookConfig:
    """Configuration for Claude Code hooks.

    Loaded from ``.crucible/claudecode.yaml`` by ``load_claudecode_config``;
    every field falls back to the default shown here when the file is
    missing or unparseable.
    """

    # What to do when findings are detected: "deny", "warn", or "allow"
    on_finding: str = "deny"

    # Minimum severity that triggers the action: "error", "warning", or "info"
    severity_threshold: str = "error"

    # Run pattern assertions (fast, free)
    run_assertions: bool = True

    # Run LLM assertions (expensive, semantic)
    run_llm_assertions: bool = False

    # Token budget for LLM assertions
    llm_token_budget: int = 2000

    # Glob patterns for files to exclude from review
    exclude: tuple[str, ...] = ()

    # Emit progress/diagnostic messages to stderr
    verbose: bool = False
58
+
59
+
60
def load_claudecode_config(repo_path: str | None = None) -> ClaudeCodeHookConfig:
    """Load the Claude Code hook config from .crucible/claudecode.yaml.

    Args:
        repo_path: Repository root to resolve the config path against;
            defaults to the current working directory when None.

    Returns:
        The parsed ClaudeCodeHookConfig, or all defaults when the file is
        missing, unreadable, invalid YAML, or not a YAML mapping.
    """
    config_path = Path(repo_path) / CONFIG_FILE if repo_path else CONFIG_FILE

    if not config_path.exists():
        return ClaudeCodeHookConfig()

    try:
        with open(config_path) as f:
            data = yaml.safe_load(f) or {}
    except (yaml.YAMLError, OSError):
        return ClaudeCodeHookConfig()

    # safe_load can legally return a scalar or a list (e.g. a file that is
    # just "true" or a bare sequence); calling .get() on those would raise
    # AttributeError. Treat anything that isn't a mapping as "no config".
    if not isinstance(data, dict):
        return ClaudeCodeHookConfig()

    return ClaudeCodeHookConfig(
        on_finding=data.get("on_finding", "deny"),
        severity_threshold=data.get("severity_threshold", "error"),
        run_assertions=data.get("run_assertions", True),
        run_llm_assertions=data.get("run_llm_assertions", False),
        llm_token_budget=data.get("llm_token_budget", 2000),
        exclude=tuple(data.get("exclude", [])),
        verbose=data.get("verbose", False),
    )
82
+
83
+
84
def generate_settings_json(repo_path: str | None = None) -> str:
    """Generate .claude/settings.json with Crucible hooks.

    Merges into any existing settings file rather than clobbering it, and
    is idempotent: the Crucible PostToolUse entry is appended at most once.

    Args:
        repo_path: Project root; defaults to the current directory.

    Returns:
        The path to the generated file (as a string).
    """
    base_path = Path(repo_path) if repo_path else Path(".")
    settings_path = base_path / CLAUDE_SETTINGS_FILE

    # Create .claude directory if needed
    settings_path.parent.mkdir(parents=True, exist_ok=True)

    # Load existing settings if present; a corrupt or unreadable file is
    # treated as empty rather than aborting init.
    existing: dict = {}
    if settings_path.exists():
        try:
            with open(settings_path) as f:
                existing = json.load(f)
        except (json.JSONDecodeError, OSError):
            pass

    # Ensure hooks section exists
    if "hooks" not in existing:
        existing["hooks"] = {}

    post_tool_use = existing["hooks"].get("PostToolUse", [])

    def _is_crucible_entry(entry: object) -> bool:
        """True when an existing PostToolUse entry already runs Crucible.

        Guards every level of the user-editable structure: entries may not
        be dicts, "hooks" may be missing, empty (indexing [0] would raise
        IndexError), or contain non-dict items or non-string commands.
        """
        if not isinstance(entry, dict):
            return False
        inner = entry.get("hooks")
        if not isinstance(inner, list):
            return False
        return any(
            isinstance(h, dict)
            and isinstance(h.get("command"), str)
            and "crucible hooks claudecode" in h["command"]
            for h in inner
        )

    if not any(_is_crucible_entry(hook) for hook in post_tool_use):
        post_tool_use.append({
            "matcher": "Edit|Write",
            "hooks": [
                {
                    "type": "command",
                    "command": "crucible hooks claudecode hook"
                }
            ]
        })
    existing["hooks"]["PostToolUse"] = post_tool_use

    # Write settings
    with open(settings_path, "w") as f:
        json.dump(existing, f, indent=2)

    return str(settings_path)
135
+
136
+
137
def generate_config_template(repo_path: str | None = None) -> str:
    """Generate the .crucible/claudecode.yaml config template.

    Never overwrites an existing config file.

    Args:
        repo_path: Project root; defaults to the current directory.

    Returns:
        The path to the (new or pre-existing) config file, as a string.
    """
    base_path = Path(repo_path) if repo_path else Path(".")
    config_path = base_path / CONFIG_FILE

    # Create .crucible directory if needed
    config_path.parent.mkdir(parents=True, exist_ok=True)

    if config_path.exists():
        return str(config_path)  # Don't overwrite

    # Trailing backslash after the opening quotes keeps the first template
    # line flush-left in the written file.
    template = """\
# Crucible Claude Code Hook Configuration
# See: https://github.com/b17z/crucible

# What to do when findings are detected
# Options: deny (block and show to Claude), warn (allow but log), allow (silent)
on_finding: deny

# Minimum severity to trigger action
# Options: error, warning, info
severity_threshold: error

# Run pattern assertions (fast, free)
run_assertions: true

# Run LLM assertions (expensive, semantic) - off by default for hooks
run_llm_assertions: false

# Token budget for LLM assertions (if enabled)
llm_token_budget: 2000

# File patterns to exclude from review
exclude:
  - "**/*.md"
  - "**/test_*.py"
  - "**/*_test.py"

# Show verbose output in stderr (visible in Claude Code verbose mode)
verbose: false
"""

    with open(config_path, "w") as f:
        f.write(template)

    return str(config_path)
186
+
187
+
188
+ def _should_exclude(file_path: str, exclude_patterns: tuple[str, ...]) -> bool:
189
+ """Check if file should be excluded."""
190
+ from fnmatch import fnmatch
191
+ return any(fnmatch(file_path, pattern) for pattern in exclude_patterns)
192
+
193
+
194
+ def _get_language_from_path(file_path: str) -> str | None:
195
+ """Get language from file extension."""
196
+ ext_map = {
197
+ ".py": "python",
198
+ ".js": "javascript",
199
+ ".ts": "typescript",
200
+ ".tsx": "typescript",
201
+ ".jsx": "javascript",
202
+ ".sol": "solidity",
203
+ ".go": "go",
204
+ ".rs": "rust",
205
+ ".rb": "ruby",
206
+ ".java": "java",
207
+ }
208
+ ext = Path(file_path).suffix.lower()
209
+ return ext_map.get(ext)
210
+
211
+
212
def run_hook(stdin_data: str | None = None) -> int:
    """Run the Claude Code hook.

    Reads tool input from stdin, runs Crucible review, returns exit code.

    The input is JSON of the shape
    {"tool_name": "Write", "tool_input": {"file_path": "...", "content": "..."}}
    as documented in the module docstring.

    Exit codes:
        0 = allow (with optional JSON output)
        2 = deny (stderr shown to Claude)

    Args:
        stdin_data: Raw JSON input; read from sys.stdin when None
            (the normal case when invoked by Claude Code).

    Returns:
        Exit code
    """
    # Read from stdin if not provided
    if stdin_data is None:
        stdin_data = sys.stdin.read()

    # Parse input — a hook must never block the tool because of its own
    # failure, so malformed input allows the write through.
    try:
        input_data = json.loads(stdin_data)
    except json.JSONDecodeError as e:
        print(f"Failed to parse hook input: {e}", file=sys.stderr)
        return 0  # Allow on parse error

    tool_name = input_data.get("tool_name", "")
    tool_input = input_data.get("tool_input", {})

    # Only process Edit and Write tools
    if tool_name not in ("Edit", "Write"):
        return 0

    file_path = tool_input.get("file_path", "")
    if not file_path:
        return 0

    # Get content for Write, or we'll read from disk for Edit
    content = tool_input.get("content") or tool_input.get("new_string")

    # Load config relative to the caller's working directory
    cwd = input_data.get("cwd", os.getcwd())
    config = load_claudecode_config(cwd)

    if config.verbose:
        print(f"Crucible hook: reviewing {file_path}", file=sys.stderr)

    # Check exclusions
    if _should_exclude(file_path, config.exclude):
        if config.verbose:
            print(f"Crucible hook: {file_path} excluded", file=sys.stderr)
        return 0

    # Skip if assertions disabled
    if not config.run_assertions:
        return 0

    # Load assertions; loader errors are non-fatal and only surfaced in
    # verbose mode.
    assertions, load_errors = load_assertions()
    if load_errors and config.verbose:
        for err in load_errors:
            print(f"Crucible hook warning: {err}", file=sys.stderr)

    if not assertions:
        return 0

    # For Edit tool, we need to read the file and apply the edit
    # For Write tool, we have the content directly
    if tool_name == "Write" and content:
        file_content = content
    elif tool_name == "Edit":
        # For Edit, the file should already exist on disk after PostToolUse
        full_path = Path(cwd) / file_path if not Path(file_path).is_absolute() else Path(file_path)
        if full_path.exists():
            try:
                file_content = full_path.read_text()
            except OSError:
                return 0  # Allow on read error
        else:
            return 0  # Allow if file doesn't exist
    else:
        return 0  # No content to analyze

    # Run pattern assertions (fast, no LLM involved)
    findings, checked, skipped = run_pattern_assertions(
        file_path=file_path,
        content=file_content,
        assertions=assertions,
    )

    # Filter by severity threshold. Lower number = more severe; an unknown
    # configured threshold falls back to "warning" (1), and findings with
    # an unknown severity are ranked as "info" (2).
    severity_order = {"error": 0, "warning": 1, "info": 2}
    threshold = severity_order.get(config.severity_threshold, 1)

    filtered_findings = [
        f for f in findings
        if severity_order.get(f.severity, 2) <= threshold and not f.suppressed
    ]

    if not filtered_findings:
        if config.verbose:
            print(f"Crucible hook: {file_path} passed ({checked} assertions)", file=sys.stderr)
        return 0

    # Handle findings based on config
    if config.on_finding == "allow":
        return 0

    # Format findings for Claude; match_text is truncated to keep the
    # feedback compact.
    messages = []
    for f in filtered_findings:
        messages.append(f"[{f.severity.upper()}] {f.assertion_id}: {f.message}")
        messages.append(f"  at {f.location}")
        if f.match_text:
            messages.append(f"  matched: {f.match_text[:100]}")

    output = f"Crucible found {len(filtered_findings)} issue(s) in {file_path}:\n"
    output += "\n".join(messages)

    if config.on_finding == "warn":
        # Warn but allow
        print(output, file=sys.stderr)
        return 0

    # Deny (default)
    print(output, file=sys.stderr)
    return 2  # Exit 2 = block and show to Claude
336
+
337
+
338
def main_init(repo_path: str | None = None) -> int:
    """Initialize Claude Code hooks for a project.

    Creates:
        - .claude/settings.json with PostToolUse hook
        - .crucible/claudecode.yaml config template

    Args:
        repo_path: Project root; defaults to the current directory.

    Returns:
        Exit code
    """
    settings_path = generate_settings_json(repo_path)
    config_path = generate_config_template(repo_path)

    for line in (
        f"Created Claude Code settings: {settings_path}",
        f"Created Crucible config: {config_path}",
        "",
        "Crucible will now review files when Claude edits them.",
        "Configure behavior in .crucible/claudecode.yaml",
    ):
        print(line)

    return 0
358
+
359
+
360
def main() -> int:
    """CLI entry point for `crucible hooks claudecode`."""
    import argparse

    parser = argparse.ArgumentParser(
        prog="crucible-claudecode",
        description="Claude Code hooks integration",
    )
    commands = parser.add_subparsers(dest="command", required=True)

    # "init" scaffolds settings + config for a project
    init_cmd = commands.add_parser("init", help="Initialize Claude Code hooks")
    init_cmd.add_argument("path", nargs="?", default=".", help="Project path")

    # "hook" is what Claude Code itself invokes (JSON on stdin)
    commands.add_parser("hook", help="Run hook (reads from stdin)")

    args = parser.parse_args()

    if args.command == "hook":
        return run_hook()
    if args.command == "init":
        return main_init(args.path)

    return 0


if __name__ == "__main__":
    sys.exit(main())
@@ -124,6 +124,12 @@ class PrecommitConfig:
124
124
  verbose: bool = False
125
125
  # Secrets detection: "auto" (gitleaks if available, else builtin), "gitleaks", "builtin", or "none"
126
126
  secrets_tool: str = "auto"
127
+ # Enforcement assertions
128
+ run_assertions: bool = True
129
+ # LLM assertions (expensive, off by default for pre-commit)
130
+ run_llm_assertions: bool = False
131
+ # Token budget for LLM assertions
132
+ llm_token_budget: int = 5000
127
133
 
128
134
 
129
135
  @dataclass(frozen=True)
@@ -136,6 +142,11 @@ class PrecommitResult:
136
142
  severity_counts: dict[str, int]
137
143
  files_checked: int
138
144
  error: str | None = None
145
+ # Enforcement results
146
+ enforcement_findings: tuple = ()
147
+ assertions_checked: int = 0
148
+ assertions_skipped: int = 0
149
+ llm_tokens_used: int = 0
139
150
 
140
151
 
141
152
  def load_precommit_config(repo_path: str | None = None) -> PrecommitConfig:
@@ -188,6 +199,11 @@ def load_precommit_config(repo_path: str | None = None) -> PrecommitConfig:
188
199
  if domain:
189
200
  tools[domain] = list(tool_list)
190
201
 
202
+ # Enforcement config
203
+ run_assertions = config_data.get("run_assertions", True)
204
+ run_llm_assertions = config_data.get("run_llm_assertions", False)
205
+ llm_token_budget = config_data.get("llm_token_budget", 5000)
206
+
191
207
  return PrecommitConfig(
192
208
  fail_on=fail_on,
193
209
  timeout=timeout,
@@ -197,6 +213,9 @@ def load_precommit_config(repo_path: str | None = None) -> PrecommitConfig:
197
213
  skip_tools=skip_tools,
198
214
  verbose=verbose,
199
215
  secrets_tool=secrets_tool,
216
+ run_assertions=run_assertions,
217
+ run_llm_assertions=run_llm_assertions,
218
+ llm_token_budget=llm_token_budget,
200
219
  )
201
220
 
202
221
 
@@ -473,6 +492,34 @@ def run_precommit(
473
492
  if result.is_ok:
474
493
  all_findings.extend(result.value)
475
494
 
495
+ # Step 3.5: Run enforcement assertions
496
+ enforcement_findings = []
497
+ assertions_checked = 0
498
+ assertions_skipped = 0
499
+ llm_tokens_used = 0
500
+
501
+ if config.run_assertions:
502
+ from crucible.enforcement.models import ComplianceConfig
503
+
504
+ compliance_config = ComplianceConfig(
505
+ enabled=config.run_llm_assertions,
506
+ token_budget=config.llm_token_budget,
507
+ )
508
+
509
+ from crucible.review.core import run_enforcement
510
+
511
+ enforcement_findings, enforcement_errors, assertions_checked, assertions_skipped, budget_state = (
512
+ run_enforcement(
513
+ repo_root,
514
+ changed_files=files_to_check,
515
+ repo_root=repo_root,
516
+ compliance_config=compliance_config,
517
+ )
518
+ )
519
+
520
+ if budget_state:
521
+ llm_tokens_used = budget_state.tokens_used
522
+
476
523
  # Step 4: Filter to staged lines only
477
524
  filtered_findings = _filter_findings_to_staged(
478
525
  all_findings, context, config.include_context
@@ -484,6 +531,11 @@ def run_precommit(
484
531
  sev = f.severity.value
485
532
  severity_counts[sev] = severity_counts.get(sev, 0) + 1
486
533
 
534
+ # Count enforcement severities
535
+ for f in enforcement_findings:
536
+ sev = f.severity
537
+ severity_counts[sev] = severity_counts.get(sev, 0) + 1
538
+
487
539
  # Check if any finding meets threshold
488
540
  passed = True
489
541
  for finding in filtered_findings:
@@ -491,12 +543,29 @@ def run_precommit(
491
543
  passed = False
492
544
  break
493
545
 
546
+ # Check enforcement findings (error = HIGH, warning = MEDIUM, info = LOW)
547
+ if passed:
548
+ enforcement_severity_map = {
549
+ "error": Severity.HIGH,
550
+ "warning": Severity.MEDIUM,
551
+ "info": Severity.LOW,
552
+ }
553
+ for finding in enforcement_findings:
554
+ sev = enforcement_severity_map.get(finding.severity, Severity.MEDIUM)
555
+ if _severity_meets_threshold(sev, config.fail_on):
556
+ passed = False
557
+ break
558
+
494
559
  return PrecommitResult(
495
560
  passed=passed,
496
561
  findings=tuple(filtered_findings),
497
562
  blocked_files=(),
498
563
  severity_counts=severity_counts,
499
564
  files_checked=len(files_to_check),
565
+ enforcement_findings=tuple(enforcement_findings),
566
+ assertions_checked=assertions_checked,
567
+ assertions_skipped=assertions_skipped,
568
+ llm_tokens_used=llm_tokens_used,
500
569
  )
501
570
 
502
571
 
@@ -524,43 +593,66 @@ def format_precommit_output(result: PrecommitResult, verbose: bool = False) -> s
524
593
  lines.append("Pre-commit: FAILED")
525
594
  return "\n".join(lines)
526
595
 
527
- if not result.findings:
528
- lines.append(f"Checked {result.files_checked} file(s) - no issues found")
596
+ total_findings = len(result.findings) + len(result.enforcement_findings)
597
+
598
+ if total_findings == 0:
599
+ msg = f"Checked {result.files_checked} file(s)"
600
+ if result.assertions_checked > 0:
601
+ msg += f", {result.assertions_checked} assertion(s)"
602
+ msg += " - no issues found"
603
+ lines.append(msg)
529
604
  return "\n".join(lines)
530
605
 
531
606
  # Header
532
- total = len(result.findings)
533
- lines.append(f"Found {total} issue(s) in {result.files_checked} file(s):")
607
+ lines.append(f"Found {total_findings} issue(s) in {result.files_checked} file(s):")
534
608
  lines.append("")
535
609
 
536
610
  # Severity summary
537
- for sev in ["critical", "high", "medium", "low", "info"]:
611
+ for sev in ["critical", "high", "medium", "low", "info", "error", "warning"]:
538
612
  count = result.severity_counts.get(sev, 0)
539
613
  if count > 0:
540
614
  lines.append(f" {sev.upper()}: {count}")
541
615
 
542
616
  lines.append("")
543
617
 
544
- # Findings
545
- if verbose:
546
- for sev in [Severity.CRITICAL, Severity.HIGH, Severity.MEDIUM, Severity.LOW, Severity.INFO]:
547
- sev_findings = [f for f in result.findings if f.severity == sev]
548
- if not sev_findings:
549
- continue
550
-
551
- lines.append(f"[{sev.value.upper()}]")
552
- for f in sev_findings:
553
- lines.append(f" {f.location}")
554
- lines.append(f" {f.rule}: {f.message}")
555
- if f.suggestion:
556
- lines.append(f" Fix: {f.suggestion}")
557
- lines.append("")
558
- else:
559
- # Compact: just show high+ findings
560
- for f in result.findings:
561
- if f.severity in (Severity.CRITICAL, Severity.HIGH):
562
- lines.append(f" [{f.severity.value.upper()}] {f.location}")
563
- lines.append(f" {f.rule}: {f.message}")
618
+ # Static analysis findings
619
+ if result.findings:
620
+ if verbose:
621
+ for sev in [Severity.CRITICAL, Severity.HIGH, Severity.MEDIUM, Severity.LOW, Severity.INFO]:
622
+ sev_findings = [f for f in result.findings if f.severity == sev]
623
+ if not sev_findings:
624
+ continue
625
+
626
+ lines.append(f"[{sev.value.upper()}]")
627
+ for f in sev_findings:
628
+ lines.append(f" {f.location}")
629
+ lines.append(f" {f.rule}: {f.message}")
630
+ if f.suggestion:
631
+ lines.append(f" Fix: {f.suggestion}")
632
+ lines.append("")
633
+ else:
634
+ # Compact: just show high+ findings
635
+ for f in result.findings:
636
+ if f.severity in (Severity.CRITICAL, Severity.HIGH):
637
+ lines.append(f" [{f.severity.value.upper()}] {f.location}")
638
+ lines.append(f" {f.rule}: {f.message}")
639
+
640
+ # Enforcement findings
641
+ if result.enforcement_findings:
642
+ lines.append("")
643
+ lines.append("Enforcement Assertions:")
644
+ for f in result.enforcement_findings:
645
+ sev_icon = {"error": "🔴", "warning": "🟠", "info": "⚪"}.get(f.severity, "⚪")
646
+ source_tag = "[LLM]" if f.source == "llm" else "[Pattern]"
647
+ lines.append(f" {sev_icon} [{f.severity.upper()}] {source_tag} {f.assertion_id}")
648
+ lines.append(f" {f.location}: {f.message}")
649
+
650
+ # Assertion summary
651
+ if result.assertions_checked > 0 or result.assertions_skipped > 0:
652
+ lines.append("")
653
+ lines.append(f"Assertions: {result.assertions_checked} checked, {result.assertions_skipped} skipped")
654
+ if result.llm_tokens_used > 0:
655
+ lines.append(f" LLM tokens used: {result.llm_tokens_used}")
564
656
 
565
657
  # Status
566
658
  lines.append("")