crucible-mcp 0.1.0__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,15 @@
1
+ """Git hooks for crucible."""
2
+
3
+ from crucible.hooks.precommit import (
4
+ PrecommitConfig,
5
+ PrecommitResult,
6
+ load_precommit_config,
7
+ run_precommit,
8
+ )
9
+
10
# Public API of crucible.hooks; keep in sync with the imports above.
__all__ = [
    "PrecommitConfig",
    "PrecommitResult",
    "load_precommit_config",
    "run_precommit",
]
@@ -0,0 +1,660 @@
1
+ """Pre-commit hook implementation."""
2
+
3
+ import os
4
+ import re
5
+ import sys
6
+ from dataclasses import dataclass, field
7
+ from fnmatch import fnmatch
8
+ from pathlib import Path
9
+
10
+ import yaml
11
+
12
+ from crucible.errors import Result
13
+ from crucible.models import Domain, Severity, ToolFinding
14
+ from crucible.tools.delegation import (
15
+ check_tool,
16
+ delegate_bandit,
17
+ delegate_gitleaks,
18
+ delegate_ruff,
19
+ delegate_semgrep,
20
+ delegate_slither,
21
+ get_semgrep_config,
22
+ )
23
+ from crucible.tools.git import (
24
+ GitContext,
25
+ get_changed_files,
26
+ get_repo_root,
27
+ get_staged_changes,
28
+ )
29
+
30
# Config locations (cascade: project > user)
CONFIG_PROJECT = Path(".crucible") / "precommit.yaml"
CONFIG_USER = Path.home() / ".claude" / "crucible" / "precommit.yaml"

# Default tool selection by domain (used when the config has no per-domain
# override in PrecommitConfig.tools)
DEFAULT_TOOLS: dict[Domain, list[str]] = {
    Domain.SMART_CONTRACT: ["slither", "semgrep"],
    Domain.BACKEND: ["ruff", "bandit", "semgrep"],
    Domain.FRONTEND: ["semgrep"],
    Domain.INFRASTRUCTURE: ["semgrep"],
    Domain.UNKNOWN: ["semgrep"],
}

# Severity ordering for threshold comparison.
# Lower rank = more severe; consumed by _severity_meets_threshold.
SEVERITY_ORDER = {
    Severity.CRITICAL: 0,
    Severity.HIGH: 1,
    Severity.MEDIUM: 2,
    Severity.LOW: 3,
    Severity.INFO: 4,
}

# =============================================================================
# Secrets Detection (built-in, no external tool needed)
# =============================================================================

# Patterns for sensitive files that should never be committed.
# Each entry is (regex, human-readable description); matched case-insensitively
# against the staged file path by check_sensitive_files.
SENSITIVE_FILE_PATTERNS = [
    # Environment files
    (r"\.env$", "Environment file"),
    (r"\.env\.[^.]+$", "Environment file"),  # .env.local, .env.production
    (r"\.envrc$", "direnv file"),
    # Private keys and certificates
    (r"\.(pem|key|p12|pfx|jks)$", "Private key/certificate"),
    # SSH keys
    (r"(^|/)id_(rsa|ed25519|ecdsa|dsa)$", "SSH private key"),
    # Keystores (crypto wallets) — NOTE: matches anywhere in the path
    (r"keystore", "Keystore file"),
    (r"\.keyfile$", "Key file"),
    # Credentials files
    (r"credentials.*\.json$", "Credentials file"),
    (r"secrets.*\.json$", "Secrets file"),
    (r"service.account.*\.json$", "Service account file"),
    # Generic secrets
    (r"\.(secret|secrets)$", "Secret file"),
]

# Compile patterns once at import time for performance
_SENSITIVE_PATTERNS = [(re.compile(p, re.IGNORECASE), desc) for p, desc in SENSITIVE_FILE_PATTERNS]
79
+
80
+
81
def check_sensitive_files(file_paths: list[str]) -> list[ToolFinding]:
    """Flag staged files whose names look like secrets or credentials.

    Paths ending in ".example" are treated as intentionally committed
    templates and skipped. At most one finding is produced per file
    (the first matching pattern wins).
    """
    results: list[ToolFinding] = []

    for path in file_paths:
        if path.endswith(".example"):
            continue  # template files are safe to commit

        # First matching (pattern, description) pair, or None.
        matched = next(
            (desc for pat, desc in _SENSITIVE_PATTERNS if pat.search(path)),
            None,
        )
        if matched is None:
            continue

        results.append(
            ToolFinding(
                tool="crucible",
                rule="sensitive-file",
                severity=Severity.CRITICAL,
                message=f"{matched} should not be committed: {path}",
                location=path,
                suggestion="Add to .gitignore or use .example suffix",
            )
        )

    return results
105
+
106
+
107
+ # =============================================================================
108
+ # Configuration
109
+ # =============================================================================
110
+
111
@dataclass(frozen=True)
class PrecommitConfig:
    """Pre-commit hook configuration.

    Loaded by load_precommit_config from .crucible/precommit.yaml (project)
    or ~/.claude/crucible/precommit.yaml (user), project taking priority.
    """

    # Minimum severity that causes the hook to fail (default: HIGH)
    fail_on: Severity = Severity.HIGH
    # Per-tool timeout in seconds
    timeout: int = 120
    # fnmatch-style glob patterns; matching files skip static analysis
    exclude: tuple[str, ...] = ()
    # When True, also keep findings within 5 lines of a staged hunk
    include_context: bool = False
    # Per-domain tool overrides
    tools: dict[Domain, list[str]] = field(default_factory=dict)
    # Skip specific tools globally
    skip_tools: tuple[str, ...] = ()
    # Verbose output
    verbose: bool = False
    # Secrets detection: "auto" (gitleaks if available, else builtin), "gitleaks", "builtin", or "none"
    secrets_tool: str = "auto"
127
+
128
+
129
@dataclass(frozen=True)
class PrecommitResult:
    """Result of a pre-commit check."""

    # True when no finding met the fail_on threshold and no secrets were found
    passed: bool
    # Findings after filtering to staged lines
    findings: tuple[ToolFinding, ...]
    # Files blocked by secrets check (finding locations)
    blocked_files: tuple[str, ...]
    # Count of findings keyed by severity name, e.g. {"high": 2}
    severity_counts: dict[str, int]
    # Number of files actually analyzed (after exclusions)
    files_checked: int
    # Set when the check could not run at all (e.g. not a git repository)
    error: str | None = None
139
+
140
+
141
def _read_config_file(path: Path) -> dict:
    """Best-effort read of a YAML config file; returns {} on any failure."""
    try:
        # Explicit encoding so config parsing does not depend on the locale.
        with open(path, encoding="utf-8") as f:
            return yaml.safe_load(f) or {}
    except (yaml.YAMLError, OSError):
        # Unreadable or malformed config falls back to defaults silently;
        # a pre-commit hook should never crash on a bad config file.
        return {}


def load_precommit_config(repo_path: str | None = None) -> PrecommitConfig:
    """Load pre-commit config with cascade priority (project > user).

    The project-level file (.crucible/precommit.yaml under repo_path) wins;
    the user-level file (~/.claude/crucible/precommit.yaml) is only consulted
    when the project file is missing, unreadable, or empty. Unknown or
    malformed values degrade to defaults rather than raising.

    Args:
        repo_path: Repository root to resolve the project config against
            (defaults to the current working directory).

    Returns:
        A fully populated PrecommitConfig.
    """
    project_config = Path(repo_path) / CONFIG_PROJECT if repo_path else CONFIG_PROJECT

    config_data: dict = {}
    if project_config.exists():
        config_data = _read_config_file(project_config)

    # Fall back to user-level config only when the project config yielded nothing.
    if not config_data and CONFIG_USER.exists():
        config_data = _read_config_file(CONFIG_USER)

    # Parse scalar settings; _parse_severity maps unknown names to HIGH.
    fail_on = _parse_severity(config_data.get("fail_on", "high").lower())
    timeout = config_data.get("timeout", 120)
    exclude = tuple(config_data.get("exclude", []))
    include_context = config_data.get("include_context", False)
    skip_tools = tuple(config_data.get("skip_tools", []))
    verbose = config_data.get("verbose", False)

    # Handle secrets_tool config (supports old skip_secrets_check for backwards compat)
    if config_data.get("skip_secrets_check", False):
        secrets_tool = "none"
    else:
        secrets_tool = config_data.get("secrets_tool", "auto")

    # Parse per-domain tool overrides; keys may be Domain values or common
    # aliases like "solidity"/"python" (see _domain_from_string).
    tools: dict[Domain, list[str]] = {}
    for domain_str, tool_list in config_data.get("tools", {}).items():
        try:
            tools[Domain(domain_str)] = list(tool_list)
        except ValueError:
            domain = _domain_from_string(domain_str)
            if domain:
                tools[domain] = list(tool_list)

    return PrecommitConfig(
        fail_on=fail_on,
        timeout=timeout,
        exclude=exclude,
        include_context=include_context,
        tools=tools,
        skip_tools=skip_tools,
        verbose=verbose,
        secrets_tool=secrets_tool,
    )
201
+
202
+
203
def _parse_severity(s: str) -> Severity:
    """Map a severity name (case-insensitive) to Severity; unknown -> HIGH."""
    normalized = s.lower()
    for name, sev in (
        ("critical", Severity.CRITICAL),
        ("high", Severity.HIGH),
        ("medium", Severity.MEDIUM),
        ("low", Severity.LOW),
        ("info", Severity.INFO),
    ):
        if name == normalized:
            return sev
    # Unrecognized names fall back to the default threshold.
    return Severity.HIGH
213
+
214
+
215
def _domain_from_string(s: str) -> Domain | None:
    """Map a human-friendly name (e.g. "solidity") to a Domain, or None."""
    key = s.lower()
    if key in ("solidity", "smart_contract"):
        return Domain.SMART_CONTRACT
    if key in ("python", "backend"):
        return Domain.BACKEND
    if key in ("frontend", "react", "typescript"):
        return Domain.FRONTEND
    if key in ("infrastructure", "terraform"):
        return Domain.INFRASTRUCTURE
    return None
229
+
230
+
231
+ # =============================================================================
232
+ # Domain Detection & Tool Selection
233
+ # =============================================================================
234
+
235
def _detect_domain_from_path(path: str) -> tuple[Domain, list[str]]:
    """Infer (domain, tag list) for a file from its extension.

    Unrecognized extensions map to (Domain.UNKNOWN, []).
    """
    # Ordered (suffixes, domain, tags) table; first match wins.
    table: list[tuple[tuple[str, ...], Domain, list[str]]] = [
        ((".sol",), Domain.SMART_CONTRACT, ["solidity", "smart_contract", "web3"]),
        ((".vy",), Domain.SMART_CONTRACT, ["vyper", "smart_contract", "web3"]),
        ((".py",), Domain.BACKEND, ["python", "backend"]),
        ((".ts", ".tsx"), Domain.FRONTEND, ["typescript", "frontend"]),
        ((".js", ".jsx"), Domain.FRONTEND, ["javascript", "frontend"]),
        ((".go",), Domain.BACKEND, ["go", "backend"]),
        ((".rs",), Domain.BACKEND, ["rust", "backend"]),
        ((".tf", ".yaml", ".yml"), Domain.INFRASTRUCTURE, ["infrastructure", "devops"]),
    ]
    for suffixes, domain, tags in table:
        if path.endswith(suffixes):
            return domain, tags
    return Domain.UNKNOWN, []
255
+
256
+
257
def _get_tools_for_file(file_path: str, config: PrecommitConfig) -> list[str]:
    """Select the tools to run for a file: config override, else domain defaults.

    Tools listed in config.skip_tools are always removed from the result.
    """
    domain = _detect_domain_from_path(file_path)[0]

    fallback = DEFAULT_TOOLS.get(domain, DEFAULT_TOOLS[Domain.UNKNOWN])
    selected = config.tools[domain] if domain in config.tools else fallback

    skipped = set(config.skip_tools)
    return [tool for tool in selected if tool not in skipped]
267
+
268
+
269
+ def _should_exclude(file_path: str, exclude_patterns: tuple[str, ...]) -> bool:
270
+ """Check if a file should be excluded based on patterns."""
271
+ return any(fnmatch(file_path, pattern) for pattern in exclude_patterns)
272
+
273
+
274
def _severity_meets_threshold(severity: Severity, threshold: Severity) -> bool:
    """True when severity is at least as severe as threshold.

    SEVERITY_ORDER assigns lower ranks to more severe levels, so
    "meets or exceeds" means rank <= threshold rank.
    """
    rank = SEVERITY_ORDER[severity]
    limit = SEVERITY_ORDER[threshold]
    return rank <= limit
277
+
278
+
279
+ # =============================================================================
280
+ # Finding Filtering
281
+ # =============================================================================
282
+
283
def _filter_findings_to_staged(
    findings: list[ToolFinding],
    context: GitContext,
    include_context: bool = False,
) -> list[ToolFinding]:
    """Filter findings to only those in staged lines.

    A finding's location is "path" or "path:line[:col]". Paths are matched
    against changed files with endswith in both directions, because tool
    output may be absolute while git paths are repo-relative. Findings with
    no line number are kept whenever their file appears in the staged set
    (e.g. sensitive-file findings).

    Args:
        findings: Raw findings from all tools.
        context: Staged-change context from get_staged_changes.
        include_context: When True, also keep findings within 5 lines of a
            staged hunk.

    Returns:
        The subset of findings that touch staged (added) lines.
    """
    # Map each changed file path to its list of (start, end) added-line ranges.
    changed_ranges: dict[str, list[tuple[int, int]]] = {}
    for change in context.changes:
        if change.status == "D":
            # Deleted files have no added lines to match against.
            continue
        ranges = [(r.start, r.end) for r in change.added_lines]
        changed_ranges[change.path] = ranges

    context_lines = 5 if include_context else 0
    filtered: list[ToolFinding] = []

    for finding in findings:
        parts = finding.location.split(":")
        if len(parts) < 2:
            # No line number - include if file matches (e.g., sensitive file check)
            file_path = parts[0]
            for changed_file in changed_ranges:
                if file_path.endswith(changed_file) or changed_file.endswith(file_path):
                    filtered.append(finding)
                    break
            continue

        file_path = parts[0]
        try:
            line_num = int(parts[1])
        except ValueError:
            # "path:something" where something is not a line number; drop it
            # rather than guess which staged lines it refers to.
            continue

        # Find the staged file this finding belongs to.
        matching_file = None
        for changed_file in changed_ranges:
            if file_path.endswith(changed_file) or changed_file.endswith(file_path):
                matching_file = changed_file
                break

        if not matching_file:
            continue

        # Keep the finding if its line falls inside any staged hunk,
        # optionally widened by context_lines on each side.
        ranges = changed_ranges[matching_file]
        for start, end in ranges:
            if start - context_lines <= line_num <= end + context_lines:
                filtered.append(finding)
                break

    return filtered
332
+
333
+
334
+ # =============================================================================
335
+ # Main Entry Point
336
+ # =============================================================================
337
+
338
def run_precommit(
    repo_path: str | None = None,
    config: PrecommitConfig | None = None,
) -> PrecommitResult:
    """
    Run pre-commit checks on staged changes.

    Checks performed:
    1. Secrets/sensitive file detection (built-in, always runs first;
       any hit blocks the commit and skips static analysis entirely)
    2. Static analysis via delegated tools (semgrep, ruff, bandit, slither)

    Args:
        repo_path: Repository path (defaults to cwd)
        config: Pre-commit config (loads from file if not provided)

    Returns:
        PrecommitResult with pass/fail status and findings
    """
    # Resolve the repository root; bail out with an error result if this
    # isn't a git repository.
    path = repo_path or os.getcwd()
    root_result = get_repo_root(path)
    if root_result.is_err:
        return PrecommitResult(
            passed=False,
            findings=(),
            blocked_files=(),
            severity_counts={},
            files_checked=0,
            error=root_result.error,
        )
    repo_root = root_result.value

    # Load config if not provided
    if config is None:
        config = load_precommit_config(repo_root)

    # Get staged changes
    context_result = get_staged_changes(repo_root)
    if context_result.is_err:
        return PrecommitResult(
            passed=False,
            findings=(),
            blocked_files=(),
            severity_counts={},
            files_checked=0,
            error=context_result.error,
        )
    context = context_result.value

    # Get changed files (excluding deleted); nothing staged is a clean pass.
    changed_files = get_changed_files(context)
    if not changed_files:
        return PrecommitResult(
            passed=True,
            findings=(),
            blocked_files=(),
            severity_counts={},
            files_checked=0,
        )

    all_findings: list[ToolFinding] = []
    blocked_files: list[str] = []

    # Step 1: Secrets detection (configurable tool)
    secrets_findings: list[ToolFinding] = []

    if config.secrets_tool != "none":
        use_gitleaks = False

        if config.secrets_tool == "gitleaks":
            use_gitleaks = True
        elif config.secrets_tool == "auto":
            # Use gitleaks if available, otherwise builtin
            gitleaks_status = check_tool("gitleaks")
            use_gitleaks = gitleaks_status.installed

        if use_gitleaks:
            # Delegate to gitleaks
            gitleaks_result = delegate_gitleaks(repo_root, staged_only=True, timeout=config.timeout)
            if gitleaks_result.is_ok:
                secrets_findings = gitleaks_result.value
            # If gitleaks fails, fall back to builtin only in "auto" mode;
            # when explicitly configured as "gitleaks", its failure is ignored.
            elif config.secrets_tool == "auto":
                secrets_findings = check_sensitive_files(changed_files)
        else:
            # Use built-in detection
            secrets_findings = check_sensitive_files(changed_files)

    if secrets_findings:
        blocked_files = [f.location for f in secrets_findings]
        all_findings.extend(secrets_findings)
        # Fail fast on secrets - don't run other tools
        return PrecommitResult(
            passed=False,
            findings=tuple(all_findings),
            blocked_files=tuple(blocked_files),
            severity_counts={"critical": len(secrets_findings)},
            files_checked=len(changed_files),
        )

    # Step 2: Filter excluded files for static analysis
    files_to_check = [
        f for f in changed_files if not _should_exclude(f, config.exclude)
    ]

    if not files_to_check:
        return PrecommitResult(
            passed=True,
            findings=(),
            blocked_files=(),
            severity_counts={},
            files_checked=0,
        )

    # Step 3: Run static analysis on each file
    for file_path in files_to_check:
        # os.path.join instead of f"{repo_root}/{file_path}" so the path is
        # well-formed on every platform.
        full_path = os.path.join(repo_root, file_path)
        tools = _get_tools_for_file(file_path, config)
        domain, _ = _detect_domain_from_path(file_path)

        for tool in tools:
            result: Result[list[ToolFinding], str]

            if tool == "semgrep":
                semgrep_config = get_semgrep_config(domain)
                result = delegate_semgrep(full_path, semgrep_config, config.timeout)
            elif tool == "ruff":
                result = delegate_ruff(full_path, config.timeout)
            elif tool == "bandit":
                result = delegate_bandit(full_path, config.timeout)
            elif tool == "slither":
                result = delegate_slither(full_path, timeout=config.timeout)
            else:
                continue

            # Tool failures are best-effort: a missing/broken analyzer should
            # not block the commit, so error results are skipped here.
            if result.is_ok:
                all_findings.extend(result.value)

    # Step 4: Filter to staged lines only
    filtered_findings = _filter_findings_to_staged(
        all_findings, context, config.include_context
    )

    # Count severities
    severity_counts: dict[str, int] = {}
    for f in filtered_findings:
        sev = f.severity.value
        severity_counts[sev] = severity_counts.get(sev, 0) + 1

    # Fail if any remaining finding meets the fail_on threshold.
    passed = all(
        not _severity_meets_threshold(f.severity, config.fail_on)
        for f in filtered_findings
    )

    return PrecommitResult(
        passed=passed,
        findings=tuple(filtered_findings),
        blocked_files=(),
        severity_counts=severity_counts,
        files_checked=len(files_to_check),
    )
501
+
502
+
503
+ # =============================================================================
504
+ # Output Formatting
505
+ # =============================================================================
506
+
507
def format_precommit_output(result: PrecommitResult, verbose: bool = False) -> str:
    """Render a PrecommitResult as human-readable terminal text.

    Layout: error message, OR blocked-files report, OR a findings summary
    (severity counts, then findings — all of them when verbose, otherwise
    only CRITICAL/HIGH) followed by a PASSED/FAILED status line.
    """
    out: list[str] = []

    if result.error:
        return f"Error: {result.error}"

    # Blocked files (sensitive files detected) — short-circuit report.
    if result.blocked_files:
        out.append("BLOCKED: Sensitive files detected in staged changes:")
        out.append("")
        out.extend(f"  - {f}" for f in result.blocked_files)
        out.append("")
        out.append("Remove these files from staging or add to .gitignore")
        out.append("")
        out.append("Pre-commit: FAILED")
        return "\n".join(out)

    if not result.findings:
        return f"Checked {result.files_checked} file(s) - no issues found"

    # Header
    out.append(f"Found {len(result.findings)} issue(s) in {result.files_checked} file(s):")
    out.append("")

    # Severity summary
    for name in ("critical", "high", "medium", "low", "info"):
        n = result.severity_counts.get(name, 0)
        if n > 0:
            out.append(f"  {name.upper()}: {n}")

    out.append("")

    if verbose:
        # Full listing, grouped by severity (most severe first).
        for sev in (Severity.CRITICAL, Severity.HIGH, Severity.MEDIUM, Severity.LOW, Severity.INFO):
            bucket = [f for f in result.findings if f.severity == sev]
            if not bucket:
                continue

            out.append(f"[{sev.value.upper()}]")
            for f in bucket:
                out.append(f"  {f.location}")
                out.append(f"    {f.rule}: {f.message}")
                if f.suggestion:
                    out.append(f"    Fix: {f.suggestion}")
            out.append("")
    else:
        # Compact: just show high+ findings
        for f in result.findings:
            if f.severity in (Severity.CRITICAL, Severity.HIGH):
                out.append(f"  [{f.severity.value.upper()}] {f.location}")
                out.append(f"    {f.rule}: {f.message}")

    # Status
    out.append("")
    if result.passed:
        out.append("Pre-commit: PASSED (findings below threshold)")
    else:
        out.append("Pre-commit: FAILED")

    return "\n".join(out)
573
+
574
+
575
# Exit codes returned by main() (consumed by git's hook driver)
EXIT_PASS = 0   # no blocking findings
EXIT_FAIL = 1   # findings at/above threshold, or secrets detected
EXIT_ERROR = 2  # the check could not run at all (e.g. not a git repo)
579
+
580
+
581
def main() -> int:
    """CLI entry point for the pre-commit hook.

    Parses CLI options, applies them on top of the file-based config, runs
    the checks, and prints either human-readable text or JSON.

    Returns:
        EXIT_PASS (0) on success, EXIT_FAIL (1) on blocking findings,
        EXIT_ERROR (2) when the check could not run at all.
    """
    import argparse
    import json
    from dataclasses import replace

    parser = argparse.ArgumentParser(
        prog="crucible-precommit",
        description="Run pre-commit checks on staged changes",
    )
    parser.add_argument(
        "--fail-on",
        choices=["critical", "high", "medium", "low", "info"],
        help="Fail on findings at or above this severity (default: high)",
    )
    parser.add_argument(
        "--verbose", "-v",
        action="store_true",
        help="Show all findings, not just high+",
    )
    parser.add_argument(
        "--json",
        action="store_true",
        help="Output as JSON",
    )
    parser.add_argument(
        "path",
        nargs="?",
        default=".",
        help="Repository path (default: current directory)",
    )

    args = parser.parse_args()

    # Load config and apply CLI overrides
    config = load_precommit_config(args.path)

    if args.fail_on:
        # BUG FIX: the previous version rebuilt PrecommitConfig by hand and
        # passed skip_secrets_check=..., a field that no longer exists on the
        # dataclass (it was replaced by secrets_tool), so any use of --fail-on
        # raised an exception. dataclasses.replace keeps every other field —
        # including secrets_tool — intact, and works on frozen dataclasses.
        config = replace(
            config,
            fail_on=_parse_severity(args.fail_on),
            verbose=args.verbose or config.verbose,
        )

    result = run_precommit(args.path, config)

    if args.json:
        output = {
            "passed": result.passed,
            "findings": [
                {
                    "tool": f.tool,
                    "rule": f.rule,
                    "severity": f.severity.value,
                    "message": f.message,
                    "location": f.location,
                    "suggestion": f.suggestion,
                }
                for f in result.findings
            ],
            "blocked_files": list(result.blocked_files),
            "severity_counts": result.severity_counts,
            "files_checked": result.files_checked,
            "error": result.error,
        }
        print(json.dumps(output, indent=2))
    else:
        print(format_precommit_output(result, args.verbose or config.verbose))

    if result.error:
        return EXIT_ERROR
    return EXIT_PASS if result.passed else EXIT_FAIL
657
+
658
+
659
if __name__ == "__main__":
    # Propagate main()'s exit code to the shell.
    raise SystemExit(main())