kekkai-cli 1.1.0-py3-none-any.whl → 2.0.0-py3-none-any.whl

kekkai/cli.py CHANGED
@@ -58,6 +58,11 @@ def main(argv: Sequence[str] | None = None) -> int:
     init_parser = subparsers.add_parser("init", help="initialize config and directories")
     init_parser.add_argument("--config", type=str, help="Path to config file")
     init_parser.add_argument("--force", action="store_true", help="Overwrite existing config")
+    init_parser.add_argument(
+        "--ci",
+        action="store_true",
+        help="Auto-generate GitHub Actions workflow for CI/CD integration",
+    )
 
     scan_parser = subparsers.add_parser("scan", help="run a scan pipeline")
     scan_parser.add_argument("--config", type=str, help="Path to config file")
@@ -147,7 +152,7 @@ def main(argv: Sequence[str] | None = None) -> int:
         help="Minimum severity for PR comments (default: medium)",
     )
 
-    dojo_parser = subparsers.add_parser("dojo", help="manage local DefectDojo stack")
+    dojo_parser = subparsers.add_parser("dojo", help=argparse.SUPPRESS)
     dojo_subparsers = dojo_parser.add_subparsers(dest="dojo_command")
 
     dojo_up = dojo_subparsers.add_parser("up", help="start the local DefectDojo stack")
@@ -378,7 +383,7 @@ def main(argv: Sequence[str] | None = None) -> int:
 
     parsed = parser.parse_args(args)
     if parsed.command == "init":
-        return _command_init(parsed.config, parsed.force)
+        return _command_init(parsed.config, parsed.force, parsed.ci)
     if parsed.command == "scan":
         return _command_scan(
             parsed.config,
@@ -427,7 +432,7 @@ def _handle_no_args() -> int:
     return 0
 
 
-def _command_init(config_override: str | None, force: bool) -> int:
+def _command_init(config_override: str | None, force: bool, ci: bool = False) -> int:
     cfg_path = _resolve_config_path(config_override)
     if cfg_path.exists() and not force:
         print(f"Config already exists at {cfg_path}. Use --force to overwrite.")
@@ -441,6 +446,27 @@ def _command_init(config_override: str | None, force: bool) -> int:
     cfg_path.write_text(load_config_text(base_dir))
     print_dashboard()
     console.print(f"\n[success]Initialized config at[/success] [cyan]{cfg_path}[/cyan]\n")
+
+    # Auto-generate GitHub Actions workflow if --ci flag is set
+    if ci:
+        workflow_created = _generate_github_workflow()
+        if workflow_created:
+            console.print(
+                "[success]✓[/success] Created GitHub Actions workflow: "
+                "[cyan].github/workflows/kekkai-security.yml[/cyan]"
+            )
+            console.print(
+                "\n[info]Next steps:[/info]\n"
+                "  1. Commit the workflow file:\n"
+                "     [dim]git add .github/workflows/kekkai-security.yml[/dim]\n"
+                "  2. Push to GitHub\n"
+                "  3. Security scans will run automatically on pull requests\n"
+            )
+        else:
+            console.print(
+                "[warning]⚠[/warning] Not a Git repository or .github/workflows/ cannot be created"
+            )
+
     return 0
 
 
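Since `main()` accepts an explicit argv sequence (visible in the hunk headers above), the new flag can be exercised without a shell. A minimal sketch, assuming the package is importable in the current environment:

    from kekkai.cli import main

    # Equivalent to running `kekkai init --ci` at a Git repository root:
    # writes the config, then generates .github/workflows/kekkai-security.yml.
    exit_code = main(["init", "--ci"])
    print(exit_code)  # 0 on success
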
@@ -604,6 +630,36 @@ def _command_scan(
     )
     manifest.write_manifest(run_dir / "run.json", run_manifest)
 
+    # Generate unified report (aggregates all scanner findings)
+    if scan_results:
+        from .report.unified import UnifiedReportError, generate_unified_report
+
+        # Determine output path for unified report
+        if output_path:
+            # --output flag provided: use it for unified report
+            unified_report_path = Path(output_path).expanduser().resolve()
+            # Security: Validate path (ASVS V5.3.3)
+            if not is_within_base(base_dir, unified_report_path):
+                # Allow explicit paths outside base_dir, but warn
+                console.print(
+                    f"[warning]Writing outside kekkai home: {unified_report_path}[/warning]"
+                )
+        else:
+            # Default: save in run directory
+            unified_report_path = run_dir / "kekkai-report.json"
+
+        try:
+            generate_unified_report(
+                scan_results=scan_results,
+                output_path=unified_report_path,
+                run_id=run_id,
+                commit_sha=commit_sha,
+            )
+            console.print(f"[success]Unified report:[/success] {unified_report_path}")
+        except UnifiedReportError as e:
+            err_msg = sanitize_error(str(e))
+            console.print(f"[warning]Failed to generate unified report: {err_msg}[/warning]")
+
     # Collect all findings for policy evaluation
     all_findings: list[Finding] = []
     scan_errors: list[str] = []
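The report structure written here is defined by the new `kekkai/report/unified.py` module later in this diff. A consumer-side sketch of reading the default output back; the run directory name is hypothetical:

    import json
    from pathlib import Path

    # Hypothetical run directory under the kekkai home shown in the workflow template.
    report_path = Path.home() / ".kekkai" / "runs" / "run-0001" / "kekkai-report.json"
    report = json.loads(report_path.read_text(encoding="utf-8"))
    print(report["summary"]["total_findings"], "total findings")
    for finding in report["findings"]:
        print(finding["severity"], finding["scanner"], finding["title"])
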
@@ -822,6 +878,26 @@ def _resolve_github_repo(override: str | None) -> tuple[str | None, str | None]:
     return None, None
 
 
+def _normalize_scanner_name(stem: str) -> str:
+    """Normalize filename stem to scanner name.
+
+    Strips the "-results" suffix from scanner output filenames.
+
+    Examples:
+        gitleaks-results -> gitleaks
+        trivy-results -> trivy
+        semgrep-results -> semgrep
+        custom-scanner -> custom-scanner
+
+    Args:
+        stem: File stem (name without extension).
+
+    Returns:
+        Normalized scanner name.
+    """
+    return stem.removesuffix("-results")
+
+
 def _create_scanner(
     name: str,
     zap_target_url: str | None = None,
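`str.removesuffix` (Python 3.9+) is a no-op when the suffix is absent, which is why `custom-scanner` in the docstring passes through unchanged. A quick illustration:

    from pathlib import Path

    stem = Path("gitleaks-results.json").stem  # "gitleaks-results"
    assert stem.removesuffix("-results") == "gitleaks"
    assert "custom-scanner".removesuffix("-results") == "custom-scanner"  # unchanged
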
@@ -1106,22 +1182,57 @@ def _threatflow_banner() -> str:
 def _command_triage(parsed: argparse.Namespace) -> int:
     """Run interactive triage TUI."""
     from .triage import run_triage
+    from .triage.loader import load_findings_from_path
 
     input_path_str = cast(str | None, getattr(parsed, "input", None))
     output_path_str = cast(str | None, getattr(parsed, "output", None))
 
-    input_path = Path(input_path_str).expanduser().resolve() if input_path_str else None
-    output_path = Path(output_path_str).expanduser().resolve() if output_path_str else None
+    # Default to latest run if no input specified
+    if not input_path_str:
+        runs_dir = app_base_dir() / "runs"
+        if runs_dir.exists():
+            run_dirs = sorted(
+                [d for d in runs_dir.iterdir() if d.is_dir()],
+                key=lambda d: d.stat().st_mtime,
+            )
+            if run_dirs:
+                input_path = run_dirs[-1]
+                console.print(f"[info]Using latest run: {input_path.name}[/info]\n")
+            else:
+                console.print("[danger]No scan runs found. Run 'kekkai scan' first.[/danger]")
+                return 1
+        else:
+            console.print("[danger]No scan runs found. Run 'kekkai scan' first.[/danger]")
+            return 1
+    else:
+        input_path = Path(input_path_str).expanduser().resolve()
 
-    if input_path and not input_path.exists():
-        console.print(f"[danger]Error:[/danger] Input file not found: {input_path}")
+    if not input_path.exists():
+        console.print(f"[danger]Error:[/danger] Input not found: {input_path}")
         return 1
 
+    output_path = Path(output_path_str).expanduser().resolve() if output_path_str else None
+
     console.print("[bold cyan]Kekkai Triage[/bold cyan] - Interactive Finding Review")
     console.print("Use j/k to navigate, f=false positive, c=confirmed, d=deferred")
     console.print("Press Ctrl+S to save, q to quit\n")
 
-    return run_triage(input_path=input_path, output_path=output_path)
+    # Use new loader that supports raw scanner outputs
+    findings, errors = load_findings_from_path(input_path)
+
+    if errors:
+        console.print("[warning]Warnings:[/warning]")
+        for err in errors[:5]:  # Limit to first 5
+            console.print(f"  - {err}")
+        console.print()
+
+    if not findings:
+        console.print("[warning]No findings to triage.[/warning]")
+        return 0
+
+    console.print(f"[info]Loaded {len(findings)} finding(s)[/info]\n")
+
+    return run_triage(findings=findings, output_path=output_path)
 
 
 def _command_fix(parsed: argparse.Namespace) -> int:
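As a design note, the latest-run lookup sorts every run directory by mtime and takes the last element; `max()` expresses the same selection without a full sort. A minimal equivalent sketch, assuming `app_base_dir()` resolves to the `~/.kekkai` home implied by the workflow template:

    from pathlib import Path

    runs_dir = Path.home() / ".kekkai" / "runs"  # assumed app_base_dir() / "runs"
    run_dirs = [d for d in runs_dir.iterdir() if d.is_dir()]
    latest = max(run_dirs, key=lambda d: d.stat().st_mtime) if run_dirs else None
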
@@ -1414,9 +1525,13 @@ def _command_upload(parsed: argparse.Namespace) -> int:
     console.print(f"Product: {product_name}")
     console.print(f"Engagement: {engagement_name}")
 
-    # Find and load scan results
-    scan_files = list(run_dir.glob("*.json"))
-    scan_files = [f for f in scan_files if f.name not in ("run.json", "policy-result.json")]
+    # Find and load scan results - prefer *-results.json first
+    scan_files = sorted(run_dir.glob("*-results.json"))
+    if not scan_files:
+        # Fallback to all JSON (excluding metadata files)
+        scan_files = sorted(
+            [f for f in run_dir.glob("*.json") if f.name not in ("run.json", "policy-result.json")]
+        )
 
     if not scan_files:
         console.print(f"[danger]Error:[/danger] No scan results found in {run_dir}")
@@ -1429,35 +1544,39 @@ def _command_upload(parsed: argparse.Namespace) -> int:
     scanners_map: dict[str, Scanner] = {}
 
     for scan_file in scan_files:
-        scanner_name = scan_file.stem  # e.g., "trivy", "semgrep", "gitleaks"
+        # Normalize scanner name: "gitleaks-results" -> "gitleaks"
+        scanner_name = _normalize_scanner_name(scan_file.stem)
         console.print(f"  Loading {scanner_name}...")
 
+        # Load raw JSON
         try:
-            with scan_file.open() as f:
-                data = _json.load(f)
-        except _json.JSONDecodeError as e:
+            raw_text = scan_file.read_text(encoding="utf-8")
+            _json.loads(raw_text)  # Validate JSON syntax
+        except (OSError, _json.JSONDecodeError) as e:
             console.print(f"  [warning]Skipped (invalid JSON): {e}[/warning]")
             continue
 
-        # Parse findings based on format
-        findings = _parse_findings_from_json(data)
-
-        if findings:
-            scan_results.append(
-                ScanResult(
-                    scanner=scanner_name,
-                    success=True,
-                    findings=findings,
-                    raw_output_path=scan_file,
-                    duration_ms=0,
-                )
+        # Create scanner and use canonical parser
+        scanner = _create_scanner(scanner_name)
+        if not scanner:
+            console.print("  [warning]Skipped (unknown scanner)[/warning]")
+            continue
+
+        # Use canonical scanner parser (reuses validated logic)
+        findings = scanner.parse(raw_text)
+
+        scan_results.append(
+            ScanResult(
+                scanner=scanner.name,  # Use canonical scanner name
+                success=True,
+                findings=findings,
+                raw_output_path=scan_file,
+                duration_ms=0,
             )
-            # Create scanner instance for import
-            scanner = _create_scanner(scanner_name)
-            if scanner:
-                scanners_map[scanner_name] = scanner
+        )
+        scanners_map[scanner.name] = scanner
 
-        console.print(f"  {len(findings)} findings")
+        console.print(f"  {len(findings)} finding(s)")
 
     if not scan_results:
         console.print("[danger]Error:[/danger] No valid scan results to upload")
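The loop now re-parses saved raw output with the same parser used during live scans, so uploaded findings match scan-time findings. A sketch of that round-trip for a single file, using only helpers visible in this diff (the filename is hypothetical):

    from pathlib import Path

    scan_file = Path("gitleaks-results.json")  # hypothetical saved raw output
    scanner = _create_scanner(_normalize_scanner_name(scan_file.stem))
    if scanner is not None:
        findings = scanner.parse(scan_file.read_text(encoding="utf-8"))
        print(scanner.name, len(findings))
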
@@ -1493,11 +1612,9 @@ def _command_upload(parsed: argparse.Namespace) -> int:
     )
 
     success_count = 0
-    scanner_names_list = list(scanners_map.keys())
     for idx, ir in enumerate(import_results):
-        scanner_label = (
-            scanner_names_list[idx] if idx < len(scanner_names_list) else f"scanner-{idx}"
-        )
+        # Label based on actual scan_results order (not scanners_map keys)
+        scanner_label = scan_results[idx].scanner if idx < len(scan_results) else f"scanner-{idx}"
         if ir.success:
             success_count += 1
             console.print(
@@ -1661,6 +1778,91 @@ def _resolve_dojo_open_port(parsed: argparse.Namespace, compose_root: Path) -> i
     return dojo.DEFAULT_PORT
 
 
+def _generate_github_workflow() -> bool:
+    """Generate GitHub Actions workflow file for security scanning.
+
+    Returns:
+        True if workflow was created successfully, False otherwise.
+    """
+    # Check if we're in a git repository
+    cwd = Path.cwd()
+    git_dir = cwd / ".git"
+    if not git_dir.exists():
+        return False
+
+    # Create .github/workflows directory
+    workflows_dir = cwd / ".github" / "workflows"
+    try:
+        workflows_dir.mkdir(parents=True, exist_ok=True)
+    except (OSError, PermissionError):
+        return False
+
+    # Generate workflow file
+    workflow_path = workflows_dir / "kekkai-security.yml"
+    if workflow_path.exists():
+        # Don't overwrite existing workflow
+        return False
+
+    workflow_content = """name: Kekkai Security Scan
+
+on:
+  pull_request:
+    types: [opened, synchronize, reopened]
+  push:
+    branches:
+      - main
+      - develop
+
+permissions:
+  contents: read
+  pull-requests: write
+
+jobs:
+  security-scan:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: '3.12'
+
+      - name: Install Kekkai
+        run: |
+          python -m pip install --upgrade pip
+          pip install kekkai-cli
+
+      - name: Run Security Scan
+        run: |
+          kekkai scan --ci --fail-on high
+        continue-on-error: true
+
+      - name: Post PR Comments (if PR)
+        if: github.event_name == 'pull_request'
+        run: |
+          kekkai scan --pr-comment --max-comments 50
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+        continue-on-error: true
+
+      - name: Upload Results
+        uses: actions/upload-artifact@v4
+        if: always()
+        with:
+          name: kekkai-scan-results
+          path: ~/.kekkai/runs/*/
+          retention-days: 30
+"""
+
+    try:
+        workflow_path.write_text(workflow_content, encoding="utf-8")
+        return True
+    except (OSError, PermissionError):
+        return False
+
+
 def _resolve_config_path(config_override: str | None) -> Path:
     if config_override:
         return Path(config_override).expanduser().resolve()
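A quick way to sanity-check the generated workflow before committing is to round-trip it through a YAML parser. This sketch assumes PyYAML is available in the development environment (it is not a kekkai-cli dependency):

    import yaml  # assumption: PyYAML installed separately
    from pathlib import Path

    workflow = yaml.safe_load(
        Path(".github/workflows/kekkai-security.yml").read_text(encoding="utf-8")
    )
    assert "security-scan" in workflow["jobs"]
    # PyYAML (YAML 1.1) resolves the bare key `on` to boolean True.
    assert ("on" in workflow) or (True in workflow)
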
kekkai/dojo_import.py CHANGED
@@ -61,7 +61,15 @@ class DojoClient:
 
         try:
             with urlopen(req, timeout=self._timeout) as resp:  # noqa: S310 # nosec B310
-                return json.loads(resp.read().decode()) if resp.read else {}
+                raw_bytes = resp.read()  # Call once and store result
+                if not raw_bytes:  # Check bytes, not method
+                    return {}
+                try:
+                    result: dict[str, Any] = json.loads(raw_bytes.decode())
+                    return result
+                except json.JSONDecodeError:
+                    # Empty or invalid JSON response - return empty dict
+                    return {}
         except HTTPError as exc:
             error_body = exc.read().decode() if exc.fp else str(exc)
             raise RuntimeError(f"Dojo API error {exc.code}: {error_body}") from exc
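The replaced line guarded on `resp.read`, the bound method, which is always truthy, so an empty response body still reached `json.loads` and raised. A self-contained demonstration of the truthiness mistake, using `io.BytesIO` in place of an HTTP response:

    import io
    import json

    resp = io.BytesIO(b"")     # stands in for an empty response body
    assert bool(resp.read)     # the method object is always truthy
    assert resp.read() == b""  # the body itself is empty (falsy)
    try:
        json.loads("")         # what the old code effectively attempted
    except json.JSONDecodeError:
        print("empty body crashed the old path")
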
kekkai/output.py CHANGED
@@ -57,7 +57,7 @@ BANNER_ASCII = r"""
  /_/\_\\___/_/\_/_/\_\\_,_/_/
 """
 
-VERSION = "1.1.0"
+VERSION = "2.0.0"
 
 
 def print_dashboard() -> None:
@@ -88,9 +88,8 @@ def print_dashboard() -> None:
     menu_table.add_column("Description", style="desc", ratio=3)
 
     menu_table.add_row("kekkai scan", "Run security scan in current directory")
-    menu_table.add_row("kekkai threatflow", "Generate AI-powered threat model")
-    menu_table.add_row("kekkai dojo", "Manage local DefectDojo instance")
     menu_table.add_row("kekkai triage", "Interactive finding review (TUI)")
+    menu_table.add_row("kekkai threatflow", "Generate AI-powered threat model")
     menu_table.add_row("kekkai report", "Generate compliance reports")
     menu_table.add_row("kekkai config", "Manage settings and keys")
 
kekkai/report/unified.py ADDED
@@ -0,0 +1,226 @@
+"""Unified report generation for Kekkai scan results.
+
+Aggregates findings from multiple scanners into a single JSON report
+with security-hardened validation and resource limits (ASVS V10.3.3).
+"""
+
+from __future__ import annotations
+
+import contextlib
+import json
+import os
+import tempfile
+from datetime import UTC, datetime
+from pathlib import Path
+from typing import TYPE_CHECKING, Any
+
+from kekkai_core import redact
+
+if TYPE_CHECKING:
+    from ..scanners.base import Finding, ScanResult
+
+__all__ = [
+    "generate_unified_report",
+    "UnifiedReportError",
+]
+
+# Security limits per ASVS V10.3.3 (DoS mitigation)
+MAX_FINDINGS_PER_SCANNER = 10_000
+MAX_TOTAL_FINDINGS = 50_000
+MAX_JSON_SIZE_MB = 100
+
+
+class UnifiedReportError(Exception):
+    """Error during unified report generation."""
+
+
+def generate_unified_report(
+    scan_results: list[ScanResult],
+    output_path: Path,
+    run_id: str,
+    commit_sha: str | None = None,
+) -> dict[str, Any]:
+    """Generate unified kekkai-report.json from scan results.
+
+    Aggregates findings from all scanners with security controls:
+    - Resource limits (ASVS V10.3.3): 10k findings/scanner, 50k total
+    - Sensitive data redaction (ASVS V8.3.4)
+    - Atomic writes with safe permissions (ASVS V12.3.1)
+    - Path validation (ASVS V5.3.3)
+
+    Args:
+        scan_results: List of scanner results to aggregate.
+        output_path: Path to write unified report JSON.
+        run_id: Unique run identifier.
+        commit_sha: Optional git commit SHA.
+
+    Returns:
+        Report data dictionary.
+
+    Raises:
+        UnifiedReportError: If report generation fails.
+    """
+    # Aggregate findings with limits
+    all_findings: list[dict[str, Any]] = []
+    scanner_metadata: dict[str, dict[str, Any]] = {}
+    warnings: list[str] = []
+
+    for scan_res in scan_results:
+        if not scan_res.success:
+            scanner_metadata[scan_res.scanner] = {
+                "success": False,
+                "error": scan_res.error,
+                "findings_count": 0,
+                "duration_ms": scan_res.duration_ms,
+            }
+            continue
+
+        # Apply per-scanner limit (DoS mitigation)
+        findings = scan_res.findings[:MAX_FINDINGS_PER_SCANNER]
+        if len(scan_res.findings) > MAX_FINDINGS_PER_SCANNER:
+            warnings.append(
+                f"{scan_res.scanner}: truncated {len(scan_res.findings)} findings "
+                f"to {MAX_FINDINGS_PER_SCANNER} (limit)"
+            )
+
+        for finding in findings:
+            if len(all_findings) >= MAX_TOTAL_FINDINGS:
+                warnings.append(
+                    f"Reached max total findings limit ({MAX_TOTAL_FINDINGS}), stopping aggregation"
+                )
+                break
+
+            # Convert to dict with redaction (ASVS V8.3.4)
+            all_findings.append(_finding_to_dict(finding))
+
+        scanner_metadata[scan_res.scanner] = {
+            "success": scan_res.success,
+            "findings_count": len(findings),
+            "duration_ms": scan_res.duration_ms,
+        }
+
+    # Build report structure
+    report: dict[str, Any] = {
+        "version": "1.0.0",
+        "generated_at": datetime.now(UTC).isoformat(),
+        "run_id": run_id,
+        "commit_sha": commit_sha,
+        "scan_metadata": scanner_metadata,
+        "summary": _build_summary(all_findings),
+        "findings": all_findings,
+    }
+
+    if warnings:
+        report["warnings"] = warnings
+
+    # Write atomically (ASVS V12.3.1)
+    try:
+        _write_report_atomic(output_path, report)
+    except Exception as exc:
+        # ASVS V7.4.1: Don't leak full path in error
+        raise UnifiedReportError(f"Failed to write report: {exc}") from exc
+
+    return report
+
+
+def _finding_to_dict(finding: Finding) -> dict[str, Any]:
+    """Convert Finding to dictionary with redaction.
+
+    Args:
+        finding: Scanner finding object.
+
+    Returns:
+        Dictionary with redacted sensitive fields.
+    """
+    return {
+        "id": finding.dedupe_hash(),
+        "scanner": finding.scanner,
+        "title": redact(finding.title),
+        "severity": finding.severity.value,
+        "description": redact(finding.description),
+        "file_path": finding.file_path,
+        "line": finding.line,
+        "rule_id": finding.rule_id,
+        "cwe": finding.cwe,
+        "cve": finding.cve,
+        "package_name": finding.package_name,
+        "package_version": finding.package_version,
+        "fixed_version": finding.fixed_version,
+    }
+
+
+def _build_summary(findings: list[dict[str, Any]]) -> dict[str, int]:
+    """Build summary statistics from findings.
+
+    Args:
+        findings: List of finding dictionaries.
+
+    Returns:
+        Summary with total and severity counts.
+    """
+    summary = {
+        "total_findings": len(findings),
+        "critical": 0,
+        "high": 0,
+        "medium": 0,
+        "low": 0,
+        "info": 0,
+        "unknown": 0,
+    }
+
+    for finding in findings:
+        severity = finding.get("severity", "unknown")
+        if severity in summary:
+            summary[severity] += 1
+        else:
+            summary["unknown"] += 1
+
+    return summary
+
+
+def _write_report_atomic(path: Path, data: dict[str, Any]) -> None:
+    """Write JSON report atomically with permission checks.
+
+    Security controls:
+    - Size validation before writing (ASVS V10.3.3)
+    - Atomic write via temp file + rename (ASVS V12.3.1)
+    - Safe file permissions (0o644)
+
+    Args:
+        path: Output file path.
+        data: Report data to serialize.
+
+    Raises:
+        ValueError: If report exceeds size limit.
+        OSError: If write fails.
+    """
+    # Ensure parent directory exists
+    path.parent.mkdir(parents=True, exist_ok=True)
+
+    # Serialize and check size (ASVS V10.3.3)
+    json_str = json.dumps(data, indent=2, ensure_ascii=False)
+    size_mb = len(json_str.encode("utf-8")) / (1024 * 1024)
+    if size_mb > MAX_JSON_SIZE_MB:
+        raise ValueError(f"Report too large: {size_mb:.1f}MB > {MAX_JSON_SIZE_MB}MB")
+
+    # Atomic write: temp file + rename (ASVS V12.3.1)
+    temp_fd, temp_path_str = tempfile.mkstemp(
+        dir=str(path.parent), prefix=".kekkai-report-", suffix=".json.tmp"
+    )
+    temp_path = Path(temp_path_str)
+
+    try:
+        # Write to temp file
+        os.write(temp_fd, json_str.encode("utf-8"))
+        os.close(temp_fd)
+
+        # Set safe permissions (rw-r--r--)
+        os.chmod(temp_path, 0o644)
+
+        # Atomic rename
+        temp_path.rename(path)
+    except Exception:
+        # Clean up temp file on error
+        with contextlib.suppress(OSError):
+            temp_path.unlink()
+        raise
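
A usage sketch for the new module: the `ScanResult` construction mirrors the call in `_command_upload` above, and the import path for `ScanResult` is inferred from the module's TYPE_CHECKING block; the field values are hypothetical.

    from pathlib import Path

    from kekkai.report.unified import generate_unified_report
    from kekkai.scanners.base import ScanResult  # inferred import path

    results = [
        ScanResult(
            scanner="trivy",
            success=True,
            findings=[],  # hypothetical: a clean scan
            raw_output_path=Path("trivy-results.json"),
            duration_ms=120,
        )
    ]
    report = generate_unified_report(
        scan_results=results,
        output_path=Path("kekkai-report.json"),
        run_id="run-0001",  # hypothetical run id
        commit_sha=None,
    )
    print(report["summary"]["total_findings"])  # 0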