kekkai-cli 1.1.0-py3-none-any.whl → 1.1.1-py3-none-any.whl
This diff compares the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
- kekkai/cli.py +124 -33
- kekkai/dojo_import.py +9 -1
- kekkai/output.py +1 -1
- kekkai/report/unified.py +226 -0
- kekkai/triage/__init__.py +54 -1
- kekkai/triage/loader.py +196 -0
- {kekkai_cli-1.1.0.dist-info → kekkai_cli-1.1.1.dist-info}/METADATA +33 -13
- {kekkai_cli-1.1.0.dist-info → kekkai_cli-1.1.1.dist-info}/RECORD +11 -27
- {kekkai_cli-1.1.0.dist-info → kekkai_cli-1.1.1.dist-info}/entry_points.txt +0 -1
- {kekkai_cli-1.1.0.dist-info → kekkai_cli-1.1.1.dist-info}/top_level.txt +0 -1
- portal/__init__.py +0 -19
- portal/api.py +0 -155
- portal/auth.py +0 -103
- portal/enterprise/__init__.py +0 -45
- portal/enterprise/audit.py +0 -435
- portal/enterprise/licensing.py +0 -408
- portal/enterprise/rbac.py +0 -276
- portal/enterprise/saml.py +0 -595
- portal/ops/__init__.py +0 -53
- portal/ops/backup.py +0 -553
- portal/ops/log_shipper.py +0 -469
- portal/ops/monitoring.py +0 -517
- portal/ops/restore.py +0 -469
- portal/ops/secrets.py +0 -408
- portal/ops/upgrade.py +0 -591
- portal/tenants.py +0 -340
- portal/uploads.py +0 -259
- portal/web.py +0 -393
- {kekkai_cli-1.1.0.dist-info → kekkai_cli-1.1.1.dist-info}/WHEEL +0 -0
kekkai/cli.py
CHANGED
@@ -604,6 +604,36 @@ def _command_scan(
     )
     manifest.write_manifest(run_dir / "run.json", run_manifest)
 
+    # Generate unified report (aggregates all scanner findings)
+    if scan_results:
+        from .report.unified import UnifiedReportError, generate_unified_report
+
+        # Determine output path for unified report
+        if output_path:
+            # --output flag provided: use it for unified report
+            unified_report_path = Path(output_path).expanduser().resolve()
+            # Security: Validate path (ASVS V5.3.3)
+            if not is_within_base(base_dir, unified_report_path):
+                # Allow explicit paths outside base_dir, but warn
+                console.print(
+                    f"[warning]Writing outside kekkai home: {unified_report_path}[/warning]"
+                )
+        else:
+            # Default: save in run directory
+            unified_report_path = run_dir / "kekkai-report.json"
+
+        try:
+            generate_unified_report(
+                scan_results=scan_results,
+                output_path=unified_report_path,
+                run_id=run_id,
+                commit_sha=commit_sha,
+            )
+            console.print(f"[success]Unified report:[/success] {unified_report_path}")
+        except UnifiedReportError as e:
+            err_msg = sanitize_error(str(e))
+            console.print(f"[warning]Failed to generate unified report: {err_msg}[/warning]")
+
     # Collect all findings for policy evaluation
     all_findings: list[Finding] = []
     scan_errors: list[str] = []
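For orientation, the report written here has the shape built by generate_unified_report in the new kekkai/report/unified.py (shown in full below). A sketch of the resulting kekkai-report.json, expressed as a Python literal; every value is invented for the example:

# Illustrative shape of kekkai-report.json; field names come from
# report/unified.py below, but the values here are made up.
example_report = {
    "version": "1.0.0",
    "generated_at": "2025-01-01T00:00:00+00:00",  # datetime.now(UTC).isoformat()
    "run_id": "run-0001",
    "commit_sha": None,
    "scan_metadata": {
        "trivy": {"success": True, "findings_count": 2, "duration_ms": 0},
    },
    "summary": {"total_findings": 2, "critical": 0, "high": 1, "medium": 1,
                "low": 0, "info": 0, "unknown": 0},
    "findings": [],  # redacted finding dicts, see _finding_to_dict below
}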
@@ -822,6 +852,26 @@ def _resolve_github_repo(override: str | None) -> tuple[str | None, str | None]:
     return None, None
 
 
+def _normalize_scanner_name(stem: str) -> str:
+    """Normalize filename stem to scanner name.
+
+    Strips the "-results" suffix from scanner output filenames.
+
+    Examples:
+        gitleaks-results -> gitleaks
+        trivy-results -> trivy
+        semgrep-results -> semgrep
+        custom-scanner -> custom-scanner
+
+    Args:
+        stem: File stem (name without extension).
+
+    Returns:
+        Normalized scanner name.
+    """
+    return stem.removesuffix("-results")
+
+
 def _create_scanner(
     name: str,
     zap_target_url: str | None = None,
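Worth noting about the implementation: str.removesuffix (Python 3.9+) strips at most one occurrence and returns the string unchanged when the suffix is absent, which is exactly why "custom-scanner" passes through untouched. A quick illustration:

# str.removesuffix behavior (Python 3.9+), as relied on by _normalize_scanner_name
assert "gitleaks-results".removesuffix("-results") == "gitleaks"
assert "custom-scanner".removesuffix("-results") == "custom-scanner"  # no suffix: unchanged
assert "x-results-results".removesuffix("-results") == "x-results"    # strips only once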
@@ -1106,22 +1156,57 @@ def _threatflow_banner() -> str:
 def _command_triage(parsed: argparse.Namespace) -> int:
     """Run interactive triage TUI."""
     from .triage import run_triage
+    from .triage.loader import load_findings_from_path
 
     input_path_str = cast(str | None, getattr(parsed, "input", None))
     output_path_str = cast(str | None, getattr(parsed, "output", None))
 
-
-
+    # Default to latest run if no input specified
+    if not input_path_str:
+        runs_dir = app_base_dir() / "runs"
+        if runs_dir.exists():
+            run_dirs = sorted(
+                [d for d in runs_dir.iterdir() if d.is_dir()],
+                key=lambda d: d.stat().st_mtime,
+            )
+            if run_dirs:
+                input_path = run_dirs[-1]
+                console.print(f"[info]Using latest run: {input_path.name}[/info]\n")
+            else:
+                console.print("[danger]No scan runs found. Run 'kekkai scan' first.[/danger]")
+                return 1
+        else:
+            console.print("[danger]No scan runs found. Run 'kekkai scan' first.[/danger]")
+            return 1
+    else:
+        input_path = Path(input_path_str).expanduser().resolve()
 
-    if
-    console.print(f"[danger]Error:[/danger] Input
+    if not input_path.exists():
+        console.print(f"[danger]Error:[/danger] Input not found: {input_path}")
         return 1
 
+    output_path = Path(output_path_str).expanduser().resolve() if output_path_str else None
+
     console.print("[bold cyan]Kekkai Triage[/bold cyan] - Interactive Finding Review")
     console.print("Use j/k to navigate, f=false positive, c=confirmed, d=deferred")
     console.print("Press Ctrl+S to save, q to quit\n")
 
-
+    # Use new loader that supports raw scanner outputs
+    findings, errors = load_findings_from_path(input_path)
+
+    if errors:
+        console.print("[warning]Warnings:[/warning]")
+        for err in errors[:5]:  # Limit to first 5
+            console.print(f" - {err}")
+        console.print()
+
+    if not findings:
+        console.print("[warning]No findings to triage.[/warning]")
+        return 0
+
+    console.print(f"[info]Loaded {len(findings)} finding(s)[/info]\n")
+
+    return run_triage(findings=findings, output_path=output_path)
 
 
 def _command_fix(parsed: argparse.Namespace) -> int:
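The default-input logic above sorts every run directory by mtime and takes the last one. A minimal equivalent sketch of that pick, assuming run directories live under a runs/ folder (the helper name is hypothetical):

from pathlib import Path

def latest_run_dir(runs_dir: Path) -> Path | None:
    """Most recently modified run directory, or None if there are none (hypothetical helper)."""
    candidates = [d for d in runs_dir.iterdir() if d.is_dir()]
    return max(candidates, key=lambda d: d.stat().st_mtime, default=None)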
@@ -1414,9 +1499,13 @@ def _command_upload(parsed: argparse.Namespace) -> int:
     console.print(f"Product: {product_name}")
     console.print(f"Engagement: {engagement_name}")
 
-    # Find and load scan results
-    scan_files =
-
+    # Find and load scan results - prefer *-results.json first
+    scan_files = sorted(run_dir.glob("*-results.json"))
+    if not scan_files:
+        # Fallback to all JSON (excluding metadata files)
+        scan_files = sorted(
+            [f for f in run_dir.glob("*.json") if f.name not in ("run.json", "policy-result.json")]
+        )
 
     if not scan_files:
         console.print(f"[danger]Error:[/danger] No scan results found in {run_dir}")
@@ -1429,35 +1518,39 @@ def _command_upload(parsed: argparse.Namespace) -> int:
     scanners_map: dict[str, Scanner] = {}
 
     for scan_file in scan_files:
-
+        # Normalize scanner name: "gitleaks-results" -> "gitleaks"
+        scanner_name = _normalize_scanner_name(scan_file.stem)
         console.print(f" Loading {scanner_name}...")
 
+        # Load raw JSON
         try:
-
-
-        except _json.JSONDecodeError as e:
+            raw_text = scan_file.read_text(encoding="utf-8")
+            _json.loads(raw_text)  # Validate JSON syntax
+        except (OSError, _json.JSONDecodeError) as e:
             console.print(f" [warning]Skipped (invalid JSON): {e}[/warning]")
             continue
 
-        #
-
-
-
-
-
-
-
-
-
-
-
+        # Create scanner and use canonical parser
+        scanner = _create_scanner(scanner_name)
+        if not scanner:
+            console.print(" [warning]Skipped (unknown scanner)[/warning]")
+            continue
+
+        # Use canonical scanner parser (reuses validated logic)
+        findings = scanner.parse(raw_text)
+
+        scan_results.append(
+            ScanResult(
+                scanner=scanner.name,  # Use canonical scanner name
+                success=True,
+                findings=findings,
+                raw_output_path=scan_file,
+                duration_ms=0,
             )
-
-
-        if scanner:
-            scanners_map[scanner_name] = scanner
+        )
+        scanners_map[scanner.name] = scanner
 
-
+        console.print(f" {len(findings)} finding(s)")
 
     if not scan_results:
         console.print("[danger]Error:[/danger] No valid scan results to upload")
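The pattern in this hunk: read the file once, check that it is syntactically valid JSON, then hand the raw text to the scanner's own parser instead of re-implementing per-scanner parsing in the upload path. A stripped-down sketch of that guard, stdlib only (the helper name is illustrative):

import json
from pathlib import Path

def read_json_text(path: Path) -> str | None:
    """Return the file's text if it parses as JSON, else None (illustrative helper)."""
    try:
        text = path.read_text(encoding="utf-8")
        json.loads(text)  # syntax check only; real parsing is delegated to the scanner
    except (OSError, json.JSONDecodeError):
        return None
    return text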
@@ -1493,11 +1586,9 @@ def _command_upload(parsed: argparse.Namespace) -> int:
     )
 
     success_count = 0
-    scanner_names_list = list(scanners_map.keys())
     for idx, ir in enumerate(import_results):
-
-
-        )
+        # Label based on actual scan_results order (not scanners_map keys)
+        scanner_label = scan_results[idx].scanner if idx < len(scan_results) else f"scanner-{idx}"
         if ir.success:
             success_count += 1
             console.print(
kekkai/dojo_import.py
CHANGED
@@ -61,7 +61,15 @@ class DojoClient:
 
         try:
             with urlopen(req, timeout=self._timeout) as resp:  # noqa: S310 # nosec B310
-
+                raw_bytes = resp.read()  # Call once and store result
+                if not raw_bytes:  # Check bytes, not method
+                    return {}
+                try:
+                    result: dict[str, Any] = json.loads(raw_bytes.decode())
+                    return result
+                except json.JSONDecodeError:
+                    # Empty or invalid JSON response - return empty dict
+                    return {}
         except HTTPError as exc:
             error_body = exc.read().decode() if exc.fp else str(exc)
             raise RuntimeError(f"Dojo API error {exc.code}: {error_body}") from exc
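The inline comments ("Call once and store result", "Check bytes, not method") point at the bug class this fixes: an HTTP response body is a stream, so a second read() yields b"", and a bare resp.read without parentheses is a bound method that is always truthy. A self-contained demonstration, using io.BytesIO as a stand-in for the response:

import io

resp = io.BytesIO(b'{"count": 1}')  # stands in for an HTTP response body
assert bool(resp.read)               # bound method: truthy even when the body is empty
assert resp.read() == b'{"count": 1}'
assert resp.read() == b""            # stream already consumed; a second read is empty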
kekkai/output.py
CHANGED
kekkai/report/unified.py
ADDED
@@ -0,0 +1,226 @@
+"""Unified report generation for Kekkai scan results.
+
+Aggregates findings from multiple scanners into a single JSON report
+with security-hardened validation and resource limits (ASVS V10.3.3).
+"""
+
+from __future__ import annotations
+
+import contextlib
+import json
+import os
+import tempfile
+from datetime import UTC, datetime
+from pathlib import Path
+from typing import TYPE_CHECKING, Any
+
+from kekkai_core import redact
+
+if TYPE_CHECKING:
+    from ..scanners.base import Finding, ScanResult
+
+__all__ = [
+    "generate_unified_report",
+    "UnifiedReportError",
+]
+
+# Security limits per ASVS V10.3.3 (DoS mitigation)
+MAX_FINDINGS_PER_SCANNER = 10_000
+MAX_TOTAL_FINDINGS = 50_000
+MAX_JSON_SIZE_MB = 100
+
+
+class UnifiedReportError(Exception):
+    """Error during unified report generation."""
+
+
+def generate_unified_report(
+    scan_results: list[ScanResult],
+    output_path: Path,
+    run_id: str,
+    commit_sha: str | None = None,
+) -> dict[str, Any]:
+    """Generate unified kekkai-report.json from scan results.
+
+    Aggregates findings from all scanners with security controls:
+    - Resource limits (ASVS V10.3.3): 10k findings/scanner, 50k total
+    - Sensitive data redaction (ASVS V8.3.4)
+    - Atomic writes with safe permissions (ASVS V12.3.1)
+    - Path validation (ASVS V5.3.3)
+
+    Args:
+        scan_results: List of scanner results to aggregate.
+        output_path: Path to write unified report JSON.
+        run_id: Unique run identifier.
+        commit_sha: Optional git commit SHA.
+
+    Returns:
+        Report data dictionary.
+
+    Raises:
+        UnifiedReportError: If report generation fails.
+    """
+    # Aggregate findings with limits
+    all_findings: list[dict[str, Any]] = []
+    scanner_metadata: dict[str, dict[str, Any]] = {}
+    warnings: list[str] = []
+
+    for scan_res in scan_results:
+        if not scan_res.success:
+            scanner_metadata[scan_res.scanner] = {
+                "success": False,
+                "error": scan_res.error,
+                "findings_count": 0,
+                "duration_ms": scan_res.duration_ms,
+            }
+            continue
+
+        # Apply per-scanner limit (DoS mitigation)
+        findings = scan_res.findings[:MAX_FINDINGS_PER_SCANNER]
+        if len(scan_res.findings) > MAX_FINDINGS_PER_SCANNER:
+            warnings.append(
+                f"{scan_res.scanner}: truncated {len(scan_res.findings)} findings "
+                f"to {MAX_FINDINGS_PER_SCANNER} (limit)"
+            )
+
+        for finding in findings:
+            if len(all_findings) >= MAX_TOTAL_FINDINGS:
+                warnings.append(
+                    f"Reached max total findings limit ({MAX_TOTAL_FINDINGS}), stopping aggregation"
+                )
+                break
+
+            # Convert to dict with redaction (ASVS V8.3.4)
+            all_findings.append(_finding_to_dict(finding))
+
+        scanner_metadata[scan_res.scanner] = {
+            "success": scan_res.success,
+            "findings_count": len(findings),
+            "duration_ms": scan_res.duration_ms,
+        }
+
+    # Build report structure
+    report: dict[str, Any] = {
+        "version": "1.0.0",
+        "generated_at": datetime.now(UTC).isoformat(),
+        "run_id": run_id,
+        "commit_sha": commit_sha,
+        "scan_metadata": scanner_metadata,
+        "summary": _build_summary(all_findings),
+        "findings": all_findings,
+    }
+
+    if warnings:
+        report["warnings"] = warnings
+
+    # Write atomically (ASVS V12.3.1)
+    try:
+        _write_report_atomic(output_path, report)
+    except Exception as exc:
+        # ASVS V7.4.1: Don't leak full path in error
+        raise UnifiedReportError(f"Failed to write report: {exc}") from exc
+
+    return report
+
+
+def _finding_to_dict(finding: Finding) -> dict[str, Any]:
+    """Convert Finding to dictionary with redaction.
+
+    Args:
+        finding: Scanner finding object.
+
+    Returns:
+        Dictionary with redacted sensitive fields.
+    """
+    return {
+        "id": finding.dedupe_hash(),
+        "scanner": finding.scanner,
+        "title": redact(finding.title),
+        "severity": finding.severity.value,
+        "description": redact(finding.description),
+        "file_path": finding.file_path,
+        "line": finding.line,
+        "rule_id": finding.rule_id,
+        "cwe": finding.cwe,
+        "cve": finding.cve,
+        "package_name": finding.package_name,
+        "package_version": finding.package_version,
+        "fixed_version": finding.fixed_version,
+    }
+
+
+def _build_summary(findings: list[dict[str, Any]]) -> dict[str, int]:
+    """Build summary statistics from findings.
+
+    Args:
+        findings: List of finding dictionaries.
+
+    Returns:
+        Summary with total and severity counts.
+    """
+    summary = {
+        "total_findings": len(findings),
+        "critical": 0,
+        "high": 0,
+        "medium": 0,
+        "low": 0,
+        "info": 0,
+        "unknown": 0,
+    }
+
+    for finding in findings:
+        severity = finding.get("severity", "unknown")
+        if severity in summary:
+            summary[severity] += 1
+        else:
+            summary["unknown"] += 1
+
+    return summary
+
+
+def _write_report_atomic(path: Path, data: dict[str, Any]) -> None:
+    """Write JSON report atomically with permission checks.
+
+    Security controls:
+    - Size validation before writing (ASVS V10.3.3)
+    - Atomic write via temp file + rename (ASVS V12.3.1)
+    - Safe file permissions (0o644)
+
+    Args:
+        path: Output file path.
+        data: Report data to serialize.
+
+    Raises:
+        ValueError: If report exceeds size limit.
+        OSError: If write fails.
+    """
+    # Ensure parent directory exists
+    path.parent.mkdir(parents=True, exist_ok=True)
+
+    # Serialize and check size (ASVS V10.3.3)
+    json_str = json.dumps(data, indent=2, ensure_ascii=False)
+    size_mb = len(json_str.encode("utf-8")) / (1024 * 1024)
+    if size_mb > MAX_JSON_SIZE_MB:
+        raise ValueError(f"Report too large: {size_mb:.1f}MB > {MAX_JSON_SIZE_MB}MB")
+
+    # Atomic write: temp file + rename (ASVS V12.3.1)
+    temp_fd, temp_path_str = tempfile.mkstemp(
+        dir=str(path.parent), prefix=".kekkai-report-", suffix=".json.tmp"
+    )
+    temp_path = Path(temp_path_str)
+
+    try:
+        # Write to temp file
+        os.write(temp_fd, json_str.encode("utf-8"))
+        os.close(temp_fd)
+
+        # Set safe permissions (rw-r--r--)
+        os.chmod(temp_path, 0o644)
+
+        # Atomic rename
+        temp_path.rename(path)
+    except Exception:
+        # Clean up temp file on error
+        with contextlib.suppress(OSError):
+            temp_path.unlink()
+        raise
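The temp-file-plus-rename dance in _write_report_atomic is what makes the write atomic: readers observe either the old report or the complete new one, never a half-written file. A stripped-down sketch of the same pattern, omitting the size check, chmod, and cleanup of the full version; note it uses os.replace, which unlike Path.rename also overwrites an existing destination atomically on Windows:

import json
import os
import tempfile
from pathlib import Path

def write_json_atomic(path: Path, data: dict) -> None:
    """Minimal sketch: serialize to a temp file in the target directory, then rename."""
    path.parent.mkdir(parents=True, exist_ok=True)
    fd, tmp = tempfile.mkstemp(dir=str(path.parent), suffix=".tmp")
    try:
        os.write(fd, json.dumps(data, indent=2).encode("utf-8"))
    finally:
        os.close(fd)
    os.replace(tmp, path)  # atomic within the same filesystem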
kekkai/triage/__init__.py
CHANGED
@@ -4,9 +4,18 @@ Provides a terminal-based interface for reviewing findings,
 marking false positives, and generating .kekkaiignore files.
 """
 
-from
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from collections.abc import Sequence
+    from pathlib import Path
+
+# Import models and utilities (no heavy dependencies)
 from .audit import AuditEntry, TriageAuditLog, log_decisions
 from .ignore import IgnoreEntry, IgnoreFile, IgnorePatternValidator, ValidationError
+from .loader import load_findings_from_path
 from .models import (
     FindingEntry,
     Severity,
@@ -15,6 +24,49 @@ from .models import (
     load_findings_from_json,
 )
 
+
+def run_triage(
+    input_path: Path | None = None,
+    output_path: Path | None = None,
+    findings: Sequence[FindingEntry] | None = None,
+) -> int:
+    """Run the triage TUI (lazy import).
+
+    Args:
+        input_path: Path to findings JSON file.
+        output_path: Path for .kekkaiignore output.
+        findings: Pre-loaded findings (alternative to input_path).
+
+    Returns:
+        Exit code (0 for success).
+
+    Raises:
+        RuntimeError: If Textual is not installed.
+    """
+    try:
+        from .app import run_triage as _run_triage
+
+        return _run_triage(
+            input_path=input_path,
+            output_path=output_path,
+            findings=findings,
+        )
+    except ImportError as e:
+        raise RuntimeError(
+            "Triage TUI requires 'textual'. Install with: pip install textual"
+        ) from e
+
+
+# Re-export TriageApp for compatibility (lazy)
+def __getattr__(name: str) -> type:
+    """Lazy import for TriageApp."""
+    if name == "TriageApp":
+        from .app import TriageApp
+
+        return TriageApp
+    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
+
+
 __all__ = [
     "TriageApp",
     "run_triage",
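The module-level __getattr__ above is PEP 562 (Python 3.7+): attribute access on the package triggers the function, so kekkai.triage.TriageApp resolves on first use without pulling in Textual at import time. The general shape, with illustrative names:

# Generic PEP 562 lazy export; heavy_module / ExpensiveClass are placeholders,
# not names from kekkai.
def __getattr__(name: str):
    if name == "ExpensiveClass":
        from . import heavy_module  # deferred until first attribute access
        return heavy_module.ExpensiveClass
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")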
@@ -30,4 +82,5 @@ __all__ = [
     "TriageState",
     "Severity",
     "load_findings_from_json",
+    "load_findings_from_path",
 ]