kekkai-cli 1.0.5__py3-none-any.whl → 1.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. kekkai/cli.py +789 -19
  2. kekkai/compliance/__init__.py +68 -0
  3. kekkai/compliance/hipaa.py +235 -0
  4. kekkai/compliance/mappings.py +136 -0
  5. kekkai/compliance/owasp.py +517 -0
  6. kekkai/compliance/owasp_agentic.py +267 -0
  7. kekkai/compliance/pci_dss.py +205 -0
  8. kekkai/compliance/soc2.py +209 -0
  9. kekkai/dojo.py +91 -14
  10. kekkai/dojo_import.py +9 -1
  11. kekkai/fix/__init__.py +47 -0
  12. kekkai/fix/audit.py +278 -0
  13. kekkai/fix/differ.py +427 -0
  14. kekkai/fix/engine.py +500 -0
  15. kekkai/fix/prompts.py +251 -0
  16. kekkai/output.py +10 -12
  17. kekkai/report/__init__.py +41 -0
  18. kekkai/report/compliance_matrix.py +98 -0
  19. kekkai/report/generator.py +365 -0
  20. kekkai/report/html.py +69 -0
  21. kekkai/report/pdf.py +63 -0
  22. kekkai/report/unified.py +226 -0
  23. kekkai/scanners/container.py +33 -3
  24. kekkai/scanners/gitleaks.py +3 -1
  25. kekkai/scanners/semgrep.py +1 -1
  26. kekkai/scanners/trivy.py +1 -1
  27. kekkai/threatflow/model_adapter.py +143 -1
  28. kekkai/triage/__init__.py +54 -1
  29. kekkai/triage/loader.py +196 -0
  30. kekkai_cli-1.1.1.dist-info/METADATA +379 -0
  31. {kekkai_cli-1.0.5.dist-info → kekkai_cli-1.1.1.dist-info}/RECORD +34 -33
  32. {kekkai_cli-1.0.5.dist-info → kekkai_cli-1.1.1.dist-info}/entry_points.txt +0 -1
  33. {kekkai_cli-1.0.5.dist-info → kekkai_cli-1.1.1.dist-info}/top_level.txt +0 -1
  34. kekkai_cli-1.0.5.dist-info/METADATA +0 -135
  35. portal/__init__.py +0 -19
  36. portal/api.py +0 -155
  37. portal/auth.py +0 -103
  38. portal/enterprise/__init__.py +0 -32
  39. portal/enterprise/audit.py +0 -435
  40. portal/enterprise/licensing.py +0 -342
  41. portal/enterprise/rbac.py +0 -276
  42. portal/enterprise/saml.py +0 -595
  43. portal/ops/__init__.py +0 -53
  44. portal/ops/backup.py +0 -553
  45. portal/ops/log_shipper.py +0 -469
  46. portal/ops/monitoring.py +0 -517
  47. portal/ops/restore.py +0 -469
  48. portal/ops/secrets.py +0 -408
  49. portal/ops/upgrade.py +0 -591
  50. portal/tenants.py +0 -340
  51. portal/uploads.py +0 -259
  52. portal/web.py +0 -384
  53. {kekkai_cli-1.0.5.dist-info → kekkai_cli-1.1.1.dist-info}/WHEEL +0 -0
kekkai/report/unified.py ADDED
@@ -0,0 +1,226 @@
+"""Unified report generation for Kekkai scan results.
+
+Aggregates findings from multiple scanners into a single JSON report
+with security-hardened validation and resource limits (ASVS V10.3.3).
+"""
+
+from __future__ import annotations
+
+import contextlib
+import json
+import os
+import tempfile
+from datetime import UTC, datetime
+from pathlib import Path
+from typing import TYPE_CHECKING, Any
+
+from kekkai_core import redact
+
+if TYPE_CHECKING:
+    from ..scanners.base import Finding, ScanResult
+
+__all__ = [
+    "generate_unified_report",
+    "UnifiedReportError",
+]
+
+# Security limits per ASVS V10.3.3 (DoS mitigation)
+MAX_FINDINGS_PER_SCANNER = 10_000
+MAX_TOTAL_FINDINGS = 50_000
+MAX_JSON_SIZE_MB = 100
+
+
+class UnifiedReportError(Exception):
+    """Error during unified report generation."""
+
+
+def generate_unified_report(
+    scan_results: list[ScanResult],
+    output_path: Path,
+    run_id: str,
+    commit_sha: str | None = None,
+) -> dict[str, Any]:
+    """Generate unified kekkai-report.json from scan results.
+
+    Aggregates findings from all scanners with security controls:
+    - Resource limits (ASVS V10.3.3): 10k findings/scanner, 50k total
+    - Sensitive data redaction (ASVS V8.3.4)
+    - Atomic writes with safe permissions (ASVS V12.3.1)
+    - Path validation (ASVS V5.3.3)
+
+    Args:
+        scan_results: List of scanner results to aggregate.
+        output_path: Path to write unified report JSON.
+        run_id: Unique run identifier.
+        commit_sha: Optional git commit SHA.
+
+    Returns:
+        Report data dictionary.
+
+    Raises:
+        UnifiedReportError: If report generation fails.
+    """
+    # Aggregate findings with limits
+    all_findings: list[dict[str, Any]] = []
+    scanner_metadata: dict[str, dict[str, Any]] = {}
+    warnings: list[str] = []
+
+    for scan_res in scan_results:
+        if not scan_res.success:
+            scanner_metadata[scan_res.scanner] = {
+                "success": False,
+                "error": scan_res.error,
+                "findings_count": 0,
+                "duration_ms": scan_res.duration_ms,
+            }
+            continue
+
+        # Apply per-scanner limit (DoS mitigation)
+        findings = scan_res.findings[:MAX_FINDINGS_PER_SCANNER]
+        if len(scan_res.findings) > MAX_FINDINGS_PER_SCANNER:
+            warnings.append(
+                f"{scan_res.scanner}: truncated {len(scan_res.findings)} findings "
+                f"to {MAX_FINDINGS_PER_SCANNER} (limit)"
+            )
+
+        for finding in findings:
+            if len(all_findings) >= MAX_TOTAL_FINDINGS:
+                warnings.append(
+                    f"Reached max total findings limit ({MAX_TOTAL_FINDINGS}), stopping aggregation"
+                )
+                break
+
+            # Convert to dict with redaction (ASVS V8.3.4)
+            all_findings.append(_finding_to_dict(finding))
+
+        scanner_metadata[scan_res.scanner] = {
+            "success": scan_res.success,
+            "findings_count": len(findings),
+            "duration_ms": scan_res.duration_ms,
+        }
+
+    # Build report structure
+    report: dict[str, Any] = {
+        "version": "1.0.0",
+        "generated_at": datetime.now(UTC).isoformat(),
+        "run_id": run_id,
+        "commit_sha": commit_sha,
+        "scan_metadata": scanner_metadata,
+        "summary": _build_summary(all_findings),
+        "findings": all_findings,
+    }
+
+    if warnings:
+        report["warnings"] = warnings
+
+    # Write atomically (ASVS V12.3.1)
+    try:
+        _write_report_atomic(output_path, report)
+    except Exception as exc:
+        # ASVS V7.4.1: Don't leak full path in error
+        raise UnifiedReportError(f"Failed to write report: {exc}") from exc
+
+    return report
+
+
+def _finding_to_dict(finding: Finding) -> dict[str, Any]:
+    """Convert Finding to dictionary with redaction.
+
+    Args:
+        finding: Scanner finding object.
+
+    Returns:
+        Dictionary with redacted sensitive fields.
+    """
+    return {
+        "id": finding.dedupe_hash(),
+        "scanner": finding.scanner,
+        "title": redact(finding.title),
+        "severity": finding.severity.value,
+        "description": redact(finding.description),
+        "file_path": finding.file_path,
+        "line": finding.line,
+        "rule_id": finding.rule_id,
+        "cwe": finding.cwe,
+        "cve": finding.cve,
+        "package_name": finding.package_name,
+        "package_version": finding.package_version,
+        "fixed_version": finding.fixed_version,
+    }
+
+
+def _build_summary(findings: list[dict[str, Any]]) -> dict[str, int]:
+    """Build summary statistics from findings.
+
+    Args:
+        findings: List of finding dictionaries.
+
+    Returns:
+        Summary with total and severity counts.
+    """
+    summary = {
+        "total_findings": len(findings),
+        "critical": 0,
+        "high": 0,
+        "medium": 0,
+        "low": 0,
+        "info": 0,
+        "unknown": 0,
+    }
+
+    for finding in findings:
+        severity = finding.get("severity", "unknown")
+        if severity in summary:
+            summary[severity] += 1
+        else:
+            summary["unknown"] += 1
+
+    return summary
+
+
+def _write_report_atomic(path: Path, data: dict[str, Any]) -> None:
+    """Write JSON report atomically with permission checks.
+
+    Security controls:
+    - Size validation before writing (ASVS V10.3.3)
+    - Atomic write via temp file + rename (ASVS V12.3.1)
+    - Safe file permissions (0o644)
+
+    Args:
+        path: Output file path.
+        data: Report data to serialize.
+
+    Raises:
+        ValueError: If report exceeds size limit.
+        OSError: If write fails.
+    """
+    # Ensure parent directory exists
+    path.parent.mkdir(parents=True, exist_ok=True)
+
+    # Serialize and check size (ASVS V10.3.3)
+    json_str = json.dumps(data, indent=2, ensure_ascii=False)
+    size_mb = len(json_str.encode("utf-8")) / (1024 * 1024)
+    if size_mb > MAX_JSON_SIZE_MB:
+        raise ValueError(f"Report too large: {size_mb:.1f}MB > {MAX_JSON_SIZE_MB}MB")
+
+    # Atomic write: temp file + rename (ASVS V12.3.1)
+    temp_fd, temp_path_str = tempfile.mkstemp(
+        dir=str(path.parent), prefix=".kekkai-report-", suffix=".json.tmp"
+    )
+    temp_path = Path(temp_path_str)
+
+    try:
+        # Write to temp file
+        os.write(temp_fd, json_str.encode("utf-8"))
+        os.close(temp_fd)
+
+        # Set safe permissions (rw-r--r--)
+        os.chmod(temp_path, 0o644)
+
+        # Atomic rename
+        temp_path.rename(path)
+    except Exception:
+        # Clean up temp file on error
+        with contextlib.suppress(OSError):
+            temp_path.unlink()
+        raise
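A minimal usage sketch of the new module. The ScanResult objects come from the scanner layer (kekkai/scanners/base.py, not part of this diff), and the .kekkai/ output path is hypothetical:

    from pathlib import Path
    from uuid import uuid4

    from kekkai.report.unified import UnifiedReportError, generate_unified_report

    def write_unified(scan_results) -> None:  # scan_results: list[ScanResult]
        try:
            report = generate_unified_report(
                scan_results=scan_results,
                output_path=Path(".kekkai/kekkai-report.json"),  # hypothetical path
                run_id=uuid4().hex,
                commit_sha=None,  # optionally pass a git SHA
            )
        except UnifiedReportError as exc:
            # The error message deliberately omits the full output path (ASVS V7.4.1)
            print(f"report generation failed: {exc}")
            return
        summary = report["summary"]
        print(f"{summary['total_findings']} findings, {summary['critical']} critical")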
kekkai/scanners/container.py CHANGED
@@ -36,6 +36,19 @@ def docker_command() -> str:
     return docker
 
 
+def _resolve_image_ref(image: str, digest: str | None) -> str:
+    """Resolve the full image reference with a tag or digest.
+
+    Ensures images get an explicit :latest tag when neither a digest nor a
+    tag is provided, for reliable cross-platform pulling.
+    """
+    if digest:
+        return f"{image}@{digest}"
+    if ":" not in image:
+        return f"{image}:latest"
+    return image
+
+
 def run_container(
     config: ContainerConfig,
     repo_path: Path,
@@ -61,7 +74,7 @@ def run_container(
         user: User to run as (default: 1000:1000, None for container default)
     """
     docker = docker_command()
-    image_ref = f"{config.image}@{config.image_digest}" if config.image_digest else config.image
+    image_ref = _resolve_image_ref(config.image, config.image_digest)
 
     args = [
         docker,
@@ -73,7 +86,20 @@ def run_container(
         args.extend(["--user", user])
 
     if config.read_only:
-        args.extend(["--read-only", "--tmpfs", "/tmp:rw,noexec,nosuid,size=512m"])  # nosec B108 # noqa: S108
+        args.extend(["--read-only"])
+        # Determine uid/gid for tmpfs ownership (match container user)
+        tmpfs_opts = "rw"
+        if user:
+            uid_gid = user.split(":")[0]
+            tmpfs_opts = f"rw,uid={uid_gid},gid={uid_gid}"
+        # Core temp directory (2GB for Trivy DB ~500MB + scanner temp files)
+        args.extend(["--tmpfs", f"/tmp:{tmpfs_opts},noexec,nosuid,size=2g"])  # nosec B108 # noqa: S108
+        # Scanner cache directories (Trivy DB, Semgrep cache, etc.)
+        args.extend(["--tmpfs", f"/root:{tmpfs_opts},size=1g"])
+        # Generic home for tools that need a writable home
+        args.extend(["--tmpfs", f"/home:{tmpfs_opts},size=256m"])
+        # Set HOME env to a writable location for tools that use $HOME/.cache
+        args.extend(["-e", "HOME=/tmp"])
 
     if config.network_disabled:
         args.extend(["--network", "none"])
@@ -132,8 +158,12 @@ def run_container(
 
 
 def pull_image(image: str, digest: str | None = None) -> bool:
+    """Pull a Docker image.
+
+    Uses _resolve_image_ref to ensure proper tag handling.
+    """
     docker = docker_command()
-    ref = f"{image}@{digest}" if digest else image
+    ref = _resolve_image_ref(image, digest)
     proc = subprocess.run(  # noqa: S603 # nosec B603
         [docker, "pull", ref],
         capture_output=True,
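The tag-resolution rules are easiest to read off a few inputs. A sketch, illustrative only since _resolve_image_ref is module-private (the sha256:abc123 digest is a placeholder):

    from kekkai.scanners.container import _resolve_image_ref

    # Digest wins over any tag.
    assert _resolve_image_ref("aquasec/trivy", "sha256:abc123") == "aquasec/trivy@sha256:abc123"
    # Untagged images get an explicit :latest.
    assert _resolve_image_ref("aquasec/trivy", None) == "aquasec/trivy:latest"
    # Already-tagged images pass through unchanged.
    assert _resolve_image_ref("aquasec/trivy:0.50.1", None) == "aquasec/trivy:0.50.1"
    # Caveat: a registry host with a port also contains ":", so an untagged
    # "registry:5000/trivy" would be left without :latest by this check.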
kekkai/scanners/gitleaks.py CHANGED
@@ -16,7 +16,7 @@ from .base import Finding, ScanContext, ScanResult, Severity
 from .container import ContainerConfig, run_container
 
 GITLEAKS_IMAGE = "zricethezav/gitleaks"
-GITLEAKS_DIGEST = "sha256:691af3c7c5a48b16f187ce3446d5f194838f91238f27270ed36eef6359a574d9"
+GITLEAKS_DIGEST: str | None = None  # Allow Docker to pull architecture-appropriate image
 SCAN_TYPE = "Gitleaks Scan"
 
 
@@ -85,6 +85,7 @@ class GitleaksScanner:
             "detect",
             "--source",
             "/repo",
+            "--no-git",  # Scan all files, not just git-tracked
             "--report-format",
             "json",
             "--report-path",
@@ -125,6 +126,7 @@ class GitleaksScanner:
             "detect",
             "--source",
             str(ctx.repo_path),
+            "--no-git",  # Scan all files, not just git-tracked
             "--report-format",
             "json",
             "--report-path",
kekkai/scanners/semgrep.py CHANGED
@@ -16,7 +16,7 @@ from .base import Finding, ScanContext, ScanResult, Severity
 from .container import ContainerConfig, run_container
 
 SEMGREP_IMAGE = "returntocorp/semgrep"
-SEMGREP_DIGEST = "sha256:a5a71b85df0c65c58f13e94c0d0ce7d8e7c8d123456789abcdef0123456789ab"
+SEMGREP_DIGEST: str | None = None  # Allow Docker to pull architecture-appropriate image
 SCAN_TYPE = "Semgrep JSON Report"
 
 
kekkai/scanners/trivy.py CHANGED
@@ -15,7 +15,7 @@ from .base import Finding, ScanContext, ScanResult, Severity
 from .container import ContainerConfig, run_container
 
 TRIVY_IMAGE = "aquasec/trivy"
-TRIVY_DIGEST = "sha256:e9d62d670b10c9f78bb7c61d5c1f6e0bb32fc8bd0f6e1a7dd0c4e6b7f5df0a30"
+TRIVY_DIGEST: str | None = None  # Allow Docker to pull architecture-appropriate image
 SCAN_TYPE = "Trivy Scan"
 
 
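The same unpinning applies to all three scanners: a pinned digest gives supply-chain reproducibility, but a single-architecture digest fails to pull on hosts with a different CPU architecture, so the release trades the pins for portability. The effect on pull_image, as a sketch using the signatures shown in this diff:

    from kekkai.scanners.container import pull_image
    from kekkai.scanners.trivy import TRIVY_DIGEST, TRIVY_IMAGE

    # With TRIVY_DIGEST = None, _resolve_image_ref falls through to
    # "aquasec/trivy:latest" and Docker selects the manifest for the
    # host architecture.
    if not pull_image(TRIVY_IMAGE, TRIVY_DIGEST):
        raise SystemExit("trivy image pull failed")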
kekkai/threatflow/model_adapter.py CHANGED
@@ -396,6 +396,143 @@ class RemoteModelAdapter(ModelAdapter):
         )
 
 
+class OllamaModelAdapter(ModelAdapter):
+    """Adapter for Ollama local LLM server.
+
+    Ollama provides an easy way to run local models with a simple API.
+    Install: curl -fsSL https://ollama.ai/install.sh | sh
+    Pull model: ollama pull tinyllama
+    """
+
+    def __init__(
+        self,
+        model_name: str = "tinyllama",
+        api_base: str | None = None,
+    ) -> None:
+        self._model_name = model_name
+        self._api_base = api_base or os.environ.get("OLLAMA_HOST") or "http://localhost:11434"
+
+    @property
+    def name(self) -> str:
+        return f"ollama:{self._model_name}"
+
+    @property
+    def is_local(self) -> bool:
+        return True  # Ollama runs locally
+
+    def generate(
+        self,
+        system_prompt: str,
+        user_prompt: str,
+        config: ModelConfig | None = None,
+    ) -> ModelResponse:
+        """Generate using Ollama API."""
+        import urllib.error
+        import urllib.request
+
+        config = config or ModelConfig()
+        start_time = time.time()
+
+        url = f"{self._api_base.rstrip('/')}/api/chat"
+        model = config.model_name or self._model_name
+
+        data = {
+            "model": model,
+            "messages": [
+                {"role": "system", "content": system_prompt},
+                {"role": "user", "content": user_prompt},
+            ],
+            "stream": False,
+            "options": {
+                "temperature": config.temperature,
+                "num_predict": config.max_tokens,
+            },
+        }
+
+        headers = {"Content-Type": "application/json"}
+
+        req = urllib.request.Request(  # noqa: S310 # nosec B310
+            url,
+            data=json.dumps(data).encode("utf-8"),
+            headers=headers,
+            method="POST",
+        )
+
+        try:
+            with urllib.request.urlopen(  # noqa: S310 # nosec B310
+                req, timeout=config.timeout_seconds
+            ) as resp:
+                response_data: dict[str, Any] = json.loads(resp.read().decode("utf-8"))
+
+                content = response_data.get("message", {}).get("content", "")
+                latency_ms = int((time.time() - start_time) * 1000)
+
+                # Ollama provides token counts in some responses
+                prompt_tokens = response_data.get("prompt_eval_count", 0)
+                completion_tokens = response_data.get("eval_count", 0)
+
+                return ModelResponse(
+                    content=content,
+                    model_name=response_data.get("model", model),
+                    prompt_tokens=prompt_tokens,
+                    completion_tokens=completion_tokens,
+                    total_tokens=prompt_tokens + completion_tokens,
+                    latency_ms=latency_ms,
+                    raw_response=response_data,
+                )
+        except urllib.error.URLError as e:
+            error_msg = str(e)
+            if "Connection refused" in error_msg:
+                logger.error("Ollama not running. Start with: ollama serve")
+                return ModelResponse(
+                    content="[OLLAMA NOT RUNNING - Start with: ollama serve]",
+                    model_name=model,
+                    latency_ms=int((time.time() - start_time) * 1000),
+                )
+            logger.error("Ollama API error: %s", e)
+            return ModelResponse(
+                content="",
+                model_name=model,
+                latency_ms=int((time.time() - start_time) * 1000),
+            )
+        except Exception as e:
+            logger.error("Ollama request failed: %s", e)
+            return ModelResponse(
+                content="",
+                model_name=model,
+                latency_ms=int((time.time() - start_time) * 1000),
+            )
+
+    def health_check(self) -> bool:
+        """Check if Ollama is running and model is available."""
+        import urllib.request
+
+        try:
+            url = f"{self._api_base.rstrip('/')}/api/tags"
+            req = urllib.request.Request(url, method="GET")  # noqa: S310 # nosec B310
+            with urllib.request.urlopen(req, timeout=5) as resp:  # noqa: S310 # nosec B310
+                data: dict[str, list[dict[str, str]]] = json.loads(resp.read().decode())
+                models = [m.get("name", "") for m in data.get("models", [])]
+                # Check if our model is available (with or without :latest tag)
+                model_base = self._model_name.split(":")[0]
+                return any(model_base in m for m in models)
+        except Exception:
+            return False
+
+    def list_models(self) -> list[str]:
+        """List available models in Ollama."""
+        import urllib.request
+
+        try:
+            url = f"{self._api_base.rstrip('/')}/api/tags"
+            req = urllib.request.Request(url, method="GET")  # noqa: S310 # nosec B310
+            with urllib.request.urlopen(req, timeout=5) as resp:  # noqa: S310 # nosec B310
+                data: dict[str, list[dict[str, str]]] = json.loads(resp.read().decode())
+                return [m.get("name", "") for m in data.get("models", [])]
+        except Exception:
+            return []
+
+
 class MockModelAdapter(ModelAdapter):
     """Mock adapter for testing."""
 
@@ -461,7 +598,7 @@ def create_adapter(
     """Create a model adapter based on mode.
 
     Args:
-        mode: "local", "openai", "anthropic", or "mock"
+        mode: "local", "ollama", "openai", "anthropic", or "mock"
         config: Configuration for the adapter
 
     Returns:
@@ -473,6 +610,11 @@ def create_adapter(
         return MockModelAdapter()
     elif mode == "local":
        return LocalModelAdapter(model_path=config.model_path)
+    elif mode == "ollama":
+        return OllamaModelAdapter(
+            model_name=config.model_name or "tinyllama",
+            api_base=config.api_base,
+        )
     elif mode == "openai":
         return RemoteModelAdapter(
             api_key=config.api_key,
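A usage sketch for the new adapter, constructed directly here since the config object passed to create_adapter is not shown in this diff. Note that generate() reports errors by returning a ModelResponse with empty or placeholder content rather than raising:

    from kekkai.threatflow.model_adapter import OllamaModelAdapter

    adapter = OllamaModelAdapter(model_name="tinyllama")  # honors OLLAMA_HOST, else localhost:11434

    # health_check() hits /api/tags and looks for the model by base name.
    if not adapter.health_check():
        raise SystemExit("Ollama is not running, or tinyllama is not pulled")

    resp = adapter.generate(
        system_prompt="You are a security analyst.",
        user_prompt="Summarize the risk of hardcoded credentials.",
    )
    print(resp.content)
    print(f"{resp.total_tokens} tokens in {resp.latency_ms} ms")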
kekkai/triage/__init__.py CHANGED
@@ -4,9 +4,18 @@ Provides a terminal-based interface for reviewing findings,
 marking false positives, and generating .kekkaiignore files.
 """
 
-from .app import TriageApp, run_triage
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from collections.abc import Sequence
+    from pathlib import Path
+
+# Import models and utilities (no heavy dependencies)
 from .audit import AuditEntry, TriageAuditLog, log_decisions
 from .ignore import IgnoreEntry, IgnoreFile, IgnorePatternValidator, ValidationError
+from .loader import load_findings_from_path
 from .models import (
     FindingEntry,
     Severity,
@@ -15,6 +24,49 @@ from .models import (
     load_findings_from_json,
 )
 
+
+def run_triage(
+    input_path: Path | None = None,
+    output_path: Path | None = None,
+    findings: Sequence[FindingEntry] | None = None,
+) -> int:
+    """Run the triage TUI (lazy import).
+
+    Args:
+        input_path: Path to findings JSON file.
+        output_path: Path for .kekkaiignore output.
+        findings: Pre-loaded findings (alternative to input_path).
+
+    Returns:
+        Exit code (0 for success).
+
+    Raises:
+        RuntimeError: If Textual is not installed.
+    """
+    try:
+        from .app import run_triage as _run_triage
+
+        return _run_triage(
+            input_path=input_path,
+            output_path=output_path,
+            findings=findings,
+        )
+    except ImportError as e:
+        raise RuntimeError(
+            "Triage TUI requires 'textual'. Install with: pip install textual"
+        ) from e
+
+
+# Re-export TriageApp for compatibility (lazy)
+def __getattr__(name: str) -> type:
+    """Lazy import for TriageApp."""
+    if name == "TriageApp":
+        from .app import TriageApp
+
+        return TriageApp
+    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
+
+
 __all__ = [
     "TriageApp",
     "run_triage",
@@ -30,4 +82,5 @@ __all__ = [
     "TriageState",
     "Severity",
     "load_findings_from_json",
+    "load_findings_from_path",
 ]
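The lazy-import split can be exercised as below. A sketch: load_findings_from_path's exact signature is not shown in this diff, so the single-path call is an assumption:

    from pathlib import Path

    import kekkai.triage as triage  # no longer imports textual at module load

    # Assumed signature: load_findings_from_path(path) -> list[FindingEntry]
    findings = triage.load_findings_from_path(Path("kekkai-report.json"))

    try:
        code = triage.run_triage(findings=findings)
    except RuntimeError as exc:
        # Raised by the wrapper when textual is not installed
        print(exc)
        code = 1
    raise SystemExit(code)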