kekkai-cli 1.1.0-py3-none-any.whl → 2.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
kekkai/triage/__init__.py CHANGED
@@ -4,9 +4,18 @@ Provides a terminal-based interface for reviewing findings,
 marking false positives, and generating .kekkaiignore files.
 """
 
-from .app import TriageApp, run_triage
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from collections.abc import Sequence
+    from pathlib import Path
+
+# Import models and utilities (no heavy dependencies)
 from .audit import AuditEntry, TriageAuditLog, log_decisions
 from .ignore import IgnoreEntry, IgnoreFile, IgnorePatternValidator, ValidationError
+from .loader import load_findings_from_path
 from .models import (
     FindingEntry,
     Severity,
@@ -15,6 +24,49 @@ from .models import (
     load_findings_from_json,
 )
 
+
+def run_triage(
+    input_path: Path | None = None,
+    output_path: Path | None = None,
+    findings: Sequence[FindingEntry] | None = None,
+) -> int:
+    """Run the triage TUI (lazy import).
+
+    Args:
+        input_path: Path to findings JSON file.
+        output_path: Path for .kekkaiignore output.
+        findings: Pre-loaded findings (alternative to input_path).
+
+    Returns:
+        Exit code (0 for success).
+
+    Raises:
+        RuntimeError: If Textual is not installed.
+    """
+    try:
+        from .app import run_triage as _run_triage
+
+        return _run_triage(
+            input_path=input_path,
+            output_path=output_path,
+            findings=findings,
+        )
+    except ImportError as e:
+        raise RuntimeError(
+            "Triage TUI requires 'textual'. Install with: pip install textual"
+        ) from e
+
+
+# Re-export TriageApp for compatibility (lazy)
+def __getattr__(name: str) -> type:
+    """Lazy import for TriageApp."""
+    if name == "TriageApp":
+        from .app import TriageApp
+
+        return TriageApp
+    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
+
+
 __all__ = [
     "TriageApp",
     "run_triage",
@@ -30,4 +82,5 @@ __all__ = [
     "TriageState",
     "Severity",
     "load_findings_from_json",
+    "load_findings_from_path",
 ]
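The net effect of the `__init__.py` rewrite is that `import kekkai.triage` no longer drags in Textual; the TUI dependency is resolved only when the app is actually requested, via the new `run_triage` wrapper or the PEP 562 module `__getattr__` for `TriageApp`. A rough usage sketch of the 2.0.0 entry points (the `scan-output` path is illustrative only):

from pathlib import Path

from kekkai.triage import load_findings_from_path, run_triage

# Loading findings requires no TUI dependency; problems come back as
# filename-only messages rather than exceptions.
findings, errors = load_findings_from_path(Path("scan-output"))
for msg in errors:
    print(f"skipped: {msg}")

# run_triage() imports Textual lazily. In 1.1.0 the eager
# "from .app import ..." made a missing Textual fail at import time;
# now it surfaces here as a RuntimeError with an install hint.
try:
    raise SystemExit(run_triage(findings=findings, output_path=Path(".kekkaiignore")))
except RuntimeError as exc:
    print(exc)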
@@ -0,0 +1,232 @@
+"""Fix generation screen for AI-powered code fixes.
+
+Provides a modal screen that shows fix generation progress
+and preview of AI-generated patches.
+"""
+
+from __future__ import annotations
+
+import os
+from collections.abc import Callable
+from typing import TYPE_CHECKING
+
+from rich.text import Text
+from textual.app import ComposeResult
+from textual.binding import Binding
+from textual.containers import Vertical, VerticalScroll
+from textual.screen import ModalScreen
+from textual.widgets import Footer, Label, Static
+
+if TYPE_CHECKING:
+    from .models import FindingEntry
+
+__all__ = ["FixGenerationScreen"]
+
+
+class FixGenerationScreen(ModalScreen[bool]):
+    """Modal screen for generating AI-powered fixes.
+
+    Shows progress, model configuration, and fix preview.
+
+    Bindings:
+        escape: Cancel and go back
+        enter: Accept fix (if generated)
+    """
+
+    BINDINGS = [
+        Binding("escape", "cancel", "Cancel"),
+        Binding("enter", "accept", "Accept Fix", show=False),
+    ]
+
+    DEFAULT_CSS = """
+    FixGenerationScreen {
+        align: center middle;
+    }
+    #fix-dialog {
+        width: 80;
+        height: 30;
+        border: thick $primary;
+        background: $surface;
+        padding: 1;
+    }
+    #fix-title {
+        dock: top;
+        height: 3;
+        content-align: center middle;
+        background: $primary;
+        color: $text;
+    }
+    #fix-content {
+        height: 1fr;
+        padding: 1;
+    }
+    #fix-preview {
+        height: 1fr;
+        border: solid $accent;
+        padding: 1;
+        background: $panel;
+    }
+    #fix-status {
+        dock: bottom;
+        height: 3;
+        padding: 1;
+        background: $surface;
+    }
+    .fix-button {
+        margin: 1;
+    }
+    """
+
+    def __init__(
+        self,
+        finding: FindingEntry,
+        on_fix_generated: Callable[[bool, str], None] | None = None,
+        name: str | None = None,
+        id: str | None = None,
+    ) -> None:
+        super().__init__(name=name, id=id)
+        self.finding = finding
+        self.on_fix_generated = on_fix_generated
+        self.fix_preview: str | None = None
+        self.fix_generated = False
+
+    def compose(self) -> ComposeResult:
+        with Vertical(id="fix-dialog"):
+            yield Label("🤖 AI-Powered Fix Generation", id="fix-title")
+            with VerticalScroll(id="fix-content"):
+                yield Label(self._get_finding_summary())
+                yield Label(self._get_model_info())
+                yield Static("", id="fix-preview")
+            yield Static(self._get_initial_status(), id="fix-status")
+        yield Footer()
+
+    def _get_finding_summary(self) -> Text:
+        """Generate summary of finding to fix."""
+        text = Text()
+        text.append("Finding:\n", style="bold")
+        text.append(f" {self.finding.scanner}: ", style="cyan")
+        text.append(f"{self.finding.title}\n")
+        if self.finding.file_path:
+            text.append(f" File: {self.finding.file_path}", style="dim")
+            if self.finding.line:
+                text.append(f":{self.finding.line}", style="dim")
+            text.append("\n")
+        return text
+
+    def _get_model_info(self) -> Text:
+        """Display model configuration info."""
+        text = Text()
+        text.append("\nModel Configuration:\n", style="bold")
+
+        # Check for Ollama
+        if self._is_ollama_available():
+            text.append(" ✓ Ollama detected (local-first AI)\n", style="green")
+            text.append(" No API keys needed - runs on your machine\n", style="dim")
+        # Check for API keys
+        elif os.environ.get("KEKKAI_FIX_API_KEY"):
+            text.append(" ⚠ Using remote API (OpenAI/Anthropic)\n", style="yellow")
+            text.append(" Code will be sent to external service\n", style="dim")
+        else:
+            text.append(" ✗ No AI backend configured\n", style="red")
+            text.append(" Install Ollama or set KEKKAI_FIX_API_KEY\n", style="dim")
+
+        return text
+
+    def _get_initial_status(self) -> Text:
+        """Initial status message."""
+        if self._is_ollama_available() or os.environ.get("KEKKAI_FIX_API_KEY"):
+            return Text("Press Enter to generate fix, or Escape to cancel", style="italic")
+        else:
+            return Text(
+                "❌ Cannot generate fix: No AI backend configured\n"
+                "Install Ollama (recommended) or set KEKKAI_FIX_API_KEY",
+                style="red",
+            )
+
+    def _is_ollama_available(self) -> bool:
+        """Check if Ollama is available on the system."""
+        import shutil
+
+        return shutil.which("ollama") is not None
+
+    def on_mount(self) -> None:
+        """Auto-generate fix if backend is available."""
+        if self._is_ollama_available() or os.environ.get("KEKKAI_FIX_API_KEY"):
+            # Auto-start fix generation
+            self.set_timer(0.5, self._generate_fix)
+
+    def _generate_fix(self) -> None:
+        """Generate AI-powered fix."""
+        status = self.query_one("#fix-status", Static)
+        status.update(Text("⏳ Generating fix with AI...", style="yellow italic"))
+
+        try:
+            # Import fix engine
+            from ..fix import FixConfig
+
+            # Determine model mode
+            if self._is_ollama_available():
+                model_mode = "ollama"
+                model_name = os.environ.get("KEKKAI_FIX_MODEL_NAME", "mistral")
+            else:
+                model_mode = "openai"
+                model_name = None
+
+            # Create fix config (for future integration)
+            _config = FixConfig(
+                model_mode=model_mode,
+                model_name=model_name,
+                api_key=os.environ.get("KEKKAI_FIX_API_KEY"),
+                max_fixes=1,
+                timeout_seconds=60,
+                dry_run=True,
+            )
+
+            # Note: This is a simplified mock - actual implementation would:
+            # 1. Convert FindingEntry to proper format for FixEngine
+            # 2. Call fix engine with proper error handling
+            # 3. Display actual fix preview
+
+            # For now, show a placeholder
+            self.fix_preview = (
+                "# AI-Powered Fix (Preview)\n"
+                f"# Finding: {self.finding.title}\n"
+                f"# Scanner: {self.finding.scanner}\n\n"
+                "# Fix would be generated here using:\n"
+                f"# - Model: {model_name or 'gpt-4'}\n"
+                f"# - Mode: {model_mode}\n"
+                "# - Context from source file\n\n"
+                "# Press Enter to apply (dry-run mode)\n"
+                "# Press Escape to cancel"
+            )
+
+            preview = self.query_one("#fix-preview", Static)
+            preview.update(Text(self.fix_preview, style="green"))
+
+            status.update(
+                Text("✓ Fix generated! Press Enter to apply or Escape to cancel", style="green")
+            )
+            self.fix_generated = True
+
+        except Exception as e:
+            status.update(Text(f"✗ Fix generation failed: {e}", style="red"))
+            self.fix_generated = False
+
+    def action_accept(self) -> None:
+        """Accept and apply the generated fix."""
+        if not self.fix_generated:
+            return
+
+        if self.on_fix_generated:
+            self.on_fix_generated(
+                True, "Fix generated successfully (dry-run mode - review before applying)"
+            )
+
+        self.dismiss(True)
+
+    def action_cancel(self) -> None:
+        """Cancel fix generation."""
+        if self.on_fix_generated:
+            self.on_fix_generated(False, "Fix generation cancelled")
+
+        self.dismiss(False)
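`FixGenerationScreen` dismisses with a `bool`, so a caller can react both through the `on_fix_generated` hook and through Textual's `push_screen` callback. A hedged sketch of how a list screen might launch it; the import path for `FixGenerationScreen` and the `current_finding` accessor are assumptions, and only the constructor signature and the `bool` dismiss value come from the diff:

from textual.screen import Screen


class FindingListSketch(Screen[None]):
    def action_fix_with_ai(self) -> None:
        def on_hook(accepted: bool, message: str) -> None:
            # on_fix_generated fires just before the modal dismisses.
            self.notify(message)

        def on_dismiss(accepted: bool | None) -> None:
            # Receives the value passed to dismiss(True/False).
            if accepted:
                self.refresh()

        # FixGenerationScreen import omitted: its module path is not
        # visible in this diff.
        self.app.push_screen(
            FixGenerationScreen(self.current_finding, on_fix_generated=on_hook),
            on_dismiss,
        )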
kekkai/triage/loader.py ADDED
@@ -0,0 +1,196 @@
+"""Triage findings loader with scanner format detection.
+
+Supports loading findings from:
+- Native triage JSON (list or {"findings": [...]})
+- Raw scanner outputs (Semgrep/Trivy/Gitleaks)
+- Run directories (aggregates all *-results.json)
+"""
+
+from __future__ import annotations
+
+import json
+from pathlib import Path
+from typing import TYPE_CHECKING, Any
+
+if TYPE_CHECKING:
+    from ..scanners.base import Finding
+    from ..scanners.base import Severity as ScannerSeverity
+
+from .models import FindingEntry
+from .models import Severity as TriageSeverity
+
+__all__ = [
+    "load_findings_from_path",
+]
+
+# Size limits for DoS mitigation (ASVS V10.3.3)
+MAX_FILE_SIZE_MB = 200
+WARN_FILE_SIZE_MB = 50
+
+
+def load_findings_from_path(
+    path: Path,
+) -> tuple[list[FindingEntry], list[str]]:
+    """Load findings from file or directory.
+
+    Supports:
+    - Unified report (kekkai-report.json) - PREFERRED
+    - Native triage JSON (list or {"findings": [...]})
+    - Raw scanner outputs (Semgrep/Trivy/Gitleaks)
+    - Run directories (aggregates all *-results.json)
+
+    Priority:
+    1. kekkai-report.json (unified report)
+    2. *-results.json (individual scanner outputs)
+    3. Any other JSON files (excluding metadata)
+
+    Args:
+        path: Path to findings file or run directory.
+
+    Returns:
+        Tuple of (findings, error_messages).
+        Error messages include filename only (no full paths) per ASVS V7.4.1.
+    """
+    errors: list[str] = []
+
+    # Determine input type
+    if path.is_dir():
+        # Priority 1: Check for unified report first
+        unified_report = path / "kekkai-report.json"
+        if unified_report.exists():
+            files = [unified_report]
+        else:
+            # Priority 2: Prefer canonical scan outputs
+            files = sorted(path.glob("*-results.json"))
+            if not files:
+                # Priority 3: Fallback to all JSON (excluding metadata files)
+                files = sorted(
+                    [
+                        p
+                        for p in path.glob("*.json")
+                        if p.name not in ("run.json", "policy-result.json")
+                    ]
+                )
+    else:
+        files = [path]
+
+    findings: list[FindingEntry] = []
+    for file in files:
+        # Check if file exists first
+        if not file.exists():
+            errors.append(f"{file.name}: OSError")
+            continue
+
+        # Size check (DoS mitigation per ASVS V10.3.3)
+        size_mb = file.stat().st_size / (1024 * 1024)
+        if size_mb > MAX_FILE_SIZE_MB:
+            msg = f"{file.name}: file too large ({size_mb:.1f} MB, max {MAX_FILE_SIZE_MB} MB)"
+            errors.append(msg)
+            continue
+
+        try:
+            content = file.read_text(encoding="utf-8")
+            if not content.strip():
+                continue
+            data = json.loads(content)
+        except (OSError, json.JSONDecodeError) as exc:
+            # ASVS V7.4.1: Don't leak full path, only filename
+            errors.append(f"{file.name}: {type(exc).__name__}")
+            continue
+
+        # Detect format and parse
+        try:
+            batch = _parse_findings(data, file.stem)
+            findings.extend(batch)
+        except Exception as exc:
+            errors.append(f"{file.name}: unsupported format ({str(exc)[:50]})")
+
+    # Deduplicate by stable key
+    seen: set[str] = set()
+    deduped: list[FindingEntry] = []
+    for f in findings:
+        key = f"{f.scanner}:{f.rule_id}:{f.file_path}:{f.line}"
+        if key not in seen:
+            seen.add(key)
+            deduped.append(f)
+
+    return deduped, errors
+
+
+def _parse_findings(data: Any, stem: str) -> list[FindingEntry]:
+    """Parse findings from JSON data.
+
+    Args:
+        data: Parsed JSON data.
+        stem: File stem (used to detect scanner type).
+
+    Returns:
+        List of FindingEntry objects.
+
+    Raises:
+        ValueError: If format is unknown or scanner not found.
+    """
+    # Try native triage format first (ASVS V5.1.2: strongly typed validation)
+    if isinstance(data, list) and data and isinstance(data[0], dict) and "scanner" in data[0]:
+        return [FindingEntry.from_dict(item) for item in data]
+
+    if isinstance(data, dict) and "findings" in data:
+        findings_data = data["findings"]
+        if isinstance(findings_data, list):
+            return [FindingEntry.from_dict(item) for item in findings_data]
+
+    # Try scanner-specific format
+    scanner_name = stem.replace("-results", "")
+
+    # Lazy import to avoid circular dependency
+    from ..cli import _create_scanner
+
+    scanner = _create_scanner(scanner_name)
+    if not scanner:
+        raise ValueError(f"Unknown scanner: {scanner_name}")
+
+    # Use canonical scanner parser (reuses validated logic)
+    raw_json = json.dumps(data)
+    canonical_findings = scanner.parse(raw_json)
+
+    # Convert to triage format
+    return [_finding_to_entry(f) for f in canonical_findings]
+
+
+def _finding_to_entry(f: Finding) -> FindingEntry:
+    """Convert scanner Finding to triage FindingEntry.
+
+    Args:
+        f: Scanner Finding object.
+
+    Returns:
+        Triage FindingEntry object.
+    """
+    return FindingEntry(
+        id=f.dedupe_hash(),
+        title=f.title,
+        severity=_map_severity(f.severity),
+        scanner=f.scanner,
+        file_path=f.file_path or "",
+        line=f.line,
+        description=f.description,
+        rule_id=f.rule_id or "",
+    )
+
+
+def _map_severity(s: ScannerSeverity) -> TriageSeverity:
+    """Map scanner Severity to triage Severity.
+
+    Both use the same enum values, just different type namespaces.
+
+    Args:
+        s: Scanner severity enum.
+
+    Returns:
+        Triage severity enum.
+    """
+    try:
+        return TriageSeverity(s.value)
+    except ValueError:
+        # Fallback to INFO for unknown severities
+        return TriageSeverity.INFO
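For the native triage format, the loader keys off a `scanner` field in the first list item and routes everything through `FindingEntry.from_dict`. A sketch of feeding it a hand-written findings file; the field names are inferred from the `FindingEntry` attributes used above, and the exact `from_dict` schema is not shown in this diff:

import json
from pathlib import Path

from kekkai.triage import load_findings_from_path

# A JSON list whose first item carries a "scanner" key is treated as
# native triage format (see _parse_findings above).
sample = [
    {
        "id": "demo-1",
        "scanner": "semgrep",
        "rule_id": "python.lang.security.audit.eval-detected",
        "title": "eval() detected",
        "severity": "high",
        "file_path": "app/views.py",
        "line": 12,
        "description": "Avoid eval() on user input.",
    }
]
Path("findings.json").write_text(json.dumps(sample), encoding="utf-8")

findings, errors = load_findings_from_path(Path("findings.json"))
print(len(findings), errors)
# Duplicate entries sharing the same scanner:rule_id:file_path:line key
# would be collapsed by the dedupe pass.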
kekkai/triage/screens.py CHANGED
@@ -49,6 +49,7 @@ class FindingListScreen(Screen[None]):
         Binding("down", "cursor_down", "Next", show=False),
         Binding("up", "cursor_up", "Previous", show=False),
         Binding("enter", "view_detail", "View"),
+        Binding("x", "fix_with_ai", "🤖 Fix with AI"),
         Binding("f", "mark_false_positive", "False Positive"),
         Binding("c", "mark_confirmed", "Confirmed"),
         Binding("d", "mark_deferred", "Deferred"),