kekkai-cli 1.0.0 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kekkai/__init__.py +7 -0
- kekkai/cli.py +1038 -0
- kekkai/config.py +403 -0
- kekkai/dojo.py +419 -0
- kekkai/dojo_import.py +213 -0
- kekkai/github/__init__.py +16 -0
- kekkai/github/commenter.py +198 -0
- kekkai/github/models.py +56 -0
- kekkai/github/sanitizer.py +112 -0
- kekkai/installer/__init__.py +39 -0
- kekkai/installer/errors.py +23 -0
- kekkai/installer/extract.py +161 -0
- kekkai/installer/manager.py +252 -0
- kekkai/installer/manifest.py +189 -0
- kekkai/installer/verify.py +86 -0
- kekkai/manifest.py +77 -0
- kekkai/output.py +218 -0
- kekkai/paths.py +46 -0
- kekkai/policy.py +326 -0
- kekkai/runner.py +70 -0
- kekkai/scanners/__init__.py +67 -0
- kekkai/scanners/backends/__init__.py +14 -0
- kekkai/scanners/backends/base.py +73 -0
- kekkai/scanners/backends/docker.py +178 -0
- kekkai/scanners/backends/native.py +240 -0
- kekkai/scanners/base.py +110 -0
- kekkai/scanners/container.py +144 -0
- kekkai/scanners/falco.py +237 -0
- kekkai/scanners/gitleaks.py +237 -0
- kekkai/scanners/semgrep.py +227 -0
- kekkai/scanners/trivy.py +246 -0
- kekkai/scanners/url_policy.py +163 -0
- kekkai/scanners/zap.py +340 -0
- kekkai/threatflow/__init__.py +94 -0
- kekkai/threatflow/artifacts.py +476 -0
- kekkai/threatflow/chunking.py +361 -0
- kekkai/threatflow/core.py +438 -0
- kekkai/threatflow/mermaid.py +374 -0
- kekkai/threatflow/model_adapter.py +491 -0
- kekkai/threatflow/prompts.py +277 -0
- kekkai/threatflow/redaction.py +228 -0
- kekkai/threatflow/sanitizer.py +643 -0
- kekkai/triage/__init__.py +33 -0
- kekkai/triage/app.py +168 -0
- kekkai/triage/audit.py +203 -0
- kekkai/triage/ignore.py +269 -0
- kekkai/triage/models.py +185 -0
- kekkai/triage/screens.py +341 -0
- kekkai/triage/widgets.py +169 -0
- kekkai_cli-1.0.0.dist-info/METADATA +135 -0
- kekkai_cli-1.0.0.dist-info/RECORD +90 -0
- kekkai_cli-1.0.0.dist-info/WHEEL +5 -0
- kekkai_cli-1.0.0.dist-info/entry_points.txt +3 -0
- kekkai_cli-1.0.0.dist-info/top_level.txt +3 -0
- kekkai_core/__init__.py +3 -0
- kekkai_core/ci/__init__.py +11 -0
- kekkai_core/ci/benchmarks.py +354 -0
- kekkai_core/ci/metadata.py +104 -0
- kekkai_core/ci/validators.py +92 -0
- kekkai_core/docker/__init__.py +17 -0
- kekkai_core/docker/metadata.py +153 -0
- kekkai_core/docker/sbom.py +173 -0
- kekkai_core/docker/security.py +158 -0
- kekkai_core/docker/signing.py +135 -0
- kekkai_core/redaction.py +84 -0
- kekkai_core/slsa/__init__.py +13 -0
- kekkai_core/slsa/verify.py +121 -0
- kekkai_core/windows/__init__.py +29 -0
- kekkai_core/windows/chocolatey.py +335 -0
- kekkai_core/windows/installer.py +256 -0
- kekkai_core/windows/scoop.py +165 -0
- kekkai_core/windows/validators.py +220 -0
- portal/__init__.py +19 -0
- portal/api.py +155 -0
- portal/auth.py +103 -0
- portal/enterprise/__init__.py +32 -0
- portal/enterprise/audit.py +435 -0
- portal/enterprise/licensing.py +342 -0
- portal/enterprise/rbac.py +276 -0
- portal/enterprise/saml.py +595 -0
- portal/ops/__init__.py +53 -0
- portal/ops/backup.py +553 -0
- portal/ops/log_shipper.py +469 -0
- portal/ops/monitoring.py +517 -0
- portal/ops/restore.py +469 -0
- portal/ops/secrets.py +408 -0
- portal/ops/upgrade.py +591 -0
- portal/tenants.py +340 -0
- portal/uploads.py +259 -0
- portal/web.py +384 -0
kekkai/triage/app.py
ADDED
@@ -0,0 +1,168 @@

"""Main Textual application for triage TUI.

Provides the entry point for interactive finding triage with
keyboard-driven navigation and ignore file generation.
"""

from __future__ import annotations

import json
from pathlib import Path
from typing import TYPE_CHECKING

from textual.app import App

from .audit import TriageAuditLog
from .ignore import IgnoreFile
from .models import FindingEntry, TriageDecision, TriageState, load_findings_from_json
from .screens import FindingListScreen

if TYPE_CHECKING:
    from collections.abc import Sequence

__all__ = [
    "TriageApp",
    "run_triage",
]


class TriageApp(App[None]):
    """Interactive triage application for security findings.

    Allows reviewing findings, marking false positives, and
    generating .kekkaiignore files.

    Attributes:
        findings: List of findings to triage.
        ignore_file: IgnoreFile manager for output.
        audit_log: Audit log for recording decisions.
    """

    TITLE = "Kekkai Triage"
    CSS = """
    Screen {
        background: $background;
    }
    """

    def __init__(
        self,
        findings: Sequence[FindingEntry] | None = None,
        input_path: Path | None = None,
        output_path: Path | None = None,
        audit_path: Path | None = None,
    ) -> None:
        """Initialize triage application.

        Args:
            findings: Pre-loaded findings to triage.
            input_path: Path to findings JSON file.
            output_path: Path for .kekkaiignore output.
            audit_path: Path for audit log.
        """
        super().__init__()
        self._input_path = input_path
        self._findings_list: list[FindingEntry] = list(findings) if findings else []
        self.ignore_file = IgnoreFile(output_path)
        self.audit_log = TriageAuditLog(audit_path)
        self._decisions: dict[str, TriageDecision] = {}

    @property
    def findings(self) -> list[FindingEntry]:
        """Get findings list, loading from file if needed."""
        if not self._findings_list and self._input_path:
            self._load_findings()
        return self._findings_list

    def _load_findings(self) -> None:
        """Load findings from input file."""
        if not self._input_path or not self._input_path.exists():
            return

        try:
            content = self._input_path.read_text(encoding="utf-8")
            data = json.loads(content)

            if isinstance(data, list):
                self._findings_list = load_findings_from_json(data)
            elif isinstance(data, dict) and "findings" in data:
                self._findings_list = load_findings_from_json(data["findings"])
        except (json.JSONDecodeError, KeyError, TypeError):
            self._findings_list = []

    def on_mount(self) -> None:
        """Handle app mount."""
        self.push_screen(
            FindingListScreen(
                findings=self.findings,
                on_state_change=self._handle_state_change,
                on_save=self._handle_save,
            )
        )

    def _handle_state_change(self, index: int, state: TriageState) -> None:
        """Handle finding state change.

        Args:
            index: Finding index.
            state: New triage state.
        """
        if index >= len(self.findings):
            return

        finding = self.findings[index]
        ignore_pattern = None

        if state == TriageState.FALSE_POSITIVE:
            ignore_pattern = finding.generate_ignore_pattern()

        decision = TriageDecision(
            finding_id=finding.id,
            state=state,
            reason=finding.notes,
            ignore_pattern=ignore_pattern,
        )

        self._decisions[finding.id] = decision
        self.audit_log.log_decision(decision)

    def _handle_save(self) -> None:
        """Handle save action."""
        self.ignore_file.load()

        for finding in self.findings:
            if finding.state == TriageState.FALSE_POSITIVE:
                pattern = finding.generate_ignore_pattern()
                if not self.ignore_file.has_pattern(pattern):
                    self.ignore_file.add_entry(
                        pattern=pattern,
                        comment=finding.notes[:100] if finding.notes else finding.title[:100],
                        finding_id=finding.id,
                    )

        self.ignore_file.save()
        self.audit_log.log_action("save_ignore_file", finding_id="*")


def run_triage(
    input_path: Path | None = None,
    output_path: Path | None = None,
    findings: Sequence[FindingEntry] | None = None,
) -> int:
    """Run the triage TUI.

    Args:
        input_path: Path to findings JSON file.
        output_path: Path for .kekkaiignore output.
        findings: Pre-loaded findings (alternative to input_path).

    Returns:
        Exit code (0 for success).
    """
    app = TriageApp(
        findings=findings,
        input_path=input_path,
        output_path=output_path,
    )
    app.run()
    return 0
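Usage note (not part of the published diff): a minimal sketch of how the run_triage entry point above could be driven, assuming a findings export on disk; the file paths are illustrative.

from pathlib import Path

from kekkai.triage.app import run_triage

# Findings may be a bare JSON list or an object with a "findings" key,
# per TriageApp._load_findings above.
exit_code = run_triage(
    input_path=Path("kekkai-findings.json"),  # hypothetical export path
    output_path=Path(".kekkaiignore"),        # false-positive patterns are written here
)
raise SystemExit(exit_code)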
kekkai/triage/audit.py
ADDED
@@ -0,0 +1,203 @@

"""Audit logging for triage decisions.

Provides append-only audit trail for all triage decisions to
support non-repudiation and compliance requirements.
"""

from __future__ import annotations

import json
import os
from datetime import UTC, datetime
from pathlib import Path
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from collections.abc import Sequence

    from .models import TriageDecision

__all__ = [
    "TriageAuditLog",
    "AuditEntry",
]


class AuditEntry:
    """A single audit log entry.

    Attributes:
        timestamp: When the action occurred (ISO format).
        action: The action performed (e.g., "mark_false_positive").
        finding_id: ID of the affected finding.
        user: User who performed the action.
        details: Additional action details.
    """

    def __init__(
        self,
        action: str,
        finding_id: str,
        user: str = "",
        details: dict[str, str] | None = None,
        timestamp: str | None = None,
    ) -> None:
        self.timestamp = timestamp or datetime.now(UTC).isoformat()
        self.action = action
        self.finding_id = finding_id
        self.user = user or os.environ.get("USER", "unknown")
        self.details = details or {}

    def to_dict(self) -> dict[str, str | dict[str, str]]:
        """Convert to dictionary for JSON serialization."""
        return {
            "timestamp": self.timestamp,
            "action": self.action,
            "finding_id": self.finding_id,
            "user": self.user,
            "details": self.details,
        }

    def to_json(self) -> str:
        """Convert to JSON string."""
        return json.dumps(self.to_dict(), separators=(",", ":"))

    @classmethod
    def from_dict(cls, data: dict[str, str | dict[str, str]]) -> AuditEntry:
        """Create from dictionary."""
        details_raw = data.get("details", {})
        details = dict(details_raw) if isinstance(details_raw, dict) else {}
        return cls(
            timestamp=str(data.get("timestamp", "")),
            action=str(data.get("action", "")),
            finding_id=str(data.get("finding_id", "")),
            user=str(data.get("user", "")),
            details={str(k): str(v) for k, v in details.items()},
        )


class TriageAuditLog:
    """Append-only audit log for triage decisions.

    Stores entries in JSON Lines format (.jsonl) for easy parsing
    and tamper evidence.

    Attributes:
        path: Path to the audit log file.
    """

    DEFAULT_PATH = Path.home() / ".kekkai" / "triage-audit.jsonl"

    def __init__(self, path: Path | None = None) -> None:
        """Initialize audit log.

        Args:
            path: Path to audit log file. Defaults to ~/.kekkai/triage-audit.jsonl.
        """
        self.path = path or self.DEFAULT_PATH
        self.path.parent.mkdir(parents=True, exist_ok=True)

    def log(self, entry: AuditEntry) -> None:
        """Append an entry to the audit log.

        Args:
            entry: The audit entry to log.
        """
        with self.path.open("a", encoding="utf-8") as f:
            f.write(entry.to_json() + "\n")

    def log_decision(self, decision: TriageDecision) -> None:
        """Log a triage decision.

        Args:
            decision: The triage decision to log.
        """
        entry = AuditEntry(
            action=f"triage_{decision.state.value}",
            finding_id=decision.finding_id,
            user=decision.user,
            details={
                "reason": decision.reason,
                "ignore_pattern": decision.ignore_pattern or "",
            },
        )
        self.log(entry)

    def log_action(
        self,
        action: str,
        finding_id: str,
        details: dict[str, str] | None = None,
    ) -> None:
        """Log a generic action.

        Args:
            action: Action name.
            finding_id: Affected finding ID.
            details: Additional details.
        """
        entry = AuditEntry(
            action=action,
            finding_id=finding_id,
            details=details,
        )
        self.log(entry)

    def read_all(self) -> list[AuditEntry]:
        """Read all entries from the log.

        Returns:
            List of all audit entries.
        """
        entries: list[AuditEntry] = []

        if not self.path.exists():
            return entries

        with self.path.open("r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                try:
                    data = json.loads(line)
                    entries.append(AuditEntry.from_dict(data))
                except json.JSONDecodeError:
                    continue

        return entries

    def read_for_finding(self, finding_id: str) -> list[AuditEntry]:
        """Read entries for a specific finding.

        Args:
            finding_id: The finding ID to filter by.

        Returns:
            List of matching audit entries.
        """
        return [e for e in self.read_all() if e.finding_id == finding_id]

    def get_recent(self, count: int = 100) -> list[AuditEntry]:
        """Get most recent entries.

        Args:
            count: Maximum number of entries to return.

        Returns:
            List of recent audit entries (newest last).
        """
        all_entries = self.read_all()
        return all_entries[-count:] if len(all_entries) > count else all_entries


def log_decisions(decisions: Sequence[TriageDecision], log_path: Path | None = None) -> None:
    """Log multiple triage decisions.

    Args:
        decisions: Decisions to log.
        log_path: Optional custom log path.
    """
    audit_log = TriageAuditLog(log_path)
    for decision in decisions:
        audit_log.log_decision(decision)
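Usage note (not part of the published diff): a minimal sketch of the JSON Lines audit trail that TriageAuditLog maintains, assuming a temporary log path and an invented finding ID; the real default path is ~/.kekkai/triage-audit.jsonl.

from pathlib import Path

from kekkai.triage.audit import AuditEntry, TriageAuditLog

log = TriageAuditLog(Path("/tmp/triage-audit.jsonl"))  # hypothetical path for illustration
log.log(
    AuditEntry(
        action="mark_false_positive",      # example action name from the class docstring
        finding_id="finding-123",          # hypothetical finding ID
        details={"reason": "test fixture only", "ignore_pattern": "semgrep:rule-x:tests/*"},
    )
)

# Each call appends one compact JSON object per line; entries read back in order.
for entry in log.get_recent(10):
    print(entry.timestamp, entry.action, entry.finding_id)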
kekkai/triage/ignore.py
ADDED
@@ -0,0 +1,269 @@

"""Ignore file management for triage decisions.

Provides validation and I/O for .kekkaiignore files with strict
security controls against injection attacks.
"""

from __future__ import annotations

import re
from dataclasses import dataclass
from pathlib import Path
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from collections.abc import Sequence

__all__ = [
    "IgnorePatternValidator",
    "IgnoreFile",
    "ValidationError",
]

VALID_PATTERN_CHARS = re.compile(r"^[a-zA-Z0-9_./*\-:]+$")
PATH_TRAVERSAL_PATTERN = re.compile(r"(^|/)\.\.(/|$)")
DANGEROUS_PATTERNS = [
    "..",
    "~",
    "$",
    "`",
    ";",
    "&",
    "|",
    ">",
    "<",
    "\\",
]


class ValidationError(Exception):
    """Raised when pattern validation fails."""


@dataclass
class IgnorePatternValidator:
    """Validates ignore patterns against security constraints.

    Enforces:
    - No path traversal (../)
    - Allowlisted characters only
    - Maximum pattern length
    - No shell metacharacters
    """

    max_pattern_length: int = 500

    def is_valid(self, pattern: str) -> bool:
        """Check if a pattern is valid.

        Args:
            pattern: The ignore pattern to validate.

        Returns:
            True if valid, False otherwise.
        """
        if not pattern or not pattern.strip():
            return False

        if len(pattern) > self.max_pattern_length:
            return False

        if PATH_TRAVERSAL_PATTERN.search(pattern):
            return False

        for dangerous in DANGEROUS_PATTERNS:
            if dangerous in pattern:
                return False

        return bool(VALID_PATTERN_CHARS.match(pattern))

    def validate(self, pattern: str) -> str:
        """Validate and return pattern or raise error.

        Args:
            pattern: The ignore pattern to validate.

        Returns:
            The validated pattern (stripped).

        Raises:
            ValidationError: If pattern is invalid.
        """
        pattern = pattern.strip()

        if not pattern:
            raise ValidationError("Empty pattern")

        if len(pattern) > self.max_pattern_length:
            raise ValidationError(f"Pattern exceeds max length ({self.max_pattern_length})")

        if PATH_TRAVERSAL_PATTERN.search(pattern):
            raise ValidationError("Path traversal not allowed")

        for dangerous in DANGEROUS_PATTERNS:
            if dangerous in pattern:
                raise ValidationError(f"Dangerous character not allowed: {dangerous!r}")

        if not VALID_PATTERN_CHARS.match(pattern):
            raise ValidationError("Pattern contains invalid characters")

        return pattern


@dataclass
class IgnoreEntry:
    """An entry in the ignore file.

    Attributes:
        pattern: The ignore pattern.
        comment: Optional comment/reason.
        finding_id: Associated finding ID if applicable.
    """

    pattern: str
    comment: str = ""
    finding_id: str = ""


class IgnoreFile:
    """Manages .kekkaiignore file read/write operations.

    Format:
        # Comment line
        scanner:rule_id:file_path # inline comment

    Attributes:
        path: Path to the ignore file.
        entries: List of ignore entries.
    """

    def __init__(self, path: Path | None = None) -> None:
        """Initialize ignore file manager.

        Args:
            path: Path to ignore file. Defaults to .kekkaiignore in cwd.
        """
        self.path = path or Path(".kekkaiignore")
        self.entries: list[IgnoreEntry] = []
        self._validator = IgnorePatternValidator()

    def load(self) -> list[IgnoreEntry]:
        """Load entries from file.

        Returns:
            List of ignore entries.
        """
        self.entries = []

        if not self.path.exists():
            return self.entries

        content = self.path.read_text(encoding="utf-8")
        for line in content.splitlines():
            line = line.strip()

            if not line or line.startswith("#"):
                continue

            comment = ""
            if " # " in line:
                line, comment = line.split(" # ", 1)
                line = line.strip()
                comment = comment.strip()

            if self._validator.is_valid(line):
                self.entries.append(IgnoreEntry(pattern=line, comment=comment))

        return self.entries

    def save(self, entries: Sequence[IgnoreEntry] | None = None) -> None:
        """Save entries to file.

        Args:
            entries: Entries to save. Uses self.entries if None.
        """
        if entries is not None:
            self.entries = list(entries)

        lines = [
            "# Kekkai Ignore File",
            "# Generated by kekkai triage",
            "# Format: scanner:rule_id:file_path",
            "",
        ]

        for entry in self.entries:
            pattern = self._validator.validate(entry.pattern)
            if entry.comment:
                safe_comment = entry.comment.replace("\n", " ").replace("#", "")[:100]
                lines.append(f"{pattern} # {safe_comment}")
            else:
                lines.append(pattern)

        self.path.write_text("\n".join(lines) + "\n", encoding="utf-8")

    def add_entry(self, pattern: str, comment: str = "", finding_id: str = "") -> None:
        """Add a validated entry.

        Args:
            pattern: Ignore pattern to add.
            comment: Optional comment.
            finding_id: Associated finding ID.

        Raises:
            ValidationError: If pattern is invalid.
        """
        validated = self._validator.validate(pattern)
        self.entries.append(IgnoreEntry(pattern=validated, comment=comment, finding_id=finding_id))

    def has_pattern(self, pattern: str) -> bool:
        """Check if pattern already exists.

        Args:
            pattern: Pattern to check.

        Returns:
            True if pattern exists.
        """
        return any(e.pattern == pattern for e in self.entries)

    def matches(self, scanner: str, rule_id: str, file_path: str) -> bool:
        """Check if a finding matches any ignore pattern.

        Args:
            scanner: Scanner name.
            rule_id: Rule identifier.
            file_path: File path.

        Returns:
            True if finding should be ignored.
        """
        full_pattern = f"{scanner}:{rule_id}:{file_path}"
        scanner_rule = f"{scanner}:{rule_id}"
        scanner_only = scanner

        for entry in self.entries:
            pattern = entry.pattern
            if pattern == full_pattern:
                return True
            if pattern == scanner_rule:
                return True
            if pattern == scanner_only:
                return True
            if "*" in pattern and self._glob_match(pattern, full_pattern):
                return True

        return False

    def _glob_match(self, pattern: str, target: str) -> bool:
        """Simple glob matching with * wildcard.

        Args:
            pattern: Pattern with optional * wildcards.
            target: String to match against.

        Returns:
            True if pattern matches target.
        """
        regex_pattern = re.escape(pattern).replace(r"\*", ".*")
        return bool(re.fullmatch(regex_pattern, target))
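Usage note (not part of the published diff): a minimal sketch of the scanner:rule_id:file_path matching rules enforced by IgnoreFile and IgnorePatternValidator above, assuming illustrative scanner and rule names.

from kekkai.triage.ignore import IgnoreFile, ValidationError

ignore = IgnoreFile()  # defaults to ./.kekkaiignore; nothing is written until save()
ignore.add_entry(
    "semgrep:python.lang.security.audit.eval-detected:src/*",  # hypothetical rule ID
    comment="sandboxed eval in a test helper",
)
ignore.add_entry("gitleaks")  # scanner-wide suppression

# Exact, scanner:rule, scanner-only, and * glob matches are all honored.
print(ignore.matches("semgrep", "python.lang.security.audit.eval-detected", "src/util.py"))  # True
print(ignore.matches("gitleaks", "generic-api-key", "README.md"))                            # True
print(ignore.matches("trivy", "CVE-2024-0001", "Dockerfile"))                                # False

try:
    ignore.add_entry("../etc/passwd")  # rejected by the path-traversal check
except ValidationError as exc:
    print(f"rejected: {exc}")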