kekkai-cli 1.0.5-py3-none-any.whl → 1.1.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kekkai/cli.py +789 -19
- kekkai/compliance/__init__.py +68 -0
- kekkai/compliance/hipaa.py +235 -0
- kekkai/compliance/mappings.py +136 -0
- kekkai/compliance/owasp.py +517 -0
- kekkai/compliance/owasp_agentic.py +267 -0
- kekkai/compliance/pci_dss.py +205 -0
- kekkai/compliance/soc2.py +209 -0
- kekkai/dojo.py +91 -14
- kekkai/dojo_import.py +9 -1
- kekkai/fix/__init__.py +47 -0
- kekkai/fix/audit.py +278 -0
- kekkai/fix/differ.py +427 -0
- kekkai/fix/engine.py +500 -0
- kekkai/fix/prompts.py +251 -0
- kekkai/output.py +10 -12
- kekkai/report/__init__.py +41 -0
- kekkai/report/compliance_matrix.py +98 -0
- kekkai/report/generator.py +365 -0
- kekkai/report/html.py +69 -0
- kekkai/report/pdf.py +63 -0
- kekkai/report/unified.py +226 -0
- kekkai/scanners/container.py +33 -3
- kekkai/scanners/gitleaks.py +3 -1
- kekkai/scanners/semgrep.py +1 -1
- kekkai/scanners/trivy.py +1 -1
- kekkai/threatflow/model_adapter.py +143 -1
- kekkai/triage/__init__.py +54 -1
- kekkai/triage/loader.py +196 -0
- kekkai_cli-1.1.1.dist-info/METADATA +379 -0
- {kekkai_cli-1.0.5.dist-info → kekkai_cli-1.1.1.dist-info}/RECORD +34 -33
- {kekkai_cli-1.0.5.dist-info → kekkai_cli-1.1.1.dist-info}/entry_points.txt +0 -1
- {kekkai_cli-1.0.5.dist-info → kekkai_cli-1.1.1.dist-info}/top_level.txt +0 -1
- kekkai_cli-1.0.5.dist-info/METADATA +0 -135
- portal/__init__.py +0 -19
- portal/api.py +0 -155
- portal/auth.py +0 -103
- portal/enterprise/__init__.py +0 -32
- portal/enterprise/audit.py +0 -435
- portal/enterprise/licensing.py +0 -342
- portal/enterprise/rbac.py +0 -276
- portal/enterprise/saml.py +0 -595
- portal/ops/__init__.py +0 -53
- portal/ops/backup.py +0 -553
- portal/ops/log_shipper.py +0 -469
- portal/ops/monitoring.py +0 -517
- portal/ops/restore.py +0 -469
- portal/ops/secrets.py +0 -408
- portal/ops/upgrade.py +0 -591
- portal/tenants.py +0 -340
- portal/uploads.py +0 -259
- portal/web.py +0 -384
- {kekkai_cli-1.0.5.dist-info → kekkai_cli-1.1.1.dist-info}/WHEEL +0 -0
kekkai/dojo.py
CHANGED
@@ -49,7 +49,10 @@ def compose_command() -> list[str]:
     docker_compose = shutil.which("docker-compose")
     if docker_compose:
         return [docker_compose]
-    raise RuntimeError(
+    raise RuntimeError(
+        "Docker Compose not found. Please install Docker Desktop "
+        "or the 'docker-compose-plugin' package for your system."
+    )
 
 
 def check_port_available(port: int, host: str = "127.0.0.1") -> bool:
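Only the standalone-binary fallback and the new error message appear in this hunk. For context, a minimal sketch of how such a resolver typically probes for the Compose v2 plugin first; the `docker compose version` probe above the shown lines is an assumption, not part of the diff:

```python
import shutil
import subprocess


def compose_command() -> list[str]:
    # Assumption: code above this hunk prefers the Docker Compose v2 plugin.
    docker = shutil.which("docker")
    if docker:
        probe = subprocess.run(
            [docker, "compose", "version"], capture_output=True, text=True, check=False
        )
        if probe.returncode == 0:
            return [docker, "compose"]
    # Fallback shown in the diff: the standalone docker-compose binary.
    docker_compose = shutil.which("docker-compose")
    if docker_compose:
        return [docker_compose]
    raise RuntimeError(
        "Docker Compose not found. Please install Docker Desktop "
        "or the 'docker-compose-plugin' package for your system."
    )
```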
@@ -62,6 +65,21 @@ def check_port_available(port: int, host: str = "127.0.0.1") -> bool:
     return True
 
 
+def find_available_port(preferred: int, max_attempts: int = 20) -> tuple[int, bool]:
+    """Find an available port, starting from preferred.
+
+    Returns:
+        Tuple of (port, was_fallback) - was_fallback is True if not the preferred port
+    """
+    if check_port_available(preferred):
+        return preferred, False
+    for offset in range(1, max_attempts + 1):
+        candidate = preferred + offset
+        if check_port_available(candidate):
+            return candidate, True
+    raise RuntimeError(f"No available ports found in range {preferred}-{preferred + max_attempts}")
+
+
 def load_env_file(path: Path) -> dict[str, str]:
     if not path.exists():
         return {}
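A short usage sketch of the new helper. `find_available_port` is copied from the hunk; the socket-bind body of `check_port_available` is an assumption based only on its signature:

```python
import socket


def check_port_available(port: int, host: str = "127.0.0.1") -> bool:
    # Assumed implementation: a successful bind means the port is free.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        try:
            sock.bind((host, port))
            return True
        except OSError:
            return False


def find_available_port(preferred: int, max_attempts: int = 20) -> tuple[int, bool]:
    if check_port_available(preferred):
        return preferred, False
    for offset in range(1, max_attempts + 1):
        candidate = preferred + offset
        if check_port_available(candidate):
            return candidate, True
    raise RuntimeError(f"No available ports found in range {preferred}-{preferred + max_attempts}")


port, was_fallback = find_available_port(8080)
if was_fallback:
    print(f"Preferred port busy; using {port} instead")
```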
@@ -115,7 +133,6 @@ def ensure_env(path: Path, port: int, tls_port: int) -> dict[str, str]:
 
 def build_compose_yaml() -> str:
     return (
-        'version: "3.9"\n'
         "services:\n"
         "  nginx:\n"
         "    image: defectdojo/defectdojo-nginx:${NGINX_VERSION:-latest}\n"
@@ -278,15 +295,24 @@ def compose_up(
     tls_port: int,
     wait: bool,
     open_browser: bool,
-) -> dict[str, str]:
-
-
-
-
+) -> tuple[dict[str, str], int, int]:
+    """Start DefectDojo stack.
+
+    Returns:
+        Tuple of (env_dict, actual_port, actual_tls_port)
+    """
+    # Auto-select available ports
+    actual_port, port_fallback = find_available_port(port)
+    actual_tls_port, tls_fallback = find_available_port(tls_port)
 
     compose_file = compose_root / "docker-compose.yml"
     env_file = compose_root / ".env"
-    env = ensure_compose_files(compose_file, env_file,
+    env = ensure_compose_files(compose_file, env_file, actual_port, actual_tls_port)
+
+    # Store port info for later retrieval
+    env["DD_PORT"] = str(actual_port)
+    env["DD_TLS_PORT"] = str(actual_tls_port)
+    write_env_file(env_file, env)
 
     cmd = compose_command() + [
         "--project-name",
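With the new return type, callers unpack a three-tuple instead of a bare env dict. A hedged call-site sketch; keyword parameters other than those visible in the hunk (such as `compose_root`, inferred from `compose_down` below) and the literal values are illustrative:

```python
from pathlib import Path

env, actual_port, actual_tls_port = compose_up(
    compose_root=Path.home() / ".kekkai" / "dojo",  # assumed location
    port=8080,
    tls_port=8443,
    wait=True,
    open_browser=False,
)
print(f"DefectDojo listening on http://localhost:{actual_port}")
print(f"API key available: {'DD_API_KEY' in env}")
```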
@@ -301,11 +327,24 @@ def compose_up(
         raise RuntimeError(proc.stderr.strip() or "Failed to start DefectDojo")
 
     if wait:
-        wait_for_ui(
+        wait_for_ui(actual_port, timeout=300)
+        # Generate API key for kekkai upload command
+        try:
+            api_key = generate_api_key(
+                actual_port,
+                env.get("DD_ADMIN_USER", "admin"),
+                env.get("DD_ADMIN_PASSWORD", ""),
+            )
+            env["DD_API_KEY"] = api_key
+            write_env_file(env_file, env)
+        except RuntimeError:
+            # Non-fatal - user can generate API key manually via UI
+            pass
 
     if open_browser:
-        open_ui(
-
+        open_ui(actual_port)
+
+    return env, actual_port, actual_tls_port
 
 
 def compose_down(*, compose_root: Path, project_name: str) -> None:
@@ -318,7 +357,11 @@ def compose_down(*, compose_root: Path, project_name: str) -> None:
         "--profile",
         DOJO_PROFILE,
     ]
-    proc = subprocess.run(
+    proc = subprocess.run(  # noqa: S603 # nosec B603
+        cmd + ["down", "--remove-orphans", "--volumes"],
+        capture_output=True,
+        text=True,
+    )
     if proc.returncode != 0:
         raise RuntimeError(proc.stderr.strip() or "Failed to stop DefectDojo")
 
@@ -382,9 +425,10 @@ def wait_for_ui(port: int, timeout: int = 300) -> None:
                 if resp.status in {200, 302, 401}:
                     return
                 last_error = f"HTTP {resp.status}"
-        except (URLError, HTTPError) as exc:
+        except (URLError, HTTPError, OSError, ConnectionError) as exc:
+            # OSError/ConnectionError covers ConnectionResetError, BrokenPipeError, etc.
             last_error = str(exc)
-
+        time.sleep(2)
     raise RuntimeError(f"DefectDojo UI did not become ready in time ({last_error})")
 
 
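Only the widened except clause and the relocated sleep appear in this hunk. A self-contained sketch of the polling loop it implies; the probed URL path and the exact loop structure are assumptions:

```python
import time
from urllib.error import HTTPError, URLError
from urllib.request import urlopen


def wait_for_ui(port: int, timeout: int = 300) -> None:
    deadline = time.monotonic() + timeout
    last_error = "no response"
    while time.monotonic() < deadline:
        try:
            with urlopen(f"http://localhost:{port}/login") as resp:  # noqa: S310
                if resp.status in {200, 302, 401}:
                    return
                last_error = f"HTTP {resp.status}"
        except (URLError, HTTPError, OSError, ConnectionError) as exc:
            # OSError/ConnectionError covers ConnectionResetError, BrokenPipeError, etc.
            last_error = str(exc)
        time.sleep(2)  # now outside the except, so every iteration is throttled
    raise RuntimeError(f"DefectDojo UI did not become ready in time ({last_error})")
```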
@@ -395,6 +439,39 @@ def open_ui(port: int) -> None:
     webbrowser.open(url)
 
 
+def generate_api_key(port: int, username: str, password: str, timeout: int = 30) -> str:
+    """Generate DefectDojo API key using admin credentials.
+
+    Uses the /api/v2/api-token-auth/ endpoint to get a token.
+
+    Args:
+        port: DefectDojo port
+        username: Admin username
+        password: Admin password
+        timeout: Request timeout in seconds
+
+    Returns:
+        API token string
+
+    Raises:
+        RuntimeError: If token generation fails
+    """
+    url = f"http://localhost:{port}/api/v2/api-token-auth/"
+    data = json.dumps({"username": username, "password": password}).encode()
+    headers = {"Content-Type": "application/json"}
+
+    req = Request(url, data=data, headers=headers, method="POST")  # noqa: S310 # nosec B310
+    try:
+        with urlopen(req, timeout=timeout) as resp:  # noqa: S310 # nosec B310
+            result: dict[str, str] = json.loads(resp.read().decode())
+            token = result.get("token", "")
+            if not token:
+                raise RuntimeError("Empty token returned from DefectDojo")
+            return token
+    except (URLError, HTTPError, OSError) as exc:
+        raise RuntimeError(f"Failed to generate API key: {exc}") from exc
+
+
 def _random_string(length: int) -> str:
     alphabet = string.ascii_letters + string.digits
     return "".join(secrets.choice(alphabet) for _ in range(length))
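The token that `generate_api_key` stores in `DD_API_KEY` is what a later `kekkai upload` would present to DefectDojo. A hedged sketch of consuming it; DefectDojo's v2 API conventionally takes `Authorization: Token <key>`, and the products endpoint here is illustrative rather than taken from this diff:

```python
import json
from urllib.request import Request, urlopen


def list_products(port: int, token: str, timeout: int = 30) -> dict:
    # Illustrative follow-up call using the stored DD_API_KEY token.
    req = Request(
        f"http://localhost:{port}/api/v2/products/",
        headers={"Authorization": f"Token {token}"},
    )
    with urlopen(req, timeout=timeout) as resp:  # noqa: S310
        return json.loads(resp.read().decode())
```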
kekkai/dojo_import.py
CHANGED
@@ -61,7 +61,15 @@ class DojoClient:
 
         try:
             with urlopen(req, timeout=self._timeout) as resp:  # noqa: S310 # nosec B310
-
+                raw_bytes = resp.read()  # Call once and store result
+                if not raw_bytes:  # Check bytes, not method
+                    return {}
+                try:
+                    result: dict[str, Any] = json.loads(raw_bytes.decode())
+                    return result
+                except json.JSONDecodeError:
+                    # Empty or invalid JSON response - return empty dict
+                    return {}
         except HTTPError as exc:
             error_body = exc.read().decode() if exc.fp else str(exc)
             raise RuntimeError(f"Dojo API error {exc.code}: {error_body}") from exc
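The inline comments point at the bug being fixed: truth-testing `resp.read` (a bound method, which is always truthy) instead of the bytes it returns, and reading the body more than once. A minimal demonstration of the pitfall, using `BytesIO` as a stand-in for an HTTP response:

```python
from io import BytesIO

resp = BytesIO(b"")           # stands in for a response with an empty body
if not resp.read:             # BUG: a bound method is always truthy
    print("never reached")
raw = resp.read()             # correct: call once and keep the bytes
if not raw:
    print("empty body handled")  # a second read() would return b"" anyway
```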
kekkai/fix/__init__.py
ADDED
@@ -0,0 +1,47 @@
+"""AI-powered code remediation engine.
+
+Provides `kekkai fix` functionality to generate and apply code fixes
+for security findings using LLM-based suggestions.
+
+Security considerations:
+- All inputs sanitized before LLM processing (reuses TieredSanitizer)
+- Preview mode default (no auto-apply without explicit --apply)
+- Audit logging for all operations (ASVS V8.3.1)
+- Supports local LLM for sensitive codebases
+
+ASVS Requirements:
+- V5.2.5: Sanitize before LLM
+- V5.3.3: Diff format preserves code intent
+- V6.4.1: API keys in env vars only
+- V8.3.1: Audit log for fix applications
+- V13.1.1: HTTPS for remote API calls
+"""
+
+from __future__ import annotations
+
+from .audit import FixAttempt, FixAuditLog, create_session_id
+from .differ import ApplyResult, DiffApplier, DiffHunk, DiffParser, ParsedDiff, generate_diff
+from .engine import FixConfig, FixEngine, FixResult, FixSuggestion, create_fix_engine
+from .prompts import FixPromptBuilder
+
+__all__ = [
+    # Engine
+    "FixEngine",
+    "FixConfig",
+    "FixResult",
+    "FixSuggestion",
+    "create_fix_engine",
+    # Prompts
+    "FixPromptBuilder",
+    # Differ
+    "DiffParser",
+    "DiffApplier",
+    "DiffHunk",
+    "ParsedDiff",
+    "ApplyResult",
+    "generate_diff",
+    # Audit
+    "FixAuditLog",
+    "FixAttempt",
+    "create_session_id",
+]
kekkai/fix/audit.py
ADDED
@@ -0,0 +1,278 @@
+"""Audit logging for fix applications.
+
+Records all fix attempts, approvals, and applications with timestamps
+for compliance and forensics purposes.
+
+ASVS V8.3.1: Sensitive data not logged inappropriately.
+ASVS V16.3.3: Log security-relevant events.
+"""
+
+from __future__ import annotations
+
+import json
+import logging
+from dataclasses import asdict, dataclass, field
+from datetime import UTC, datetime
+from pathlib import Path
+from typing import Any
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class FixAttempt:
+    """Record of a single fix attempt."""
+
+    finding_id: str
+    rule_id: str
+    file_path: str
+    line_number: int
+    severity: str
+    timestamp: str = field(default_factory=lambda: datetime.now(UTC).isoformat())
+    model_used: str = ""
+    status: str = "pending"  # pending, approved, applied, rejected, failed
+    error: str | None = None
+    diff_preview: str | None = None
+    lines_added: int = 0
+    lines_removed: int = 0
+    backup_path: str | None = None
+
+    def to_dict(self) -> dict[str, Any]:
+        """Convert to dictionary for JSON serialization."""
+        return asdict(self)
+
+
+@dataclass
+class FixAuditLog:
+    """Audit log for fix operations.
+
+    Maintains an append-only log of all fix attempts for a session.
+    """
+
+    session_id: str
+    repo_path: str
+    started_at: str = field(default_factory=lambda: datetime.now(UTC).isoformat())
+    model_mode: str = "local"
+    attempts: list[FixAttempt] = field(default_factory=list)
+    _output_path: Path | None = field(default=None, repr=False)
+
+    def record_attempt(
+        self,
+        finding_id: str,
+        rule_id: str,
+        file_path: str,
+        line_number: int,
+        severity: str,
+        model_used: str = "",
+    ) -> FixAttempt:
+        """Record a new fix attempt.
+
+        Args:
+            finding_id: Unique identifier for the finding
+            rule_id: Scanner rule that triggered the finding
+            file_path: Path to the affected file
+            line_number: Line number of the finding
+            severity: Finding severity level
+            model_used: LLM model used for fix generation
+
+        Returns:
+            The created FixAttempt record
+        """
+        attempt = FixAttempt(
+            finding_id=finding_id,
+            rule_id=rule_id,
+            file_path=file_path,
+            line_number=line_number,
+            severity=severity,
+            model_used=model_used,
+        )
+        self.attempts.append(attempt)
+        self._auto_save()
+
+        logger.info(
+            "fix_attempt_recorded",
+            extra={
+                "finding_id": finding_id,
+                "rule_id": rule_id,
+                "file_path": file_path,
+                "line_number": line_number,
+            },
+        )
+
+        return attempt
+
+    def update_attempt(
+        self,
+        attempt: FixAttempt,
+        *,
+        status: str | None = None,
+        error: str | None = None,
+        diff_preview: str | None = None,
+        lines_added: int | None = None,
+        lines_removed: int | None = None,
+        backup_path: str | None = None,
+    ) -> None:
+        """Update an existing attempt record.
+
+        Args:
+            attempt: The attempt to update
+            status: New status (approved, applied, rejected, failed)
+            error: Error message if failed
+            diff_preview: Preview of the diff (truncated for security)
+            lines_added: Number of lines added
+            lines_removed: Number of lines removed
+            backup_path: Path to backup file if created
+        """
+        if status is not None:
+            attempt.status = status
+        if error is not None:
+            attempt.error = error
+        if diff_preview is not None:
+            # Truncate diff preview to avoid logging sensitive code
+            attempt.diff_preview = diff_preview[:500] if len(diff_preview) > 500 else diff_preview
+        if lines_added is not None:
+            attempt.lines_added = lines_added
+        if lines_removed is not None:
+            attempt.lines_removed = lines_removed
+        if backup_path is not None:
+            attempt.backup_path = backup_path
+
+        self._auto_save()
+
+        logger.info(
+            "fix_attempt_updated",
+            extra={
+                "finding_id": attempt.finding_id,
+                "status": attempt.status,
+                "lines_changed": (attempt.lines_added + attempt.lines_removed),
+            },
+        )
+
+    def mark_applied(
+        self,
+        attempt: FixAttempt,
+        lines_added: int,
+        lines_removed: int,
+        backup_path: str | None = None,
+    ) -> None:
+        """Mark an attempt as successfully applied."""
+        self.update_attempt(
+            attempt,
+            status="applied",
+            lines_added=lines_added,
+            lines_removed=lines_removed,
+            backup_path=backup_path,
+        )
+
+    def mark_failed(self, attempt: FixAttempt, error: str) -> None:
+        """Mark an attempt as failed."""
+        self.update_attempt(attempt, status="failed", error=error)
+
+    def mark_rejected(self, attempt: FixAttempt, reason: str = "") -> None:
+        """Mark an attempt as rejected by user."""
+        self.update_attempt(attempt, status="rejected", error=reason or "User rejected")
+
+    @property
+    def summary(self) -> dict[str, int]:
+        """Get summary counts by status."""
+        counts: dict[str, int] = {
+            "total": len(self.attempts),
+            "pending": 0,
+            "approved": 0,
+            "applied": 0,
+            "rejected": 0,
+            "failed": 0,
+        }
+        for attempt in self.attempts:
+            if attempt.status in counts:
+                counts[attempt.status] += 1
+        return counts
+
+    def set_output_path(self, path: Path) -> None:
+        """Set the output path for auto-saving."""
+        self._output_path = path
+        self._auto_save()
+
+    def _auto_save(self) -> None:
+        """Auto-save if output path is set."""
+        if self._output_path:
+            self.save(self._output_path)
+
+    def save(self, path: Path) -> None:
+        """Save audit log to JSON file.
+
+        Args:
+            path: Output path for the JSON file
+        """
+        data = {
+            "session_id": self.session_id,
+            "repo_path": self.repo_path,
+            "started_at": self.started_at,
+            "model_mode": self.model_mode,
+            "summary": self.summary,
+            "attempts": [a.to_dict() for a in self.attempts],
+        }
+
+        path.parent.mkdir(parents=True, exist_ok=True)
+        path.write_text(json.dumps(data, indent=2))
+
+        logger.debug("audit_log_saved", extra={"path": str(path)})
+
+    @classmethod
+    def load(cls, path: Path) -> FixAuditLog:
+        """Load audit log from JSON file.
+
+        Args:
+            path: Path to the JSON file
+
+        Returns:
+            Loaded FixAuditLog instance
+        """
+        data = json.loads(path.read_text())
+
+        log = cls(
+            session_id=data["session_id"],
+            repo_path=data["repo_path"],
+            started_at=data.get("started_at", ""),
+            model_mode=data.get("model_mode", "local"),
+        )
+
+        for attempt_data in data.get("attempts", []):
+            attempt = FixAttempt(
+                finding_id=attempt_data["finding_id"],
+                rule_id=attempt_data["rule_id"],
+                file_path=attempt_data["file_path"],
+                line_number=attempt_data["line_number"],
+                severity=attempt_data["severity"],
+                timestamp=attempt_data.get("timestamp", ""),
+                model_used=attempt_data.get("model_used", ""),
+                status=attempt_data.get("status", "pending"),
+                error=attempt_data.get("error"),
+                diff_preview=attempt_data.get("diff_preview"),
+                lines_added=attempt_data.get("lines_added", 0),
+                lines_removed=attempt_data.get("lines_removed", 0),
+                backup_path=attempt_data.get("backup_path"),
+            )
+            log.attempts.append(attempt)
+
+        return log
+
+    def to_dict(self) -> dict[str, Any]:
+        """Convert to dictionary."""
+        return {
+            "session_id": self.session_id,
+            "repo_path": self.repo_path,
+            "started_at": self.started_at,
+            "model_mode": self.model_mode,
+            "summary": self.summary,
+            "attempts": [a.to_dict() for a in self.attempts],
+        }
+
+
+def create_session_id() -> str:
+    """Generate a unique session ID for audit logging."""
+    import secrets
+
+    timestamp = datetime.now(UTC).strftime("%Y%m%d-%H%M%S")
+    random_suffix = secrets.token_hex(4)
+    return f"fix-{timestamp}-{random_suffix}"
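A usage sketch of the new audit log, using only the API defined in this file and the exports from kekkai/fix/__init__.py; the identifiers, paths, and rule id are illustrative:

```python
from pathlib import Path

from kekkai.fix import FixAuditLog, create_session_id

log = FixAuditLog(session_id=create_session_id(), repo_path="/work/myrepo")
log.set_output_path(Path(".kekkai/fix-audit.json"))  # every change auto-saves

attempt = log.record_attempt(
    finding_id="finding-001",
    rule_id="semgrep.python.sqli",  # illustrative rule id
    file_path="app/db.py",
    line_number=42,
    severity="high",
    model_used="local-llm",
)
log.mark_applied(attempt, lines_added=3, lines_removed=1, backup_path="app/db.py.bak")
print(log.summary)  # e.g. {"total": 1, ..., "applied": 1, ...}
```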