kekkai-cli 1.0.5__py3-none-any.whl → 1.1.1__py3-none-any.whl
This diff covers the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as published.
- kekkai/cli.py +789 -19
- kekkai/compliance/__init__.py +68 -0
- kekkai/compliance/hipaa.py +235 -0
- kekkai/compliance/mappings.py +136 -0
- kekkai/compliance/owasp.py +517 -0
- kekkai/compliance/owasp_agentic.py +267 -0
- kekkai/compliance/pci_dss.py +205 -0
- kekkai/compliance/soc2.py +209 -0
- kekkai/dojo.py +91 -14
- kekkai/dojo_import.py +9 -1
- kekkai/fix/__init__.py +47 -0
- kekkai/fix/audit.py +278 -0
- kekkai/fix/differ.py +427 -0
- kekkai/fix/engine.py +500 -0
- kekkai/fix/prompts.py +251 -0
- kekkai/output.py +10 -12
- kekkai/report/__init__.py +41 -0
- kekkai/report/compliance_matrix.py +98 -0
- kekkai/report/generator.py +365 -0
- kekkai/report/html.py +69 -0
- kekkai/report/pdf.py +63 -0
- kekkai/report/unified.py +226 -0
- kekkai/scanners/container.py +33 -3
- kekkai/scanners/gitleaks.py +3 -1
- kekkai/scanners/semgrep.py +1 -1
- kekkai/scanners/trivy.py +1 -1
- kekkai/threatflow/model_adapter.py +143 -1
- kekkai/triage/__init__.py +54 -1
- kekkai/triage/loader.py +196 -0
- kekkai_cli-1.1.1.dist-info/METADATA +379 -0
- {kekkai_cli-1.0.5.dist-info → kekkai_cli-1.1.1.dist-info}/RECORD +34 -33
- {kekkai_cli-1.0.5.dist-info → kekkai_cli-1.1.1.dist-info}/entry_points.txt +0 -1
- {kekkai_cli-1.0.5.dist-info → kekkai_cli-1.1.1.dist-info}/top_level.txt +0 -1
- kekkai_cli-1.0.5.dist-info/METADATA +0 -135
- portal/__init__.py +0 -19
- portal/api.py +0 -155
- portal/auth.py +0 -103
- portal/enterprise/__init__.py +0 -32
- portal/enterprise/audit.py +0 -435
- portal/enterprise/licensing.py +0 -342
- portal/enterprise/rbac.py +0 -276
- portal/enterprise/saml.py +0 -595
- portal/ops/__init__.py +0 -53
- portal/ops/backup.py +0 -553
- portal/ops/log_shipper.py +0 -469
- portal/ops/monitoring.py +0 -517
- portal/ops/restore.py +0 -469
- portal/ops/secrets.py +0 -408
- portal/ops/upgrade.py +0 -591
- portal/tenants.py +0 -340
- portal/uploads.py +0 -259
- portal/web.py +0 -384
- {kekkai_cli-1.0.5.dist-info → kekkai_cli-1.1.1.dist-info}/WHEEL +0 -0
kekkai/cli.py
CHANGED
@@ -7,10 +7,10 @@ import sys
 from collections.abc import Sequence
 from datetime import UTC, datetime
 from pathlib import Path
-from typing import cast
+from typing import Any, cast

 from . import dojo, manifest
-from .config import ConfigOverrides, DojoSettings, PolicySettings, load_config
+from .config import DEFAULT_SCANNERS, ConfigOverrides, DojoSettings, PolicySettings, load_config
 from .dojo_import import DojoConfig, import_results_to_dojo
 from .output import (
     VERSION,
@@ -179,8 +179,8 @@ def main(argv: Sequence[str] | None = None) -> int:
     threatflow_parser.add_argument(
         "--model-mode",
         type=str,
-        choices=["local", "openai", "anthropic", "mock"],
-        help="LLM backend: local (
+        choices=["local", "ollama", "openai", "anthropic", "mock"],
+        help="LLM backend: local, ollama (recommended), openai, anthropic, or mock",
     )
     threatflow_parser.add_argument(
         "--model-path", type=str, help="Path to local model file (for local mode)"
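The `ollama` backend is new in this release and is flagged as recommended in the help text. A quick sketch of selecting it through the `main(argv)` entry point defined above (a placeholder invocation; any other flags the `threatflow` command may require are omitted here):

    from kekkai.cli import main

    # "ollama" is now accepted alongside local, openai, anthropic, and mock.
    exit_code = main(["threatflow", "--model-mode", "ollama"])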
@@ -217,6 +217,165 @@ def main(argv: Sequence[str] | None = None) -> int:
         help="Path for .kekkaiignore output (default: .kekkaiignore)",
     )

+    # Fix subcommand - AI-powered remediation
+    fix_parser = subparsers.add_parser("fix", help="generate AI-powered code fixes for findings")
+    fix_parser.add_argument(
+        "--input",
+        type=str,
+        help="Path to scan results JSON (Semgrep format)",
+    )
+    fix_parser.add_argument(
+        "--repo",
+        type=str,
+        help="Path to repository (default: current directory)",
+    )
+    fix_parser.add_argument(
+        "--output-dir",
+        type=str,
+        help="Output directory for diffs and audit log",
+    )
+    fix_parser.add_argument(
+        "--dry-run",
+        action="store_true",
+        default=True,
+        help="Preview fixes without applying (default: True)",
+    )
+    fix_parser.add_argument(
+        "--apply",
+        action="store_true",
+        help="Apply fixes to files (requires explicit flag)",
+    )
+    fix_parser.add_argument(
+        "--model-mode",
+        type=str,
+        choices=["local", "ollama", "openai", "anthropic", "mock"],
+        default="local",
+        help="LLM backend: local (default), openai, anthropic, or mock",
+    )
+    fix_parser.add_argument(
+        "--api-key",
+        type=str,
+        help="API key for remote LLM (prefer KEKKAI_FIX_API_KEY env var)",
+    )
+    fix_parser.add_argument(
+        "--model-name",
+        type=str,
+        help="Specific model name to use",
+    )
+    fix_parser.add_argument(
+        "--max-fixes",
+        type=int,
+        default=10,
+        help="Maximum fixes to generate per run (default: 10)",
+    )
+    fix_parser.add_argument(
+        "--timeout",
+        type=int,
+        default=120,
+        help="Timeout in seconds for LLM calls (default: 120)",
+    )
+    fix_parser.add_argument(
+        "--no-backup",
+        action="store_true",
+        help="Disable backup creation when applying fixes",
+    )
+
+    # Report subcommand - Compliance mapping and reporting
+    report_parser = subparsers.add_parser(
+        "report", help="generate compliance reports from scan findings"
+    )
+    report_parser.add_argument(
+        "--input",
+        type=str,
+        required=True,
+        help="Path to scan results JSON file",
+    )
+    report_parser.add_argument(
+        "--output",
+        type=str,
+        help="Output directory for reports (default: current directory)",
+    )
+    report_parser.add_argument(
+        "--format",
+        type=str,
+        default="html",
+        help="Report format: html, pdf, compliance, json, all (default: html)",
+    )
+    report_parser.add_argument(
+        "--frameworks",
+        type=str,
+        help="Comma-separated frameworks: PCI-DSS,SOC2,OWASP,HIPAA (default: all)",
+    )
+    report_parser.add_argument(
+        "--min-severity",
+        type=str,
+        default="info",
+        help="Minimum severity to include: critical,high,medium,low,info (default: info)",
+    )
+    report_parser.add_argument(
+        "--title",
+        type=str,
+        default="Security Scan Report",
+        help="Report title",
+    )
+    report_parser.add_argument(
+        "--organization",
+        type=str,
+        default="",
+        help="Organization name for report header",
+    )
+    report_parser.add_argument(
+        "--project",
+        type=str,
+        default="",
+        help="Project name for report header",
+    )
+    report_parser.add_argument(
+        "--no-executive-summary",
+        action="store_true",
+        help="Exclude executive summary section",
+    )
+    report_parser.add_argument(
+        "--no-timeline",
+        action="store_true",
+        help="Exclude remediation timeline section",
+    )
+
+    # Upload subcommand - upload scan results to DefectDojo
+    upload_parser = subparsers.add_parser("upload", help="upload scan results to DefectDojo")
+    upload_parser.add_argument(
+        "--run-id",
+        type=str,
+        help="Run ID to upload (default: latest run)",
+    )
+    upload_parser.add_argument(
+        "--input",
+        type=str,
+        help="Path to specific results file to upload",
+    )
+    upload_parser.add_argument(
+        "--dojo-url",
+        type=str,
+        help="DefectDojo base URL (default: http://localhost:8080)",
+    )
+    upload_parser.add_argument(
+        "--dojo-api-key",
+        type=str,
+        help="DefectDojo API key (or set KEKKAI_DOJO_API_KEY env var)",
+    )
+    upload_parser.add_argument(
+        "--product",
+        type=str,
+        default="Kekkai Scans",
+        help="DefectDojo product name",
+    )
+    upload_parser.add_argument(
+        "--engagement",
+        type=str,
+        default="Default Engagement",
+        help="DefectDojo engagement name",
+    )
+
     parsed = parser.parse_args(args)
     if parsed.command == "init":
         return _command_init(parsed.config, parsed.force)
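All three new subcommands route through the same `main(argv)` entry point, so they can be exercised programmatically as well as from the shell. A minimal sketch using flag names taken from the parser definitions above (file paths are placeholders):

    from kekkai.cli import main

    # Preview only: --dry-run defaults to True unless --apply is passed.
    main(["fix", "--input", "semgrep-results.json", "--model-mode", "mock"])
    # --format accepts a comma-separated list, parsed by split(",").
    main(["report", "--input", "kekkai-report.json", "--format", "html,json"])
    # Uploads the latest run by default; assumes KEKKAI_DOJO_API_KEY or a local dojo .env.
    main(["upload", "--product", "Kekkai Scans"])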
@@ -249,6 +408,12 @@ def main(argv: Sequence[str] | None = None) -> int:
         return _command_threatflow(parsed)
     if parsed.command == "triage":
         return _command_triage(parsed)
+    if parsed.command == "fix":
+        return _command_fix(parsed)
+    if parsed.command == "report":
+        return _command_report(parsed)
+    if parsed.command == "upload":
+        return _command_upload(parsed)

     parser.print_help()
     return 1
@@ -439,6 +604,36 @@ def _command_scan(
     )
     manifest.write_manifest(run_dir / "run.json", run_manifest)

+    # Generate unified report (aggregates all scanner findings)
+    if scan_results:
+        from .report.unified import UnifiedReportError, generate_unified_report
+
+        # Determine output path for unified report
+        if output_path:
+            # --output flag provided: use it for unified report
+            unified_report_path = Path(output_path).expanduser().resolve()
+            # Security: Validate path (ASVS V5.3.3)
+            if not is_within_base(base_dir, unified_report_path):
+                # Allow explicit paths outside base_dir, but warn
+                console.print(
+                    f"[warning]Writing outside kekkai home: {unified_report_path}[/warning]"
+                )
+        else:
+            # Default: save in run directory
+            unified_report_path = run_dir / "kekkai-report.json"
+
+        try:
+            generate_unified_report(
+                scan_results=scan_results,
+                output_path=unified_report_path,
+                run_id=run_id,
+                commit_sha=commit_sha,
+            )
+            console.print(f"[success]Unified report:[/success] {unified_report_path}")
+        except UnifiedReportError as e:
+            err_msg = sanitize_error(str(e))
+            console.print(f"[warning]Failed to generate unified report: {err_msg}[/warning]")
+
     # Collect all findings for policy evaluation
     all_findings: list[Finding] = []
     scan_errors: list[str] = []
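`is_within_base` is imported from elsewhere in the package and is not part of this diff; note that an escape is only warned about rather than rejected, since `--output` reflects explicit user intent. A minimal sketch of what such a containment check typically looks like (an assumption, not the package's actual implementation):

    from pathlib import Path

    def is_within_base(base: Path, candidate: Path) -> bool:
        # Resolve symlinks and ".." segments before comparing, so a crafted
        # path cannot escape the base directory (cf. ASVS V5.3.3).
        return candidate.resolve().is_relative_to(base.resolve())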
@@ -489,7 +684,7 @@ def _resolve_scanners(override: str | None, config_scanners: list[str] | None) -
         return [s.strip() for s in override.split(",") if s.strip()]
     if config_scanners:
         return config_scanners
-    return
+    return list(DEFAULT_SCANNERS)


 def _resolve_policy_config(
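The new `return list(DEFAULT_SCANNERS)` replaces the bare `return` (as rendered here, a `None` result), making the precedence explicit: CLI override beats config, config beats the defaults. For example:

    from kekkai.cli import _resolve_scanners  # private helper, imported for illustration

    _resolve_scanners("semgrep, trivy", ["gitleaks"])  # -> ["semgrep", "trivy"]  (CLI override wins)
    _resolve_scanners(None, ["gitleaks"])              # -> ["gitleaks"]          (config fallback)
    _resolve_scanners(None, None)                      # -> list(DEFAULT_SCANNERS), never None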
@@ -543,7 +738,7 @@ def _print_scan_summary_table(scan_results: list[ScanResult]) -> None:
         )
         for r in scan_results
     ]
-
+    print_scan_summary(rows)


 def _print_policy_summary(result: PolicyResult) -> None:
@@ -657,6 +852,26 @@ def _resolve_github_repo(override: str | None) -> tuple[str | None, str | None]:
     return None, None


+def _normalize_scanner_name(stem: str) -> str:
+    """Normalize filename stem to scanner name.
+
+    Strips the "-results" suffix from scanner output filenames.
+
+    Examples:
+        gitleaks-results -> gitleaks
+        trivy-results -> trivy
+        semgrep-results -> semgrep
+        custom-scanner -> custom-scanner
+
+    Args:
+        stem: File stem (name without extension).
+
+    Returns:
+        Normalized scanner name.
+    """
+    return stem.removesuffix("-results")
+
+
 def _create_scanner(
     name: str,
     zap_target_url: str | None = None,
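`str.removesuffix` (Python 3.9+) removes the suffix only when it is actually present, which is why it is the right tool here; `rstrip` would instead strip any trailing characters belonging to the given set. For example:

    assert "gitleaks-results".removesuffix("-results") == "gitleaks"
    assert "custom-scanner".removesuffix("-results") == "custom-scanner"  # no suffix: unchanged
    assert "gitleaks-results".rstrip("-results") == "gitleak"  # rstrip strips a character set: wrong tool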
@@ -749,23 +964,44 @@ def _command_dojo(parsed: argparse.Namespace) -> int:
     project_name = _resolve_dojo_project_name(parsed)

     if parsed.dojo_command == "up":
-
-
+        requested_port = _resolve_dojo_port(parsed)
+        requested_tls_port = _resolve_dojo_tls_port(parsed, requested_port)
         try:
-            env = dojo.compose_up(
+            env, actual_port, actual_tls_port = dojo.compose_up(
                 compose_root=compose_root,
                 project_name=project_name,
-                port=
-                tls_port=
+                port=requested_port,
+                tls_port=requested_tls_port,
                 wait=bool(parsed.wait),
                 open_browser=bool(parsed.open),
             )
         except RuntimeError as exc:
             print(str(exc))
             return 1
-
-
-
+
+        # Warn if port was changed due to conflict
+        if actual_port != requested_port:
+            console.print(
+                f"[warning]Port {requested_port} was in use, using {actual_port} instead[/warning]"
+            )
+
+        console.print(
+            f"\n[bold cyan]DefectDojo is ready at[/bold cyan] http://localhost:{actual_port}/"
+        )
+        console.print("\n[bold]Login credentials:[/bold]")
+        console.print(f"  Username: {env.get('DD_ADMIN_USER', 'admin')}")
+        console.print(f"  Password: {env.get('DD_ADMIN_PASSWORD', '(see .env)')}")
+
+        # Show API key if generated (only when --wait was used)
+        api_key = env.get("DD_API_KEY")
+        if api_key:
+            console.print(f"\n[bold]API Key (for uploads):[/bold] {api_key}")
+        else:
+            console.print(
+                "\n[muted]Note: Run with --wait to auto-generate API key for uploads[/muted]"
+            )
+
+        console.print(f"\nCredentials saved to: {compose_root / '.env'}")
         return 0

     if parsed.dojo_command == "down":
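`dojo.compose_up` now returns the resolved ports alongside the env mapping, which is what makes the conflict warning above possible. The actual selection logic lives in `kekkai/dojo.py` (changed in this release but not shown here); a plausible sketch of such a fallback probe, offered only as an assumption:

    import socket

    def first_free_port(preferred: int, attempts: int = 10) -> int:
        # Try the preferred port first, then walk upward until bind() succeeds.
        for port in range(preferred, preferred + attempts):
            with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
                try:
                    s.bind(("127.0.0.1", port))
                    return port
                except OSError:
                    continue
        raise RuntimeError(f"no free port in {preferred}-{preferred + attempts - 1}")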
@@ -920,22 +1156,556 @@ def _threatflow_banner() -> str:
 def _command_triage(parsed: argparse.Namespace) -> int:
     """Run interactive triage TUI."""
     from .triage import run_triage
+    from .triage.loader import load_findings_from_path

     input_path_str = cast(str | None, getattr(parsed, "input", None))
     output_path_str = cast(str | None, getattr(parsed, "output", None))

-
-
+    # Default to latest run if no input specified
+    if not input_path_str:
+        runs_dir = app_base_dir() / "runs"
+        if runs_dir.exists():
+            run_dirs = sorted(
+                [d for d in runs_dir.iterdir() if d.is_dir()],
+                key=lambda d: d.stat().st_mtime,
+            )
+            if run_dirs:
+                input_path = run_dirs[-1]
+                console.print(f"[info]Using latest run: {input_path.name}[/info]\n")
+            else:
+                console.print("[danger]No scan runs found. Run 'kekkai scan' first.[/danger]")
+                return 1
+        else:
+            console.print("[danger]No scan runs found. Run 'kekkai scan' first.[/danger]")
+            return 1
+    else:
+        input_path = Path(input_path_str).expanduser().resolve()

-    if
-    console.print(f"[danger]Error:[/danger] Input
+    if not input_path.exists():
+        console.print(f"[danger]Error:[/danger] Input not found: {input_path}")
         return 1

+    output_path = Path(output_path_str).expanduser().resolve() if output_path_str else None
+
     console.print("[bold cyan]Kekkai Triage[/bold cyan] - Interactive Finding Review")
     console.print("Use j/k to navigate, f=false positive, c=confirmed, d=deferred")
     console.print("Press Ctrl+S to save, q to quit\n")

-
+    # Use new loader that supports raw scanner outputs
+    findings, errors = load_findings_from_path(input_path)
+
+    if errors:
+        console.print("[warning]Warnings:[/warning]")
+        for err in errors[:5]:  # Limit to first 5
+            console.print(f"  - {err}")
+        console.print()
+
+    if not findings:
+        console.print("[warning]No findings to triage.[/warning]")
+        return 0
+
+    console.print(f"[info]Loaded {len(findings)} finding(s)[/info]\n")
+
+    return run_triage(findings=findings, output_path=output_path)
+
+
+def _command_fix(parsed: argparse.Namespace) -> int:
+    """Run AI-powered code fix generation."""
+    from .fix import FixConfig, FixEngine
+
+    # Resolve input path
+    input_path_str = cast(str | None, getattr(parsed, "input", None))
+    if not input_path_str:
+        console.print("[danger]Error:[/danger] --input is required (path to scan results JSON)")
+        return 1
+
+    input_path = Path(input_path_str).expanduser().resolve()
+    if not input_path.exists():
+        console.print(f"[danger]Error:[/danger] Input file not found: {input_path}")
+        return 1
+
+    # Resolve repository path
+    repo_override = cast(str | None, getattr(parsed, "repo", None))
+    repo_path = Path(repo_override).expanduser().resolve() if repo_override else Path.cwd()
+
+    if not repo_path.exists() or not repo_path.is_dir():
+        console.print(f"[danger]Error:[/danger] Repository path not found: {repo_path}")
+        return 1
+
+    # Resolve output directory
+    output_dir_str = cast(str | None, getattr(parsed, "output_dir", None))
+    output_dir = Path(output_dir_str).expanduser().resolve() if output_dir_str else None
+
+    # Resolve model settings
+    model_mode = getattr(parsed, "model_mode", "local") or "local"
+    api_key = getattr(parsed, "api_key", None) or os.environ.get("KEKKAI_FIX_API_KEY")
+    model_name = getattr(parsed, "model_name", None)
+    max_fixes = getattr(parsed, "max_fixes", 10)
+    timeout = getattr(parsed, "timeout", 120)
+    no_backup = getattr(parsed, "no_backup", False)
+
+    # Determine dry_run: --apply overrides --dry-run
+    apply_fixes = getattr(parsed, "apply", False)
+    dry_run = not apply_fixes
+
+    # Display banner
+    console.print("\n[bold cyan]Kekkai Fix[/bold cyan] - AI-Powered Remediation")
+    console.print("=" * 50)
+    console.print(f"Repository: {repo_path}")
+    console.print(f"Input: {input_path}")
+    console.print(f"Model mode: {model_mode}")
+    console.print(f"Dry run: {dry_run}")
+
+    # Warn about remote mode
+    if model_mode in ("openai", "anthropic"):
+        console.print(
+            "\n[warning]*** WARNING: Using remote API. Code will be sent to external service. ***"
+            "[/warning]\n"
+        )
+        if not api_key:
+            console.print("[danger]Error:[/danger] API key required for remote mode.")
+            console.print("  Set --api-key or KEKKAI_FIX_API_KEY environment variable")
+            return 1
+
+    # Build config
+    config = FixConfig(
+        model_mode=model_mode,
+        api_key=api_key,
+        model_name=model_name,
+        max_fixes=max_fixes,
+        timeout_seconds=timeout,
+        dry_run=dry_run,
+        create_backups=not no_backup,
+    )
+
+    # Run fix engine
+    console.print("\nAnalyzing findings...")
+    engine = FixEngine(config)
+    result = engine.fix_from_scan_results(input_path, repo_path, output_dir)
+
+    if not result.success:
+        console.print(f"[danger]Error:[/danger] {result.error}")
+        return 1
+
+    # Print results
+    console.print("\n[success]Fix generation complete[/success]")
+    console.print(f"  Findings processed: {result.findings_processed}")
+    console.print(f"  Fixes generated: {result.fixes_generated}")
+
+    if not dry_run:
+        console.print(f"  Fixes applied: {result.fixes_applied}")
+
+    if result.warnings:
+        console.print("\n[warning]Warnings:[/warning]")
+        for w in result.warnings[:10]:
+            console.print(f"  - {sanitize_for_terminal(w)}")
+        if len(result.warnings) > 10:
+            console.print(f"  ... and {len(result.warnings) - 10} more")
+
+    # Show fix previews in dry run mode
+    if dry_run and result.suggestions:
+        console.print("\n[bold]Fix Previews:[/bold]")
+        for i, suggestion in enumerate(result.suggestions[:5], 1):
+            if suggestion.success:
+                console.print(
+                    f"\n--- Fix {i}: {suggestion.finding.file_path}:{suggestion.finding.line} ---"
+                )
+                console.print(f"Rule: {suggestion.finding.rule_id}")
+                console.print(suggestion.preview[:1000])
+                if len(suggestion.preview) > 1000:
+                    console.print("... (truncated)")
+        if len(result.suggestions) > 5:
+            console.print(f"\n... and {len(result.suggestions) - 5} more fixes")
+        console.print("\n[info]To apply fixes, run with --apply flag[/info]")
+
+    if result.audit_log_path:
+        console.print(f"\nAudit log: {result.audit_log_path}")
+
+    return 0
+
+
+def _command_report(parsed: argparse.Namespace) -> int:
+    """Generate compliance reports from scan findings."""
+    import json as _json
+
+    from .report import ReportConfig, ReportFormat, generate_report
+
+    # Resolve input path
+    input_path_str = cast(str, parsed.input)
+    input_path = Path(input_path_str).expanduser().resolve()
+
+    if not input_path.exists():
+        console.print(f"[danger]Error:[/danger] Input file not found: {input_path}")
+        return 1
+
+    # Resolve output directory
+    output_str = cast(str | None, getattr(parsed, "output", None))
+    output_dir = Path(output_str).expanduser().resolve() if output_str else Path.cwd()
+
+    # Parse format
+    format_str = getattr(parsed, "format", "html").lower()
+    formats: list[ReportFormat] = []
+    if format_str == "all":
+        formats = [ReportFormat.HTML, ReportFormat.PDF, ReportFormat.COMPLIANCE, ReportFormat.JSON]
+    else:
+        for fmt in format_str.split(","):
+            fmt = fmt.strip()
+            try:
+                formats.append(ReportFormat(fmt))
+            except ValueError:
+                console.print(f"[danger]Error:[/danger] Unknown format: {fmt}")
+                console.print("  Available: html, pdf, compliance, json, all")
+                return 1
+
+    # Parse frameworks
+    frameworks_str = cast(str | None, getattr(parsed, "frameworks", None))
+    if frameworks_str:
+        frameworks = [f.strip() for f in frameworks_str.split(",")]
+    else:
+        frameworks = ["PCI-DSS", "SOC2", "OWASP", "HIPAA"]
+
+    # Build config
+    config = ReportConfig(
+        formats=formats,
+        frameworks=frameworks,
+        min_severity=getattr(parsed, "min_severity", "info"),
+        include_executive_summary=not getattr(parsed, "no_executive_summary", False),
+        include_remediation_timeline=not getattr(parsed, "no_timeline", False),
+        title=getattr(parsed, "title", "Security Scan Report"),
+        organization=getattr(parsed, "organization", ""),
+        project_name=getattr(parsed, "project", ""),
+    )
+
+    # Display banner
+    console.print("\n[bold cyan]Kekkai Report[/bold cyan] - Compliance Mapping & Reporting")
+    console.print("=" * 55)
+    console.print(f"Input: {input_path}")
+    console.print(f"Output: {output_dir}")
+    console.print(f"Formats: {', '.join(f.value for f in formats)}")
+    console.print(f"Frameworks: {', '.join(frameworks)}")
+
+    # Load findings from input file
+    console.print("\nLoading scan results...")
+    try:
+        with input_path.open() as f:
+            data = _json.load(f)
+    except _json.JSONDecodeError as e:
+        console.print(f"[danger]Error:[/danger] Invalid JSON: {e}")
+        return 1
+
+    # Parse findings based on input format
+    findings = _parse_findings_from_json(data)
+
+    if not findings:
+        console.print("[warning]Warning:[/warning] No findings found in input file")
+
+    console.print(f"Found {len(findings)} findings")
+
+    # Generate reports
+    console.print("\nGenerating reports...")
+    result = generate_report(findings, output_dir, config)
+
+    if not result.success:
+        console.print("[danger]Report generation failed:[/danger]")
+        for err in result.errors:
+            console.print(f"  - {sanitize_error(err)}")
+        return 1
+
+    # Print results
+    console.print(f"\n[success]Reports generated in {result.generation_time_ms}ms[/success]")
+    console.print("\nOutput files:")
+    for path in result.output_files:
+        console.print(f"  - {path}")
+
+    if result.warnings:
+        console.print("\n[warning]Warnings:[/warning]")
+        for w in result.warnings[:5]:
+            console.print(f"  - {w}")
+
+    return 0
+
+
+def _command_upload(parsed: argparse.Namespace) -> int:
+    """Upload scan results to DefectDojo."""
+    import json as _json
+
+    # Resolve DefectDojo configuration
+    dojo_url = (
+        getattr(parsed, "dojo_url", None)
+        or os.environ.get("KEKKAI_DOJO_URL")
+        or "http://localhost:8080"
+    )
+    dojo_api_key = getattr(parsed, "dojo_api_key", None) or os.environ.get("KEKKAI_DOJO_API_KEY")
+
+    if not dojo_api_key:
+        # Try to read from local dojo .env file
+        dojo_env_path = app_base_dir() / "dojo" / ".env"
+        if dojo_env_path.exists():
+            env_data = dojo.load_env_file(dojo_env_path)
+            dojo_api_key = env_data.get("DD_API_KEY")
+
+    if not dojo_api_key:
+        console.print("[danger]Error:[/danger] DefectDojo API key required")
+        console.print("  Set --dojo-api-key or KEKKAI_DOJO_API_KEY environment variable")
+        console.print("  Or run 'kekkai dojo up' to start local DefectDojo first")
+        return 1
+
+    product_name = getattr(parsed, "product", "Kekkai Scans")
+    engagement_name = getattr(parsed, "engagement", "Default Engagement")
+
+    # Resolve input - either specific file or find latest run
+    input_path_str = cast(str | None, getattr(parsed, "input", None))
+    run_id_override = cast(str | None, getattr(parsed, "run_id", None))
+
+    if input_path_str:
+        # Use specific input file
+        input_path = Path(input_path_str).expanduser().resolve()
+        if not input_path.exists():
+            console.print(f"[danger]Error:[/danger] Input file not found: {input_path}")
+            return 1
+        run_dir = input_path.parent
+        run_id = run_dir.name
+    else:
+        # Find latest run
+        runs_dir = app_base_dir() / "runs"
+        if not runs_dir.exists():
+            console.print("[danger]Error:[/danger] No scan runs found")
+            console.print("  Run 'kekkai scan' first to generate results")
+            return 1
+
+        if run_id_override:
+            run_dir = runs_dir / run_id_override
+            if not run_dir.exists():
+                console.print(f"[danger]Error:[/danger] Run not found: {run_id_override}")
+                return 1
+            run_id = run_id_override
+        else:
+            # Find most recent run
+            run_dirs = sorted(
+                [d for d in runs_dir.iterdir() if d.is_dir()],
+                key=lambda d: d.stat().st_mtime,
+                reverse=True,
+            )
+            if not run_dirs:
+                console.print("[danger]Error:[/danger] No scan runs found")
+                return 1
+            run_dir = run_dirs[0]
+            run_id = run_dir.name
+
+    console.print("\n[bold cyan]Kekkai Upload[/bold cyan] - DefectDojo Import")
+    console.print("=" * 45)
+    console.print(f"DefectDojo URL: {dojo_url}")
+    console.print(f"Run ID: {run_id}")
+    console.print(f"Product: {product_name}")
+    console.print(f"Engagement: {engagement_name}")
+
+    # Find and load scan results - prefer *-results.json first
+    scan_files = sorted(run_dir.glob("*-results.json"))
+    if not scan_files:
+        # Fallback to all JSON (excluding metadata files)
+        scan_files = sorted(
+            [f for f in run_dir.glob("*.json") if f.name not in ("run.json", "policy-result.json")]
+        )
+
+    if not scan_files:
+        console.print(f"[danger]Error:[/danger] No scan results found in {run_dir}")
+        return 1
+
+    console.print(f"\nFound {len(scan_files)} result file(s)")
+
+    # Build scan results for import
+    scan_results: list[ScanResult] = []
+    scanners_map: dict[str, Scanner] = {}
+
+    for scan_file in scan_files:
+        # Normalize scanner name: "gitleaks-results" -> "gitleaks"
+        scanner_name = _normalize_scanner_name(scan_file.stem)
+        console.print(f"  Loading {scanner_name}...")
+
+        # Load raw JSON
+        try:
+            raw_text = scan_file.read_text(encoding="utf-8")
+            _json.loads(raw_text)  # Validate JSON syntax
+        except (OSError, _json.JSONDecodeError) as e:
+            console.print(f"  [warning]Skipped (invalid JSON): {e}[/warning]")
+            continue
+
+        # Create scanner and use canonical parser
+        scanner = _create_scanner(scanner_name)
+        if not scanner:
+            console.print("  [warning]Skipped (unknown scanner)[/warning]")
+            continue
+
+        # Use canonical scanner parser (reuses validated logic)
+        findings = scanner.parse(raw_text)
+
+        scan_results.append(
+            ScanResult(
+                scanner=scanner.name,  # Use canonical scanner name
+                success=True,
+                findings=findings,
+                raw_output_path=scan_file,
+                duration_ms=0,
+            )
+        )
+        scanners_map[scanner.name] = scanner
+
+        console.print(f"    {len(findings)} finding(s)")

+    if not scan_results:
+        console.print("[danger]Error:[/danger] No valid scan results to upload")
+        return 1
+
+    # Import to DefectDojo
+    console.print("\nUploading to DefectDojo...")
+
+    dojo_cfg = DojoConfig(
+        base_url=dojo_url,
+        api_key=dojo_api_key,
+        product_name=product_name,
+        engagement_name=engagement_name,
+    )
+
+    # Get commit SHA from run manifest if available
+    commit_sha: str | None = None
+    manifest_path = run_dir / "run.json"
+    if manifest_path.exists():
+        try:
+            with manifest_path.open() as f:
+                manifest_data = _json.load(f)
+                commit_sha = manifest_data.get("commit_sha")
+        except (OSError, _json.JSONDecodeError):
+            pass
+
+    import_results = import_results_to_dojo(
+        config=dojo_cfg,
+        results=scan_results,
+        scanners=scanners_map,
+        run_id=run_id,
+        commit_sha=commit_sha,
+    )
+
+    success_count = 0
+    for idx, ir in enumerate(import_results):
+        # Label based on actual scan_results order (not scanners_map keys)
+        scanner_label = scan_results[idx].scanner if idx < len(scan_results) else f"scanner-{idx}"
+        if ir.success:
+            success_count += 1
+            console.print(
+                f"  [success]{scanner_label}:[/success] {ir.findings_created} created, "
+                f"{ir.findings_closed} closed"
+            )
+        else:
+            err = sanitize_error(ir.error or "Unknown error")
+            console.print(f"  [danger]{scanner_label} failed:[/danger] {err}")
+
+    if success_count > 0:
+        console.print(f"\n[success]Upload complete![/success] {success_count} scanner(s) imported")
+        console.print(f"View results at: {dojo_url}")
+        return 0
+    else:
+        console.print("\n[danger]Upload failed[/danger]")
+        return 1
+
+
+def _parse_findings_from_json(data: dict[str, Any] | list[Any]) -> list[Finding]:
+    """Parse findings from various JSON formats (Semgrep, Trivy, unified)."""
+    from .scanners.base import Severity
+
+    findings: list[Finding] = []
+
+    # Handle Semgrep format
+    if isinstance(data, dict) and "results" in data:
+        for item in data.get("results", []):
+            severity_str = item.get("extra", {}).get("severity", "INFO")
+            if severity_str == "ERROR":
+                severity = Severity.HIGH
+            elif severity_str == "WARNING":
+                severity = Severity.MEDIUM
+            else:
+                severity = Severity.from_string(severity_str)
+
+            findings.append(
+                Finding(
+                    scanner="semgrep",
+                    title=item.get("check_id", "Unknown"),
+                    severity=severity,
+                    description=item.get("extra", {}).get("message", ""),
+                    file_path=item.get("path"),
+                    line=item.get("start", {}).get("line"),
+                    rule_id=item.get("check_id"),
+                    cwe=_extract_cwe_from_metadata(item.get("extra", {}).get("metadata", {})),
+                )
+            )
+
+    # Handle Trivy format
+    elif isinstance(data, dict) and "Results" in data:
+        for result in data.get("Results", []):
+            for vuln in result.get("Vulnerabilities", []):
+                findings.append(
+                    Finding(
+                        scanner="trivy",
+                        title=vuln.get("Title", vuln.get("VulnerabilityID", "Unknown")),
+                        severity=Severity.from_string(vuln.get("Severity", "UNKNOWN")),
+                        description=vuln.get("Description", ""),
+                        cve=vuln.get("VulnerabilityID"),
+                        package_name=vuln.get("PkgName"),
+                        package_version=vuln.get("InstalledVersion"),
+                        fixed_version=vuln.get("FixedVersion"),
+                    )
+                )
+
+    # Handle unified Kekkai format (array of findings)
+    elif isinstance(data, list):
+        for item in data:
+            findings.append(
+                Finding(
+                    scanner=item.get("scanner", "unknown"),
+                    title=item.get("title", "Unknown"),
+                    severity=Severity.from_string(item.get("severity", "unknown")),
+                    description=item.get("description", ""),
+                    file_path=item.get("file_path"),
+                    line=item.get("line"),
+                    rule_id=item.get("rule_id"),
+                    cwe=item.get("cwe"),
+                    cve=item.get("cve"),
+                    package_name=item.get("package_name"),
+                    package_version=item.get("package_version"),
+                    fixed_version=item.get("fixed_version"),
+                )
+            )
+
+    # Handle Kekkai report JSON format (with findings array)
+    elif isinstance(data, dict) and "findings" in data:
+        for item in data.get("findings", []):
+            findings.append(
+                Finding(
+                    scanner=item.get("scanner", "unknown"),
+                    title=item.get("title", "Unknown"),
+                    severity=Severity.from_string(item.get("severity", "unknown")),
+                    description=item.get("description", ""),
+                    file_path=item.get("file_path"),
+                    line=item.get("line"),
+                    rule_id=item.get("rule_id"),
+                    cwe=item.get("cwe"),
+                    cve=item.get("cve"),
+                    package_name=item.get("package_name"),
+                    package_version=item.get("package_version"),
+                    fixed_version=item.get("fixed_version"),
+                )
+            )
+
+    return findings
+
+
+def _extract_cwe_from_metadata(metadata: dict[str, Any]) -> str | None:
+    """Extract CWE from Semgrep metadata."""
+    cwe_list = metadata.get("cwe", [])
+    if isinstance(cwe_list, list) and cwe_list:
+        return str(cwe_list[0])
+    if isinstance(cwe_list, str):
+        return cwe_list
+    return None


 def _resolve_dojo_compose_dir(parsed: argparse.Namespace) -> str | None:
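Format detection in `_parse_findings_from_json` keys off the JSON envelope: Semgrep uses a lowercase `results` list, Trivy an uppercase `Results` list, and Kekkai's own outputs are either a bare array or a dict with a `findings` array. A minimal Semgrep-shaped payload exercising the first branch (the rule id and values are illustrative, not from any real scan):

    from kekkai.cli import _parse_findings_from_json  # private helper, imported for illustration

    payload = {
        "results": [
            {
                "check_id": "python.lang.security.insecure-hash",  # illustrative rule id
                "path": "app/crypto.py",
                "start": {"line": 42},
                "extra": {
                    "severity": "ERROR",  # mapped to Severity.HIGH by the branch above
                    "message": "MD5 is not collision resistant",
                    "metadata": {"cwe": ["CWE-328"]},
                },
            }
        ]
    }
    findings = _parse_findings_from_json(payload)
    assert findings[0].scanner == "semgrep"
    assert findings[0].cwe == "CWE-328"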