kekkai-cli 1.0.5__py3-none-any.whl → 1.1.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package versions exactly as they appear in their public registries.
- kekkai/cli.py +693 -14
- kekkai/compliance/__init__.py +68 -0
- kekkai/compliance/hipaa.py +235 -0
- kekkai/compliance/mappings.py +136 -0
- kekkai/compliance/owasp.py +517 -0
- kekkai/compliance/owasp_agentic.py +267 -0
- kekkai/compliance/pci_dss.py +205 -0
- kekkai/compliance/soc2.py +209 -0
- kekkai/dojo.py +91 -14
- kekkai/fix/__init__.py +47 -0
- kekkai/fix/audit.py +278 -0
- kekkai/fix/differ.py +427 -0
- kekkai/fix/engine.py +500 -0
- kekkai/fix/prompts.py +251 -0
- kekkai/output.py +10 -12
- kekkai/report/__init__.py +41 -0
- kekkai/report/compliance_matrix.py +98 -0
- kekkai/report/generator.py +365 -0
- kekkai/report/html.py +69 -0
- kekkai/report/pdf.py +63 -0
- kekkai/scanners/container.py +33 -3
- kekkai/scanners/gitleaks.py +3 -1
- kekkai/scanners/semgrep.py +1 -1
- kekkai/scanners/trivy.py +1 -1
- kekkai/threatflow/model_adapter.py +143 -1
- kekkai_cli-1.1.0.dist-info/METADATA +359 -0
- {kekkai_cli-1.0.5.dist-info → kekkai_cli-1.1.0.dist-info}/RECORD +33 -16
- portal/enterprise/__init__.py +15 -2
- portal/enterprise/licensing.py +88 -22
- portal/web.py +9 -0
- kekkai_cli-1.0.5.dist-info/METADATA +0 -135
- {kekkai_cli-1.0.5.dist-info → kekkai_cli-1.1.0.dist-info}/WHEEL +0 -0
- {kekkai_cli-1.0.5.dist-info → kekkai_cli-1.1.0.dist-info}/entry_points.txt +0 -0
- {kekkai_cli-1.0.5.dist-info → kekkai_cli-1.1.0.dist-info}/top_level.txt +0 -0
kekkai/cli.py (CHANGED)
@@ -7,10 +7,10 @@ import sys
 from collections.abc import Sequence
 from datetime import UTC, datetime
 from pathlib import Path
-from typing import cast
+from typing import Any, cast
 
 from . import dojo, manifest
-from .config import ConfigOverrides, DojoSettings, PolicySettings, load_config
+from .config import DEFAULT_SCANNERS, ConfigOverrides, DojoSettings, PolicySettings, load_config
 from .dojo_import import DojoConfig, import_results_to_dojo
 from .output import (
     VERSION,
@@ -179,8 +179,8 @@ def main(argv: Sequence[str] | None = None) -> int:
     threatflow_parser.add_argument(
         "--model-mode",
         type=str,
-        choices=["local", "openai", "anthropic", "mock"],
-        help="LLM backend: local (
+        choices=["local", "ollama", "openai", "anthropic", "mock"],
+        help="LLM backend: local, ollama (recommended), openai, anthropic, or mock",
     )
     threatflow_parser.add_argument(
         "--model-path", type=str, help="Path to local model file (for local mode)"
     )
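This hunk adds `ollama` as an accepted `--model-mode` value. A minimal standalone sketch of how argparse's `choices` enforces this (the parser below is illustrative, not the package's):

```python
import argparse

# Illustrative parser mirroring the updated option: argparse rejects any
# value outside `choices` before command logic ever runs.
parser = argparse.ArgumentParser(prog="demo")
parser.add_argument(
    "--model-mode",
    type=str,
    choices=["local", "ollama", "openai", "anthropic", "mock"],
)

print(parser.parse_args(["--model-mode", "ollama"]).model_mode)  # ollama
# parser.parse_args(["--model-mode", "gemini"]) exits with
# "invalid choice: 'gemini'".
```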
@@ -217,6 +217,165 @@ def main(argv: Sequence[str] | None = None) -> int:
         help="Path for .kekkaiignore output (default: .kekkaiignore)",
     )
 
+    # Fix subcommand - AI-powered remediation
+    fix_parser = subparsers.add_parser("fix", help="generate AI-powered code fixes for findings")
+    fix_parser.add_argument(
+        "--input",
+        type=str,
+        help="Path to scan results JSON (Semgrep format)",
+    )
+    fix_parser.add_argument(
+        "--repo",
+        type=str,
+        help="Path to repository (default: current directory)",
+    )
+    fix_parser.add_argument(
+        "--output-dir",
+        type=str,
+        help="Output directory for diffs and audit log",
+    )
+    fix_parser.add_argument(
+        "--dry-run",
+        action="store_true",
+        default=True,
+        help="Preview fixes without applying (default: True)",
+    )
+    fix_parser.add_argument(
+        "--apply",
+        action="store_true",
+        help="Apply fixes to files (requires explicit flag)",
+    )
+    fix_parser.add_argument(
+        "--model-mode",
+        type=str,
+        choices=["local", "ollama", "openai", "anthropic", "mock"],
+        default="local",
+        help="LLM backend: local (default), openai, anthropic, or mock",
+    )
+    fix_parser.add_argument(
+        "--api-key",
+        type=str,
+        help="API key for remote LLM (prefer KEKKAI_FIX_API_KEY env var)",
+    )
+    fix_parser.add_argument(
+        "--model-name",
+        type=str,
+        help="Specific model name to use",
+    )
+    fix_parser.add_argument(
+        "--max-fixes",
+        type=int,
+        default=10,
+        help="Maximum fixes to generate per run (default: 10)",
+    )
+    fix_parser.add_argument(
+        "--timeout",
+        type=int,
+        default=120,
+        help="Timeout in seconds for LLM calls (default: 120)",
+    )
+    fix_parser.add_argument(
+        "--no-backup",
+        action="store_true",
+        help="Disable backup creation when applying fixes",
+    )
+
+    # Report subcommand - Compliance mapping and reporting
+    report_parser = subparsers.add_parser(
+        "report", help="generate compliance reports from scan findings"
+    )
+    report_parser.add_argument(
+        "--input",
+        type=str,
+        required=True,
+        help="Path to scan results JSON file",
+    )
+    report_parser.add_argument(
+        "--output",
+        type=str,
+        help="Output directory for reports (default: current directory)",
+    )
+    report_parser.add_argument(
+        "--format",
+        type=str,
+        default="html",
+        help="Report format: html, pdf, compliance, json, all (default: html)",
+    )
+    report_parser.add_argument(
+        "--frameworks",
+        type=str,
+        help="Comma-separated frameworks: PCI-DSS,SOC2,OWASP,HIPAA (default: all)",
+    )
+    report_parser.add_argument(
+        "--min-severity",
+        type=str,
+        default="info",
+        help="Minimum severity to include: critical,high,medium,low,info (default: info)",
+    )
+    report_parser.add_argument(
+        "--title",
+        type=str,
+        default="Security Scan Report",
+        help="Report title",
+    )
+    report_parser.add_argument(
+        "--organization",
+        type=str,
+        default="",
+        help="Organization name for report header",
+    )
+    report_parser.add_argument(
+        "--project",
+        type=str,
+        default="",
+        help="Project name for report header",
+    )
+    report_parser.add_argument(
+        "--no-executive-summary",
+        action="store_true",
+        help="Exclude executive summary section",
+    )
+    report_parser.add_argument(
+        "--no-timeline",
+        action="store_true",
+        help="Exclude remediation timeline section",
+    )
+
+    # Upload subcommand - upload scan results to DefectDojo
+    upload_parser = subparsers.add_parser("upload", help="upload scan results to DefectDojo")
+    upload_parser.add_argument(
+        "--run-id",
+        type=str,
+        help="Run ID to upload (default: latest run)",
+    )
+    upload_parser.add_argument(
+        "--input",
+        type=str,
+        help="Path to specific results file to upload",
+    )
+    upload_parser.add_argument(
+        "--dojo-url",
+        type=str,
+        help="DefectDojo base URL (default: http://localhost:8080)",
+    )
+    upload_parser.add_argument(
+        "--dojo-api-key",
+        type=str,
+        help="DefectDojo API key (or set KEKKAI_DOJO_API_KEY env var)",
+    )
+    upload_parser.add_argument(
+        "--product",
+        type=str,
+        default="Kekkai Scans",
+        help="DefectDojo product name",
+    )
+    upload_parser.add_argument(
+        "--engagement",
+        type=str,
+        default="Default Engagement",
+        help="DefectDojo engagement name",
+    )
+
     parsed = parser.parse_args(args)
     if parsed.command == "init":
         return _command_init(parsed.config, parsed.force)
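With these parsers in place, invocations such as the following parse successfully (a sketch using the module's `main` entry point; file paths and the API key are placeholders):

```python
from kekkai.cli import main

# Preview AI-generated fixes (dry run is the default; --apply opts in).
main(["fix", "--input", "semgrep.json", "--repo", ".", "--max-fixes", "5"])

# HTML report limited to two frameworks and high-or-worse findings.
main([
    "report", "--input", "semgrep.json",
    "--format", "html",
    "--frameworks", "PCI-DSS,OWASP",
    "--min-severity", "high",
])

# Upload the most recent run to a local DefectDojo instance.
main(["upload", "--dojo-url", "http://localhost:8080",
      "--dojo-api-key", "example-key"])
```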
@@ -249,6 +408,12 @@ def main(argv: Sequence[str] | None = None) -> int:
         return _command_threatflow(parsed)
     if parsed.command == "triage":
         return _command_triage(parsed)
+    if parsed.command == "fix":
+        return _command_fix(parsed)
+    if parsed.command == "report":
+        return _command_report(parsed)
+    if parsed.command == "upload":
+        return _command_upload(parsed)
 
     parser.print_help()
     return 1
@@ -489,7 +654,7 @@ def _resolve_scanners(override: str | None, config_scanners: list[str] | None) -
         return [s.strip() for s in override.split(",") if s.strip()]
     if config_scanners:
         return config_scanners
-    return
+    return list(DEFAULT_SCANNERS)
 
 
 def _resolve_policy_config(
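Previously the function could fall through to a bare `return` (i.e. `None`); it now falls back to the package defaults, which is why `DEFAULT_SCANNERS` joins the imports above. The resolution order, restated as a self-contained sketch (the scanner names in `DEFAULT_SCANNERS` are an assumed stand-in; the real list lives in `kekkai.config`):

```python
# Stand-in for kekkai.config.DEFAULT_SCANNERS (assumed contents).
DEFAULT_SCANNERS = ("trivy", "semgrep", "gitleaks")

def resolve_scanners(override: str | None, config_scanners: list[str] | None) -> list[str]:
    # 1. An explicit CLI override wins.
    if override:
        return [s.strip() for s in override.split(",") if s.strip()]
    # 2. Otherwise, scanners from the config file.
    if config_scanners:
        return config_scanners
    # 3. Otherwise, the built-in defaults (1.0.5 returned None here).
    return list(DEFAULT_SCANNERS)

assert resolve_scanners("semgrep, trivy", None) == ["semgrep", "trivy"]
assert resolve_scanners(None, None) == ["trivy", "semgrep", "gitleaks"]
```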
@@ -543,7 +708,7 @@ def _print_scan_summary_table(scan_results: list[ScanResult]) -> None:
         )
         for r in scan_results
     ]
-
+    print_scan_summary(rows)
 
 
 def _print_policy_summary(result: PolicyResult) -> None:
@@ -749,23 +914,44 @@ def _command_dojo(parsed: argparse.Namespace) -> int:
     project_name = _resolve_dojo_project_name(parsed)
 
     if parsed.dojo_command == "up":
-
-
+        requested_port = _resolve_dojo_port(parsed)
+        requested_tls_port = _resolve_dojo_tls_port(parsed, requested_port)
         try:
-            env = dojo.compose_up(
+            env, actual_port, actual_tls_port = dojo.compose_up(
                 compose_root=compose_root,
                 project_name=project_name,
-                port=
-                tls_port=
+                port=requested_port,
+                tls_port=requested_tls_port,
                 wait=bool(parsed.wait),
                 open_browser=bool(parsed.open),
             )
         except RuntimeError as exc:
             print(str(exc))
             return 1
-
-
-
+
+        # Warn if port was changed due to conflict
+        if actual_port != requested_port:
+            console.print(
+                f"[warning]Port {requested_port} was in use, using {actual_port} instead[/warning]"
+            )
+
+        console.print(
+            f"\n[bold cyan]DefectDojo is ready at[/bold cyan] http://localhost:{actual_port}/"
+        )
+        console.print("\n[bold]Login credentials:[/bold]")
+        console.print(f"  Username: {env.get('DD_ADMIN_USER', 'admin')}")
+        console.print(f"  Password: {env.get('DD_ADMIN_PASSWORD', '(see .env)')}")
+
+        # Show API key if generated (only when --wait was used)
+        api_key = env.get("DD_API_KEY")
+        if api_key:
+            console.print(f"\n[bold]API Key (for uploads):[/bold] {api_key}")
+        else:
+            console.print(
+                "\n[muted]Note: Run with --wait to auto-generate API key for uploads[/muted]"
+            )
+
+        console.print(f"\nCredentials saved to: {compose_root / '.env'}")
         return 0
 
     if parsed.dojo_command == "down":
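`dojo.compose_up` now reports the ports it actually bound alongside the env mapping, and the CLI warns when they differ from the requested ones. The probing itself happens in `kekkai/dojo.py` (changed in this release but not shown in this diff); a hedged sketch of one way such a fallback can work:

```python
import socket

def pick_port(requested: int, attempts: int = 10) -> int:
    """Return `requested` if it is free, else the next free port.

    Sketch only; kekkai/dojo.py may probe differently.
    """
    for candidate in range(requested, requested + attempts):
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            try:
                sock.bind(("127.0.0.1", candidate))
            except OSError:
                continue  # already bound by something else; try the next port
            return candidate
    raise RuntimeError(f"no free port in {requested}-{requested + attempts - 1}")
```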
@@ -938,6 +1124,499 @@ def _command_triage(parsed: argparse.Namespace) -> int:
     return run_triage(input_path=input_path, output_path=output_path)
 
 
+def _command_fix(parsed: argparse.Namespace) -> int:
+    """Run AI-powered code fix generation."""
+    from .fix import FixConfig, FixEngine
+
+    # Resolve input path
+    input_path_str = cast(str | None, getattr(parsed, "input", None))
+    if not input_path_str:
+        console.print("[danger]Error:[/danger] --input is required (path to scan results JSON)")
+        return 1
+
+    input_path = Path(input_path_str).expanduser().resolve()
+    if not input_path.exists():
+        console.print(f"[danger]Error:[/danger] Input file not found: {input_path}")
+        return 1
+
+    # Resolve repository path
+    repo_override = cast(str | None, getattr(parsed, "repo", None))
+    repo_path = Path(repo_override).expanduser().resolve() if repo_override else Path.cwd()
+
+    if not repo_path.exists() or not repo_path.is_dir():
+        console.print(f"[danger]Error:[/danger] Repository path not found: {repo_path}")
+        return 1
+
+    # Resolve output directory
+    output_dir_str = cast(str | None, getattr(parsed, "output_dir", None))
+    output_dir = Path(output_dir_str).expanduser().resolve() if output_dir_str else None
+
+    # Resolve model settings
+    model_mode = getattr(parsed, "model_mode", "local") or "local"
+    api_key = getattr(parsed, "api_key", None) or os.environ.get("KEKKAI_FIX_API_KEY")
+    model_name = getattr(parsed, "model_name", None)
+    max_fixes = getattr(parsed, "max_fixes", 10)
+    timeout = getattr(parsed, "timeout", 120)
+    no_backup = getattr(parsed, "no_backup", False)
+
+    # Determine dry_run: --apply overrides --dry-run
+    apply_fixes = getattr(parsed, "apply", False)
+    dry_run = not apply_fixes
+
+    # Display banner
+    console.print("\n[bold cyan]Kekkai Fix[/bold cyan] - AI-Powered Remediation")
+    console.print("=" * 50)
+    console.print(f"Repository: {repo_path}")
+    console.print(f"Input: {input_path}")
+    console.print(f"Model mode: {model_mode}")
+    console.print(f"Dry run: {dry_run}")
+
+    # Warn about remote mode
+    if model_mode in ("openai", "anthropic"):
+        console.print(
+            "\n[warning]*** WARNING: Using remote API. Code will be sent to external service. ***"
+            "[/warning]\n"
+        )
+        if not api_key:
+            console.print("[danger]Error:[/danger] API key required for remote mode.")
+            console.print("  Set --api-key or KEKKAI_FIX_API_KEY environment variable")
+            return 1
+
+    # Build config
+    config = FixConfig(
+        model_mode=model_mode,
+        api_key=api_key,
+        model_name=model_name,
+        max_fixes=max_fixes,
+        timeout_seconds=timeout,
+        dry_run=dry_run,
+        create_backups=not no_backup,
+    )
+
+    # Run fix engine
+    console.print("\nAnalyzing findings...")
+    engine = FixEngine(config)
+    result = engine.fix_from_scan_results(input_path, repo_path, output_dir)
+
+    if not result.success:
+        console.print(f"[danger]Error:[/danger] {result.error}")
+        return 1
+
+    # Print results
+    console.print("\n[success]Fix generation complete[/success]")
+    console.print(f"  Findings processed: {result.findings_processed}")
+    console.print(f"  Fixes generated: {result.fixes_generated}")
+
+    if not dry_run:
+        console.print(f"  Fixes applied: {result.fixes_applied}")
+
+    if result.warnings:
+        console.print("\n[warning]Warnings:[/warning]")
+        for w in result.warnings[:10]:
+            console.print(f"  - {sanitize_for_terminal(w)}")
+        if len(result.warnings) > 10:
+            console.print(f"  ... and {len(result.warnings) - 10} more")
+
+    # Show fix previews in dry run mode
+    if dry_run and result.suggestions:
+        console.print("\n[bold]Fix Previews:[/bold]")
+        for i, suggestion in enumerate(result.suggestions[:5], 1):
+            if suggestion.success:
+                console.print(
+                    f"\n--- Fix {i}: {suggestion.finding.file_path}:{suggestion.finding.line} ---"
+                )
+                console.print(f"Rule: {suggestion.finding.rule_id}")
+                console.print(suggestion.preview[:1000])
+                if len(suggestion.preview) > 1000:
+                    console.print("... (truncated)")
+        if len(result.suggestions) > 5:
+            console.print(f"\n... and {len(result.suggestions) - 5} more fixes")
+        console.print("\n[info]To apply fixes, run with --apply flag[/info]")
+
+    if result.audit_log_path:
+        console.print(f"\nAudit log: {result.audit_log_path}")
+
+    return 0
+
+
|
+
def _command_report(parsed: argparse.Namespace) -> int:
|
|
1243
|
+
"""Generate compliance reports from scan findings."""
|
|
1244
|
+
import json as _json
|
|
1245
|
+
|
|
1246
|
+
from .report import ReportConfig, ReportFormat, generate_report
|
|
1247
|
+
|
|
1248
|
+
# Resolve input path
|
|
1249
|
+
input_path_str = cast(str, parsed.input)
|
|
1250
|
+
input_path = Path(input_path_str).expanduser().resolve()
|
|
1251
|
+
|
|
1252
|
+
if not input_path.exists():
|
|
1253
|
+
console.print(f"[danger]Error:[/danger] Input file not found: {input_path}")
|
|
1254
|
+
return 1
|
|
1255
|
+
|
|
1256
|
+
# Resolve output directory
|
|
1257
|
+
output_str = cast(str | None, getattr(parsed, "output", None))
|
|
1258
|
+
output_dir = Path(output_str).expanduser().resolve() if output_str else Path.cwd()
|
|
1259
|
+
|
|
1260
|
+
# Parse format
|
|
1261
|
+
format_str = getattr(parsed, "format", "html").lower()
|
|
1262
|
+
formats: list[ReportFormat] = []
|
|
1263
|
+
if format_str == "all":
|
|
1264
|
+
formats = [ReportFormat.HTML, ReportFormat.PDF, ReportFormat.COMPLIANCE, ReportFormat.JSON]
|
|
1265
|
+
else:
|
|
1266
|
+
for fmt in format_str.split(","):
|
|
1267
|
+
fmt = fmt.strip()
|
|
1268
|
+
try:
|
|
1269
|
+
formats.append(ReportFormat(fmt))
|
|
1270
|
+
except ValueError:
|
|
1271
|
+
console.print(f"[danger]Error:[/danger] Unknown format: {fmt}")
|
|
1272
|
+
console.print(" Available: html, pdf, compliance, json, all")
|
|
1273
|
+
return 1
|
|
1274
|
+
|
|
1275
|
+
# Parse frameworks
|
|
1276
|
+
frameworks_str = cast(str | None, getattr(parsed, "frameworks", None))
|
|
1277
|
+
if frameworks_str:
|
|
1278
|
+
frameworks = [f.strip() for f in frameworks_str.split(",")]
|
|
1279
|
+
else:
|
|
1280
|
+
frameworks = ["PCI-DSS", "SOC2", "OWASP", "HIPAA"]
|
|
1281
|
+
|
|
1282
|
+
# Build config
|
|
1283
|
+
config = ReportConfig(
|
|
1284
|
+
formats=formats,
|
|
1285
|
+
frameworks=frameworks,
|
|
1286
|
+
min_severity=getattr(parsed, "min_severity", "info"),
|
|
1287
|
+
include_executive_summary=not getattr(parsed, "no_executive_summary", False),
|
|
1288
|
+
include_remediation_timeline=not getattr(parsed, "no_timeline", False),
|
|
1289
|
+
title=getattr(parsed, "title", "Security Scan Report"),
|
|
1290
|
+
organization=getattr(parsed, "organization", ""),
|
|
1291
|
+
project_name=getattr(parsed, "project", ""),
|
|
1292
|
+
)
|
|
1293
|
+
|
|
1294
|
+
# Display banner
|
|
1295
|
+
console.print("\n[bold cyan]Kekkai Report[/bold cyan] - Compliance Mapping & Reporting")
|
|
1296
|
+
console.print("=" * 55)
|
|
1297
|
+
console.print(f"Input: {input_path}")
|
|
1298
|
+
console.print(f"Output: {output_dir}")
|
|
1299
|
+
console.print(f"Formats: {', '.join(f.value for f in formats)}")
|
|
1300
|
+
console.print(f"Frameworks: {', '.join(frameworks)}")
|
|
1301
|
+
|
|
1302
|
+
# Load findings from input file
|
|
1303
|
+
console.print("\nLoading scan results...")
|
|
1304
|
+
try:
|
|
1305
|
+
with input_path.open() as f:
|
|
1306
|
+
data = _json.load(f)
|
|
1307
|
+
except _json.JSONDecodeError as e:
|
|
1308
|
+
console.print(f"[danger]Error:[/danger] Invalid JSON: {e}")
|
|
1309
|
+
return 1
|
|
1310
|
+
|
|
1311
|
+
# Parse findings based on input format
|
|
1312
|
+
findings = _parse_findings_from_json(data)
|
|
1313
|
+
|
|
1314
|
+
if not findings:
|
|
1315
|
+
console.print("[warning]Warning:[/warning] No findings found in input file")
|
|
1316
|
+
|
|
1317
|
+
console.print(f"Found {len(findings)} findings")
|
|
1318
|
+
|
|
1319
|
+
# Generate reports
|
|
1320
|
+
console.print("\nGenerating reports...")
|
|
1321
|
+
result = generate_report(findings, output_dir, config)
|
|
1322
|
+
|
|
1323
|
+
if not result.success:
|
|
1324
|
+
console.print("[danger]Report generation failed:[/danger]")
|
|
1325
|
+
for err in result.errors:
|
|
1326
|
+
console.print(f" - {sanitize_error(err)}")
|
|
1327
|
+
return 1
|
|
1328
|
+
|
|
1329
|
+
# Print results
|
|
1330
|
+
console.print(f"\n[success]Reports generated in {result.generation_time_ms}ms[/success]")
|
|
1331
|
+
console.print("\nOutput files:")
|
|
1332
|
+
for path in result.output_files:
|
|
1333
|
+
console.print(f" - {path}")
|
|
1334
|
+
|
|
1335
|
+
if result.warnings:
|
|
1336
|
+
console.print("\n[warning]Warnings:[/warning]")
|
|
1337
|
+
for w in result.warnings[:5]:
|
|
1338
|
+
console.print(f" - {w}")
|
|
1339
|
+
|
|
1340
|
+
return 0
|
|
1341
|
+
|
|
1342
|
+
|
|
1343
|
+
def _command_upload(parsed: argparse.Namespace) -> int:
|
|
1344
|
+
"""Upload scan results to DefectDojo."""
|
|
1345
|
+
import json as _json
|
|
1346
|
+
|
|
1347
|
+
# Resolve DefectDojo configuration
|
|
1348
|
+
dojo_url = (
|
|
1349
|
+
getattr(parsed, "dojo_url", None)
|
|
1350
|
+
or os.environ.get("KEKKAI_DOJO_URL")
|
|
1351
|
+
or "http://localhost:8080"
|
|
1352
|
+
)
|
|
1353
|
+
dojo_api_key = getattr(parsed, "dojo_api_key", None) or os.environ.get("KEKKAI_DOJO_API_KEY")
|
|
1354
|
+
|
|
1355
|
+
if not dojo_api_key:
|
|
1356
|
+
# Try to read from local dojo .env file
|
|
1357
|
+
dojo_env_path = app_base_dir() / "dojo" / ".env"
|
|
1358
|
+
if dojo_env_path.exists():
|
|
1359
|
+
env_data = dojo.load_env_file(dojo_env_path)
|
|
1360
|
+
dojo_api_key = env_data.get("DD_API_KEY")
|
|
1361
|
+
|
|
1362
|
+
if not dojo_api_key:
|
|
1363
|
+
console.print("[danger]Error:[/danger] DefectDojo API key required")
|
|
1364
|
+
console.print(" Set --dojo-api-key or KEKKAI_DOJO_API_KEY environment variable")
|
|
1365
|
+
console.print(" Or run 'kekkai dojo up' to start local DefectDojo first")
|
|
1366
|
+
return 1
|
|
1367
|
+
|
|
1368
|
+
product_name = getattr(parsed, "product", "Kekkai Scans")
|
|
1369
|
+
engagement_name = getattr(parsed, "engagement", "Default Engagement")
|
|
1370
|
+
|
|
1371
|
+
# Resolve input - either specific file or find latest run
|
|
1372
|
+
input_path_str = cast(str | None, getattr(parsed, "input", None))
|
|
1373
|
+
run_id_override = cast(str | None, getattr(parsed, "run_id", None))
|
|
1374
|
+
|
|
1375
|
+
if input_path_str:
|
|
1376
|
+
# Use specific input file
|
|
1377
|
+
input_path = Path(input_path_str).expanduser().resolve()
|
|
1378
|
+
if not input_path.exists():
|
|
1379
|
+
console.print(f"[danger]Error:[/danger] Input file not found: {input_path}")
|
|
1380
|
+
return 1
|
|
1381
|
+
run_dir = input_path.parent
|
|
1382
|
+
run_id = run_dir.name
|
|
1383
|
+
else:
|
|
1384
|
+
# Find latest run
|
|
1385
|
+
runs_dir = app_base_dir() / "runs"
|
|
1386
|
+
if not runs_dir.exists():
|
|
1387
|
+
console.print("[danger]Error:[/danger] No scan runs found")
|
|
1388
|
+
console.print(" Run 'kekkai scan' first to generate results")
|
|
1389
|
+
return 1
|
|
1390
|
+
|
|
1391
|
+
if run_id_override:
|
|
1392
|
+
run_dir = runs_dir / run_id_override
|
|
1393
|
+
if not run_dir.exists():
|
|
1394
|
+
console.print(f"[danger]Error:[/danger] Run not found: {run_id_override}")
|
|
1395
|
+
return 1
|
|
1396
|
+
run_id = run_id_override
|
|
1397
|
+
else:
|
|
1398
|
+
# Find most recent run
|
|
1399
|
+
run_dirs = sorted(
|
|
1400
|
+
[d for d in runs_dir.iterdir() if d.is_dir()],
|
|
1401
|
+
key=lambda d: d.stat().st_mtime,
|
|
1402
|
+
reverse=True,
|
|
1403
|
+
)
|
|
1404
|
+
if not run_dirs:
|
|
1405
|
+
console.print("[danger]Error:[/danger] No scan runs found")
|
|
1406
|
+
return 1
|
|
1407
|
+
run_dir = run_dirs[0]
|
|
1408
|
+
run_id = run_dir.name
|
|
1409
|
+
|
|
1410
|
+
console.print("\n[bold cyan]Kekkai Upload[/bold cyan] - DefectDojo Import")
|
|
1411
|
+
console.print("=" * 45)
|
|
1412
|
+
console.print(f"DefectDojo URL: {dojo_url}")
|
|
1413
|
+
console.print(f"Run ID: {run_id}")
|
|
1414
|
+
console.print(f"Product: {product_name}")
|
|
1415
|
+
console.print(f"Engagement: {engagement_name}")
|
|
1416
|
+
|
|
1417
|
+
# Find and load scan results
|
|
1418
|
+
scan_files = list(run_dir.glob("*.json"))
|
|
1419
|
+
scan_files = [f for f in scan_files if f.name not in ("run.json", "policy-result.json")]
|
|
1420
|
+
|
|
1421
|
+
if not scan_files:
|
|
1422
|
+
console.print(f"[danger]Error:[/danger] No scan results found in {run_dir}")
|
|
1423
|
+
return 1
|
|
1424
|
+
|
|
1425
|
+
console.print(f"\nFound {len(scan_files)} result file(s)")
|
|
1426
|
+
|
|
1427
|
+
# Build scan results for import
|
|
1428
|
+
scan_results: list[ScanResult] = []
|
|
1429
|
+
scanners_map: dict[str, Scanner] = {}
|
|
1430
|
+
|
|
1431
|
+
for scan_file in scan_files:
|
|
1432
|
+
scanner_name = scan_file.stem # e.g., "trivy", "semgrep", "gitleaks"
|
|
1433
|
+
console.print(f" Loading {scanner_name}...")
|
|
1434
|
+
|
|
1435
|
+
try:
|
|
1436
|
+
with scan_file.open() as f:
|
|
1437
|
+
data = _json.load(f)
|
|
1438
|
+
except _json.JSONDecodeError as e:
|
|
1439
|
+
console.print(f" [warning]Skipped (invalid JSON): {e}[/warning]")
|
|
1440
|
+
continue
|
|
1441
|
+
|
|
1442
|
+
# Parse findings based on format
|
|
1443
|
+
findings = _parse_findings_from_json(data)
|
|
1444
|
+
|
|
1445
|
+
if findings:
|
|
1446
|
+
scan_results.append(
|
|
1447
|
+
ScanResult(
|
|
1448
|
+
scanner=scanner_name,
|
|
1449
|
+
success=True,
|
|
1450
|
+
findings=findings,
|
|
1451
|
+
raw_output_path=scan_file,
|
|
1452
|
+
duration_ms=0,
|
|
1453
|
+
)
|
|
1454
|
+
)
|
|
1455
|
+
# Create scanner instance for import
|
|
1456
|
+
scanner = _create_scanner(scanner_name)
|
|
1457
|
+
if scanner:
|
|
1458
|
+
scanners_map[scanner_name] = scanner
|
|
1459
|
+
|
|
1460
|
+
console.print(f" {len(findings)} findings")
|
|
1461
|
+
|
|
1462
|
+
if not scan_results:
|
|
1463
|
+
console.print("[danger]Error:[/danger] No valid scan results to upload")
|
|
1464
|
+
return 1
|
|
1465
|
+
|
|
1466
|
+
# Import to DefectDojo
|
|
1467
|
+
console.print("\nUploading to DefectDojo...")
|
|
1468
|
+
|
|
1469
|
+
dojo_cfg = DojoConfig(
|
|
1470
|
+
base_url=dojo_url,
|
|
1471
|
+
api_key=dojo_api_key,
|
|
1472
|
+
product_name=product_name,
|
|
1473
|
+
engagement_name=engagement_name,
|
|
1474
|
+
)
|
|
1475
|
+
|
|
1476
|
+
# Get commit SHA from run manifest if available
|
|
1477
|
+
commit_sha: str | None = None
|
|
1478
|
+
manifest_path = run_dir / "run.json"
|
|
1479
|
+
if manifest_path.exists():
|
|
1480
|
+
try:
|
|
1481
|
+
with manifest_path.open() as f:
|
|
1482
|
+
manifest_data = _json.load(f)
|
|
1483
|
+
commit_sha = manifest_data.get("commit_sha")
|
|
1484
|
+
except (OSError, _json.JSONDecodeError):
|
|
1485
|
+
pass
|
|
1486
|
+
|
|
1487
|
+
import_results = import_results_to_dojo(
|
|
1488
|
+
config=dojo_cfg,
|
|
1489
|
+
results=scan_results,
|
|
1490
|
+
scanners=scanners_map,
|
|
1491
|
+
run_id=run_id,
|
|
1492
|
+
commit_sha=commit_sha,
|
|
1493
|
+
)
|
|
1494
|
+
|
|
1495
|
+
success_count = 0
|
|
1496
|
+
scanner_names_list = list(scanners_map.keys())
|
|
1497
|
+
for idx, ir in enumerate(import_results):
|
|
1498
|
+
scanner_label = (
|
|
1499
|
+
scanner_names_list[idx] if idx < len(scanner_names_list) else f"scanner-{idx}"
|
|
1500
|
+
)
|
|
1501
|
+
if ir.success:
|
|
1502
|
+
success_count += 1
|
|
1503
|
+
console.print(
|
|
1504
|
+
f" [success]{scanner_label}:[/success] {ir.findings_created} created, "
|
|
1505
|
+
f"{ir.findings_closed} closed"
|
|
1506
|
+
)
|
|
1507
|
+
else:
|
|
1508
|
+
err = sanitize_error(ir.error or "Unknown error")
|
|
1509
|
+
console.print(f" [danger]{scanner_label} failed:[/danger] {err}")
|
|
1510
|
+
|
|
1511
|
+
if success_count > 0:
|
|
1512
|
+
console.print(f"\n[success]Upload complete![/success] {success_count} scanner(s) imported")
|
|
1513
|
+
console.print(f"View results at: {dojo_url}")
|
|
1514
|
+
return 0
|
|
1515
|
+
else:
|
|
1516
|
+
console.print("\n[danger]Upload failed[/danger]")
|
|
1517
|
+
return 1
|
|
1518
|
+
|
|
1519
|
+
|
|
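Stripped of run discovery and per-scanner loading, the upload boils down to the `kekkai.dojo_import` calls already imported at the top of the module. A sketch (URL, key, and run ID are placeholders; the empty `results` and `scanners` would be built as in the loop above):

```python
from kekkai.dojo_import import DojoConfig, import_results_to_dojo

dojo_cfg = DojoConfig(
    base_url="http://localhost:8080",
    api_key="example-key",
    product_name="Kekkai Scans",
    engagement_name="Default Engagement",
)

import_results = import_results_to_dojo(
    config=dojo_cfg,
    results=[],                # placeholder: list[ScanResult]
    scanners={},               # placeholder: dict[str, Scanner]
    run_id="20240101-000000",  # placeholder run ID
    commit_sha=None,
)
for ir in import_results:
    if ir.success:
        print(f"{ir.findings_created} created, {ir.findings_closed} closed")
    else:
        print(f"failed: {ir.error}")
```

The hunk then adds the shared findings parser: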
+def _parse_findings_from_json(data: dict[str, Any] | list[Any]) -> list[Finding]:
+    """Parse findings from various JSON formats (Semgrep, Trivy, unified)."""
+    from .scanners.base import Severity
+
+    findings: list[Finding] = []
+
+    # Handle Semgrep format
+    if isinstance(data, dict) and "results" in data:
+        for item in data.get("results", []):
+            severity_str = item.get("extra", {}).get("severity", "INFO")
+            if severity_str == "ERROR":
+                severity = Severity.HIGH
+            elif severity_str == "WARNING":
+                severity = Severity.MEDIUM
+            else:
+                severity = Severity.from_string(severity_str)
+
+            findings.append(
+                Finding(
+                    scanner="semgrep",
+                    title=item.get("check_id", "Unknown"),
+                    severity=severity,
+                    description=item.get("extra", {}).get("message", ""),
+                    file_path=item.get("path"),
+                    line=item.get("start", {}).get("line"),
+                    rule_id=item.get("check_id"),
+                    cwe=_extract_cwe_from_metadata(item.get("extra", {}).get("metadata", {})),
+                )
+            )
+
+    # Handle Trivy format
+    elif isinstance(data, dict) and "Results" in data:
+        for result in data.get("Results", []):
+            for vuln in result.get("Vulnerabilities", []):
+                findings.append(
+                    Finding(
+                        scanner="trivy",
+                        title=vuln.get("Title", vuln.get("VulnerabilityID", "Unknown")),
+                        severity=Severity.from_string(vuln.get("Severity", "UNKNOWN")),
+                        description=vuln.get("Description", ""),
+                        cve=vuln.get("VulnerabilityID"),
+                        package_name=vuln.get("PkgName"),
+                        package_version=vuln.get("InstalledVersion"),
+                        fixed_version=vuln.get("FixedVersion"),
+                    )
+                )
+
+    # Handle unified Kekkai format (array of findings)
+    elif isinstance(data, list):
+        for item in data:
+            findings.append(
+                Finding(
+                    scanner=item.get("scanner", "unknown"),
+                    title=item.get("title", "Unknown"),
+                    severity=Severity.from_string(item.get("severity", "unknown")),
+                    description=item.get("description", ""),
+                    file_path=item.get("file_path"),
+                    line=item.get("line"),
+                    rule_id=item.get("rule_id"),
+                    cwe=item.get("cwe"),
+                    cve=item.get("cve"),
+                    package_name=item.get("package_name"),
+                    package_version=item.get("package_version"),
+                    fixed_version=item.get("fixed_version"),
+                )
+            )
+
+    # Handle Kekkai report JSON format (with findings array)
+    elif isinstance(data, dict) and "findings" in data:
+        for item in data.get("findings", []):
+            findings.append(
+                Finding(
+                    scanner=item.get("scanner", "unknown"),
+                    title=item.get("title", "Unknown"),
+                    severity=Severity.from_string(item.get("severity", "unknown")),
+                    description=item.get("description", ""),
+                    file_path=item.get("file_path"),
+                    line=item.get("line"),
+                    rule_id=item.get("rule_id"),
+                    cwe=item.get("cwe"),
+                    cve=item.get("cve"),
+                    package_name=item.get("package_name"),
+                    package_version=item.get("package_version"),
+                    fixed_version=item.get("fixed_version"),
+                )
+            )
+
+    return findings
+
+
|
+
def _extract_cwe_from_metadata(metadata: dict[str, Any]) -> str | None:
|
|
1611
|
+
"""Extract CWE from Semgrep metadata."""
|
|
1612
|
+
cwe_list = metadata.get("cwe", [])
|
|
1613
|
+
if isinstance(cwe_list, list) and cwe_list:
|
|
1614
|
+
return str(cwe_list[0])
|
|
1615
|
+
if isinstance(cwe_list, str):
|
|
1616
|
+
return cwe_list
|
|
1617
|
+
return None
|
|
1618
|
+
|
|
1619
|
+
|
|
941
1620
|
def _resolve_dojo_compose_dir(parsed: argparse.Namespace) -> str | None:
|
|
942
1621
|
compose_dir = cast(str | None, getattr(parsed, "compose_dir", None))
|
|
943
1622
|
if compose_dir:
|