kekkai_cli-1.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (90)
  1. kekkai/__init__.py +7 -0
  2. kekkai/cli.py +1038 -0
  3. kekkai/config.py +403 -0
  4. kekkai/dojo.py +419 -0
  5. kekkai/dojo_import.py +213 -0
  6. kekkai/github/__init__.py +16 -0
  7. kekkai/github/commenter.py +198 -0
  8. kekkai/github/models.py +56 -0
  9. kekkai/github/sanitizer.py +112 -0
  10. kekkai/installer/__init__.py +39 -0
  11. kekkai/installer/errors.py +23 -0
  12. kekkai/installer/extract.py +161 -0
  13. kekkai/installer/manager.py +252 -0
  14. kekkai/installer/manifest.py +189 -0
  15. kekkai/installer/verify.py +86 -0
  16. kekkai/manifest.py +77 -0
  17. kekkai/output.py +218 -0
  18. kekkai/paths.py +46 -0
  19. kekkai/policy.py +326 -0
  20. kekkai/runner.py +70 -0
  21. kekkai/scanners/__init__.py +67 -0
  22. kekkai/scanners/backends/__init__.py +14 -0
  23. kekkai/scanners/backends/base.py +73 -0
  24. kekkai/scanners/backends/docker.py +178 -0
  25. kekkai/scanners/backends/native.py +240 -0
  26. kekkai/scanners/base.py +110 -0
  27. kekkai/scanners/container.py +144 -0
  28. kekkai/scanners/falco.py +237 -0
  29. kekkai/scanners/gitleaks.py +237 -0
  30. kekkai/scanners/semgrep.py +227 -0
  31. kekkai/scanners/trivy.py +246 -0
  32. kekkai/scanners/url_policy.py +163 -0
  33. kekkai/scanners/zap.py +340 -0
  34. kekkai/threatflow/__init__.py +94 -0
  35. kekkai/threatflow/artifacts.py +476 -0
  36. kekkai/threatflow/chunking.py +361 -0
  37. kekkai/threatflow/core.py +438 -0
  38. kekkai/threatflow/mermaid.py +374 -0
  39. kekkai/threatflow/model_adapter.py +491 -0
  40. kekkai/threatflow/prompts.py +277 -0
  41. kekkai/threatflow/redaction.py +228 -0
  42. kekkai/threatflow/sanitizer.py +643 -0
  43. kekkai/triage/__init__.py +33 -0
  44. kekkai/triage/app.py +168 -0
  45. kekkai/triage/audit.py +203 -0
  46. kekkai/triage/ignore.py +269 -0
  47. kekkai/triage/models.py +185 -0
  48. kekkai/triage/screens.py +341 -0
  49. kekkai/triage/widgets.py +169 -0
  50. kekkai_cli-1.0.0.dist-info/METADATA +135 -0
  51. kekkai_cli-1.0.0.dist-info/RECORD +90 -0
  52. kekkai_cli-1.0.0.dist-info/WHEEL +5 -0
  53. kekkai_cli-1.0.0.dist-info/entry_points.txt +3 -0
  54. kekkai_cli-1.0.0.dist-info/top_level.txt +3 -0
  55. kekkai_core/__init__.py +3 -0
  56. kekkai_core/ci/__init__.py +11 -0
  57. kekkai_core/ci/benchmarks.py +354 -0
  58. kekkai_core/ci/metadata.py +104 -0
  59. kekkai_core/ci/validators.py +92 -0
  60. kekkai_core/docker/__init__.py +17 -0
  61. kekkai_core/docker/metadata.py +153 -0
  62. kekkai_core/docker/sbom.py +173 -0
  63. kekkai_core/docker/security.py +158 -0
  64. kekkai_core/docker/signing.py +135 -0
  65. kekkai_core/redaction.py +84 -0
  66. kekkai_core/slsa/__init__.py +13 -0
  67. kekkai_core/slsa/verify.py +121 -0
  68. kekkai_core/windows/__init__.py +29 -0
  69. kekkai_core/windows/chocolatey.py +335 -0
  70. kekkai_core/windows/installer.py +256 -0
  71. kekkai_core/windows/scoop.py +165 -0
  72. kekkai_core/windows/validators.py +220 -0
  73. portal/__init__.py +19 -0
  74. portal/api.py +155 -0
  75. portal/auth.py +103 -0
  76. portal/enterprise/__init__.py +32 -0
  77. portal/enterprise/audit.py +435 -0
  78. portal/enterprise/licensing.py +342 -0
  79. portal/enterprise/rbac.py +276 -0
  80. portal/enterprise/saml.py +595 -0
  81. portal/ops/__init__.py +53 -0
  82. portal/ops/backup.py +553 -0
  83. portal/ops/log_shipper.py +469 -0
  84. portal/ops/monitoring.py +517 -0
  85. portal/ops/restore.py +469 -0
  86. portal/ops/secrets.py +408 -0
  87. portal/ops/upgrade.py +591 -0
  88. portal/tenants.py +340 -0
  89. portal/uploads.py +259 -0
  90. portal/web.py +384 -0
kekkai/cli.py ADDED
@@ -0,0 +1,1038 @@
1
+ from __future__ import annotations
2
+
3
+ import argparse
4
+ import os
5
+ import re
6
+ import sys
7
+ from collections.abc import Sequence
8
+ from datetime import UTC, datetime
9
+ from pathlib import Path
10
+ from typing import cast
11
+
12
+ from . import dojo, manifest
13
+ from .config import ConfigOverrides, DojoSettings, PolicySettings, load_config
14
+ from .dojo_import import DojoConfig, import_results_to_dojo
15
+ from .output import (
16
+ VERSION,
17
+ ScanSummaryRow,
18
+ console,
19
+ print_quick_start,
20
+ print_scan_summary,
21
+ sanitize_error,
22
+ sanitize_for_terminal,
23
+ splash,
24
+ )
25
+ from .paths import app_base_dir, config_path, ensure_dir, is_within_base, safe_join
26
+ from .policy import (
27
+ EXIT_SCAN_ERROR,
28
+ PolicyConfig,
29
+ PolicyResult,
30
+ default_ci_policy,
31
+ evaluate_policy,
32
+ parse_fail_on,
33
+ )
34
+ from .runner import StepResult, run_step
35
+ from .scanners import (
36
+ OPTIONAL_SCANNERS,
37
+ SCANNER_REGISTRY,
38
+ Finding,
39
+ ScanContext,
40
+ Scanner,
41
+ ScanResult,
42
+ create_falco_scanner,
43
+ create_zap_scanner,
44
+ dedupe_findings,
45
+ )
46
+
47
+ RUN_ID_PATTERN = re.compile(r"^[a-zA-Z0-9_.-]{3,64}$")
48
+
49
+
50
def main(argv: Sequence[str] | None = None) -> int:
    """CLI entry point for the ``kekkai`` command.

    Builds the argument parser (init / scan / dojo / threatflow / triage
    subcommands) and dispatches to the matching ``_command_*`` handler.

    Args:
        argv: Argument list to parse; defaults to ``sys.argv[1:]``.

    Returns:
        Process exit code: 0 on success, 1 on usage errors, or whatever
        the dispatched subcommand returns.
    """
    args = list(argv) if argv is not None else sys.argv[1:]
    if not args:
        # Bare `kekkai` gets first-run/splash handling instead of argparse help.
        return _handle_no_args()

    parser = argparse.ArgumentParser(prog="kekkai")
    parser.add_argument("--version", action="version", version=f"kekkai {VERSION}")
    subparsers = parser.add_subparsers(dest="command")

    init_parser = subparsers.add_parser("init", help="initialize config and directories")
    init_parser.add_argument("--config", type=str, help="Path to config file")
    init_parser.add_argument("--force", action="store_true", help="Overwrite existing config")

    scan_parser = subparsers.add_parser("scan", help="run a scan pipeline")
    scan_parser.add_argument("--config", type=str, help="Path to config file")
    scan_parser.add_argument("--repo", type=str, help="Path to repository")
    scan_parser.add_argument("--run-dir", type=str, help="Override run output directory")
    scan_parser.add_argument("--run-id", type=str, help="Override run id")
    scan_parser.add_argument(
        "--scanners",
        type=str,
        help="Comma-separated list of scanners (trivy,semgrep,gitleaks)",
    )
    scan_parser.add_argument(
        "--import-dojo",
        action="store_true",
        help="Import results to local DefectDojo",
    )
    scan_parser.add_argument("--dojo-url", type=str, help="DefectDojo base URL")
    scan_parser.add_argument("--dojo-api-key", type=str, help="DefectDojo API key")

    # ZAP DAST scanner options
    scan_parser.add_argument(
        "--target-url",
        type=str,
        help="Target URL for ZAP DAST scanning (required if zap in scanners)",
    )
    scan_parser.add_argument(
        "--allow-private-ips",
        action="store_true",
        help="Allow ZAP to scan private/internal IPs (DANGEROUS)",
    )

    # Falco runtime security options
    scan_parser.add_argument(
        "--enable-falco",
        action="store_true",
        help="Enable Falco runtime security (Linux-only, experimental)",
    )

    # CI mode and policy enforcement options
    scan_parser.add_argument(
        "--ci",
        action="store_true",
        help="Enable CI mode: fail on policy violations (default: critical/high)",
    )
    scan_parser.add_argument(
        "--fail-on",
        type=str,
        help="Severity levels to fail on (e.g., 'critical,high' or 'medium')",
    )
    scan_parser.add_argument(
        "--output",
        type=str,
        help="Path for policy result JSON output",
    )

    # GitHub PR comment options
    scan_parser.add_argument(
        "--pr-comment",
        action="store_true",
        help="Post findings as GitHub PR review comments",
    )
    scan_parser.add_argument(
        "--github-token",
        type=str,
        help="GitHub token (or set GITHUB_TOKEN env var)",
    )
    scan_parser.add_argument(
        "--pr-number",
        type=int,
        help="PR number to comment on (auto-detected in GitHub Actions)",
    )
    scan_parser.add_argument(
        "--github-repo",
        type=str,
        help="GitHub repository (owner/repo, auto-detected in GitHub Actions)",
    )
    scan_parser.add_argument(
        "--max-comments",
        type=int,
        default=50,
        help="Maximum PR comments to post (default: 50)",
    )
    scan_parser.add_argument(
        "--comment-severity",
        type=str,
        default="medium",
        help="Minimum severity for PR comments (default: medium)",
    )

    dojo_parser = subparsers.add_parser("dojo", help="manage local DefectDojo stack")
    dojo_subparsers = dojo_parser.add_subparsers(dest="dojo_command")

    dojo_up = dojo_subparsers.add_parser("up", help="start the local DefectDojo stack")
    dojo_up.add_argument("--compose-dir", type=str, help="Directory for compose files")
    dojo_up.add_argument("--project-name", type=str, help="Docker Compose project name")
    dojo_up.add_argument("--port", type=int, help="HTTP port for the UI")
    dojo_up.add_argument("--tls-port", type=int, help="HTTPS port for the UI")
    dojo_up.add_argument("--wait", action="store_true", help="Wait for UI readiness")
    dojo_up.add_argument("--open", action="store_true", help="Open the UI in a browser")

    dojo_down = dojo_subparsers.add_parser("down", help="stop the local DefectDojo stack")
    dojo_down.add_argument("--compose-dir", type=str, help="Directory for compose files")
    dojo_down.add_argument("--project-name", type=str, help="Docker Compose project name")

    dojo_status = dojo_subparsers.add_parser("status", help="show stack status")
    dojo_status.add_argument("--compose-dir", type=str, help="Directory for compose files")
    dojo_status.add_argument("--project-name", type=str, help="Docker Compose project name")

    dojo_open = dojo_subparsers.add_parser("open", help="open the local UI in a browser")
    dojo_open.add_argument("--compose-dir", type=str, help="Directory for compose files")
    dojo_open.add_argument("--port", type=int, help="HTTP port for the UI")

    # ThreatFlow threat modeling subcommand
    threatflow_parser = subparsers.add_parser(
        "threatflow", help="generate threat model for a repository"
    )
    threatflow_parser.add_argument("--repo", type=str, help="Path to repository to analyze")
    threatflow_parser.add_argument("--output-dir", type=str, help="Output directory for artifacts")
    threatflow_parser.add_argument(
        "--model-mode",
        type=str,
        choices=["local", "openai", "anthropic", "mock"],
        help="LLM backend: local (default), openai, anthropic, or mock for testing",
    )
    threatflow_parser.add_argument(
        "--model-path", type=str, help="Path to local model file (for local mode)"
    )
    threatflow_parser.add_argument(
        "--api-key", type=str, help="API key for remote LLM (prefer env var)"
    )
    threatflow_parser.add_argument("--model-name", type=str, help="Specific model name to use")
    threatflow_parser.add_argument(
        "--max-files", type=int, default=500, help="Maximum files to analyze"
    )
    threatflow_parser.add_argument(
        "--timeout", type=int, default=300, help="Timeout in seconds for model calls"
    )
    threatflow_parser.add_argument(
        "--no-redact", action="store_true", help="Disable secret redaction (NOT RECOMMENDED)"
    )
    threatflow_parser.add_argument(
        "--no-sanitize",
        action="store_true",
        help="Disable prompt injection sanitization (NOT RECOMMENDED)",
    )

    # Triage TUI subcommand
    triage_parser = subparsers.add_parser("triage", help="interactively triage security findings")
    triage_parser.add_argument(
        "--input",
        type=str,
        help="Path to findings JSON file (from scan output)",
    )
    triage_parser.add_argument(
        "--output",
        type=str,
        help="Path for .kekkaiignore output (default: .kekkaiignore)",
    )

    # Dispatch to the subcommand handlers. _command_threatflow and
    # _command_triage are defined later in this module.
    parsed = parser.parse_args(args)
    if parsed.command == "init":
        return _command_init(parsed.config, parsed.force)
    if parsed.command == "scan":
        return _command_scan(
            parsed.config,
            parsed.repo,
            parsed.run_dir,
            parsed.run_id,
            parsed.scanners,
            parsed.import_dojo,
            parsed.dojo_url,
            parsed.dojo_api_key,
            parsed.target_url,
            parsed.allow_private_ips,
            parsed.enable_falco,
            parsed.ci,
            parsed.fail_on,
            parsed.output,
            pr_comment=parsed.pr_comment,
            github_token=parsed.github_token,
            pr_number=parsed.pr_number,
            github_repo=parsed.github_repo,
            max_comments=parsed.max_comments,
            comment_severity=parsed.comment_severity,
        )
    if parsed.command == "dojo":
        return _command_dojo(parsed)
    if parsed.command == "threatflow":
        return _command_threatflow(parsed)
    if parsed.command == "triage":
        return _command_triage(parsed)

    # Unknown/missing subcommand: show help and signal a usage error.
    parser.print_help()
    return 1
256
+
257
+
258
def _handle_no_args() -> int:
    """Default behavior when `kekkai` is invoked with no arguments.

    On a first run (no config file yet) this bootstraps via the init
    command; otherwise it shows the splash screen and a short hint of
    the next commands to try.
    """
    if not config_path().exists():
        # First run: behave like `kekkai init`.
        return _command_init(None, False)

    console.print(splash())
    console.print("Config exists. Run one of:")
    console.print(" [green]kekkai scan[/green]")
    console.print(" [green]kekkai init --force[/green]")
    return 0
267
+
268
+
269
def _command_init(config_override: str | None, force: bool) -> int:
    """Create the kekkai base directories and write a fresh config file.

    Args:
        config_override: Explicit config file path, or None to use the
            default location from ``_resolve_config_path``.
        force: Overwrite an existing config file when True.

    Returns:
        0 on success; 1 if the config exists and ``force`` is not set.
    """
    cfg_path = _resolve_config_path(config_override)
    if cfg_path.exists() and not force:
        print(f"Config already exists at {cfg_path}. Use --force to overwrite.")
        return 1

    # Make sure the app base dir, runs dir, and config parent all exist.
    base_dir = app_base_dir()
    ensure_dir(base_dir)
    ensure_dir(base_dir / "runs")
    ensure_dir(cfg_path.parent)

    # NOTE(review): load_config_text is not among this module's visible
    # imports -- presumably provided by kekkai.config; confirm it is
    # actually in scope, otherwise this raises NameError at runtime.
    cfg_path.write_text(load_config_text(base_dir))
    console.print(splash())
    console.print(f"Initialized config at [cyan]{cfg_path}[/cyan]")
    console.print(print_quick_start())
    return 0
285
+
286
+
287
def _command_scan(
    config_override: str | None,
    repo_override: str | None,
    run_dir_override: str | None,
    run_id_override: str | None,
    scanners_override: str | None,
    import_dojo: bool,
    dojo_url_override: str | None,
    dojo_api_key_override: str | None,
    target_url_override: str | None = None,
    allow_private_ips: bool = False,
    enable_falco: bool = False,
    ci_mode: bool = False,
    fail_on_override: str | None = None,
    output_path: str | None = None,
    *,
    pr_comment: bool = False,
    github_token: str | None = None,
    pr_number: int | None = None,
    github_repo: str | None = None,
    max_comments: int = 50,
    comment_severity: str = "medium",
) -> int:
    """Run the full scan pipeline: steps, scanners, import, policy.

    Order of operations: load config, validate repo path and run id,
    run configured pipeline steps, run the requested scanners, optionally
    import results into DefectDojo, write the run manifest, optionally
    post GitHub PR comments, and finally evaluate the CI policy.

    Returns:
        0 on success, 1 on config/path/run-id validation errors,
        the policy exit code in CI mode, or EXIT_SCAN_ERROR when a
        pipeline step or a ZAP/Falco scanner failed.
    """
    cfg_path = _resolve_config_path(config_override)
    if not cfg_path.exists():
        print(f"Config not found at {cfg_path}. Run `kekkai init`.")
        return 1

    overrides = ConfigOverrides(repo_path=Path(repo_override) if repo_override else None)
    cfg = load_config(cfg_path, overrides=overrides, base_dir=app_base_dir())

    repo_path = _resolve_repo_path(cfg.repo_path)
    if not repo_path.exists() or not repo_path.is_dir():
        print(f"Repo path not found: {repo_path}")
        return 1

    # Run id is embedded in paths; RUN_ID_PATTERN keeps it filesystem-safe.
    run_id = _resolve_run_id(run_id_override)
    if not RUN_ID_PATTERN.match(run_id):
        print("Run id must be 3-64 chars (letters, digits, ._-)")
        return 1

    base_dir = app_base_dir()
    run_dir = _resolve_run_dir(base_dir, cfg.run_base_dir, run_id, run_dir_override)
    ensure_dir(run_dir)

    # Determine which scanners to run
    scanner_names = _resolve_scanners(scanners_override, cfg.scanners)

    started_at = _now_iso()
    step_results: list[StepResult] = []
    scan_results: list[ScanResult] = []
    status_ok = True

    # Run pipeline steps if configured
    for step in cfg.pipeline:
        result = run_step(
            step,
            cwd=repo_path,
            env_allowlist=cfg.env_allowlist,
            timeout_seconds=cfg.timeout_seconds,
        )
        step_results.append(result)
        if result.exit_code != 0:
            # A failed step aborts the pipeline; scanners are skipped below.
            status_ok = False
            break

    # Run container-based scanners
    if scanner_names and status_ok:
        commit_sha = _get_commit_sha(repo_path)
        ctx = ScanContext(
            repo_path=repo_path,
            output_dir=run_dir,
            run_id=run_id,
            commit_sha=commit_sha,
            timeout_seconds=cfg.timeout_seconds,
        )
        scanners_map = {}

        # Resolve ZAP target URL (CLI flag, then env var, then config file).
        zap_target_url = target_url_override or os.environ.get("KEKKAI_ZAP_TARGET_URL")
        if cfg.zap and cfg.zap.target_url:
            zap_target_url = zap_target_url or cfg.zap.target_url

        # Resolve ZAP allow_private_ips (CLI flag or config may enable it).
        zap_allow_private = allow_private_ips
        if cfg.zap and cfg.zap.allow_private_ips:
            zap_allow_private = True

        # Resolve Falco enabled (CLI flag, env var, or config).
        falco_enabled = enable_falco or os.environ.get("KEKKAI_ENABLE_FALCO") == "1"
        if cfg.falco and cfg.falco.enabled:
            falco_enabled = True

        for name in scanner_names:
            scanner = _create_scanner(
                name=name,
                zap_target_url=zap_target_url,
                zap_allow_private_ips=zap_allow_private,
                zap_allowed_domains=cfg.zap.allowed_domains if cfg.zap else [],
                falco_enabled=falco_enabled,
            )
            if scanner is None:
                console.print(f"[warning]Unknown scanner: {sanitize_for_terminal(name)}[/warning]")
                continue

            scanners_map[name] = scanner
            console.print(f"Running [cyan]{sanitize_for_terminal(name)}[/cyan]...")
            scan_result = scanner.run(ctx)
            scan_results.append(scan_result)
            if not scan_result.success:
                err_msg = sanitize_error(scan_result.error or "Unknown error")
                console.print(f" [danger]{sanitize_for_terminal(name)} failed:[/danger] {err_msg}")
                # For ZAP/Falco: failures should not be hidden
                if name in ("zap", "falco"):
                    status_ok = False
            else:
                deduped = dedupe_findings(scan_result.findings)
                console.print(
                    f" [success]{sanitize_for_terminal(name)}:[/success] {len(deduped)} findings"
                )

        # Import to DefectDojo if requested
        # (nested here: uses commit_sha and scanners_map from this branch)
        if import_dojo or (cfg.dojo and cfg.dojo.enabled):
            dojo_cfg = _resolve_dojo_config(
                cfg.dojo,
                dojo_url_override,
                dojo_api_key_override,
            )
            if dojo_cfg and dojo_cfg.api_key:
                console.print("Importing to DefectDojo...")
                import_results = import_results_to_dojo(
                    config=dojo_cfg,
                    results=scan_results,
                    scanners=scanners_map,
                    run_id=run_id,
                    commit_sha=commit_sha,
                )
                for ir in import_results:
                    if ir.success:
                        created, closed = ir.findings_created, ir.findings_closed
                        console.print(
                            f" [success]Imported:[/success] {created} created, {closed} closed"
                        )
                    else:
                        err = sanitize_error(ir.error or "")
                        console.print(f" [danger]Import failed:[/danger] {err}")
            else:
                console.print("[muted]DefectDojo import skipped: no API key configured[/muted]")

    finished_at = _now_iso()
    run_manifest = manifest.build_manifest(
        run_id=run_id,
        repo_path=repo_path,
        run_dir=run_dir,
        started_at=started_at,
        finished_at=finished_at,
        steps=step_results,
    )
    manifest.write_manifest(run_dir / "run.json", run_manifest)

    # Collect all findings for policy evaluation
    all_findings: list[Finding] = []
    scan_errors: list[str] = []
    for scan_res in scan_results:
        if scan_res.success:
            all_findings.extend(dedupe_findings(scan_res.findings))
        elif scan_res.error:
            scan_errors.append(f"{scan_res.scanner}: {scan_res.error}")

    # Post PR comments if requested
    if pr_comment and all_findings:
        _post_pr_comments(
            all_findings,
            github_token=github_token,
            pr_number=pr_number,
            github_repo=github_repo,
            max_comments=max_comments,
            min_severity=comment_severity,
        )

    # Apply policy in CI mode
    if ci_mode or fail_on_override:
        policy_config = _resolve_policy_config(cfg.policy, fail_on_override, ci_mode)
        policy_result = evaluate_policy(all_findings, policy_config, scan_errors)

        # Write policy result JSON
        result_path = Path(output_path) if output_path else (run_dir / "policy-result.json")
        policy_result.write_json(result_path)

        # Print summary
        _print_policy_summary(policy_result)

        # Print scan summary table
        _print_scan_summary_table(scan_results)

        console.print(f"Run complete: [cyan]{run_dir}[/cyan]")
        return policy_result.exit_code

    # Print scan summary table
    _print_scan_summary_table(scan_results)

    console.print(f"Run complete: [cyan]{run_dir}[/cyan]")
    return 0 if status_ok else EXIT_SCAN_ERROR
490
+
491
+
492
+ def _resolve_scanners(override: str | None, config_scanners: list[str] | None) -> list[str]:
493
+ if override:
494
+ return [s.strip() for s in override.split(",") if s.strip()]
495
+ if config_scanners:
496
+ return config_scanners
497
+ return []
498
+
499
+
500
def _resolve_policy_config(
    settings: PolicySettings | None,
    fail_on_override: str | None,
    ci_mode: bool,
) -> PolicyConfig:
    """Resolve policy configuration from settings and overrides.

    Priority: --fail-on > config file [policy] > default CI policy
    (the default is also the fallback when no [policy] section exists,
    whether or not ci_mode is set).
    """
    # Explicit --fail-on beats everything else.
    if fail_on_override:
        return parse_fail_on(fail_on_override)

    # No [policy] section: default CI policy either way.
    if settings is None:
        return default_ci_policy()

    # Mirror the config file's thresholds one-to-one.
    return PolicyConfig(
        fail_on_critical=settings.fail_on_critical,
        fail_on_high=settings.fail_on_high,
        fail_on_medium=settings.fail_on_medium,
        fail_on_low=settings.fail_on_low,
        fail_on_info=settings.fail_on_info,
        max_critical=settings.max_critical,
        max_high=settings.max_high,
        max_medium=settings.max_medium,
        max_low=settings.max_low,
        max_info=settings.max_info,
        max_total=settings.max_total,
    )
535
+
536
+
537
def _print_scan_summary_table(scan_results: list[ScanResult]) -> None:
    """Render the per-scanner summary table, if any scanners ran."""
    if not scan_results:
        return

    rows: list[ScanSummaryRow] = []
    for res in scan_results:
        # Failed scanners report zero findings in the table.
        count = len(dedupe_findings(res.findings)) if res.success else 0
        rows.append(
            ScanSummaryRow(
                scanner=res.scanner,
                success=res.success,
                findings_count=count,
                duration_ms=res.duration_ms,
            )
        )
    console.print(print_scan_summary(rows))
552
+
553
+
554
def _print_policy_summary(result: PolicyResult) -> None:
    """Print the policy verdict, severity counts, violations, and scan errors."""
    verdict = "[success]PASSED[/success]" if result.passed else "[danger]FAILED[/danger]"
    counts = result.counts

    console.print(f"\nPolicy Evaluation: {verdict}")
    console.print(f" Findings: {counts.total} total")
    console.print(f" [danger]Critical:[/danger] {counts.critical}")
    console.print(f" [warning]High:[/warning] {counts.high}")
    console.print(f" [info]Medium:[/info] {counts.medium}")
    console.print(f" Low: {counts.low}")
    console.print(f" [muted]Info:[/muted] {counts.info}")

    if result.violations:
        console.print(" [danger]Violations:[/danger]")
        for violation in result.violations:
            console.print(f" - {sanitize_for_terminal(violation.message)}")

    if result.scan_errors:
        console.print(" [warning]Scan Errors:[/warning]")
        for scan_error in result.scan_errors:
            console.print(f" - {sanitize_error(scan_error)}")
575
+
576
+
577
def _post_pr_comments(
    findings: list[Finding],
    *,
    github_token: str | None,
    pr_number: int | None,
    github_repo: str | None,
    max_comments: int,
    min_severity: str,
) -> None:
    """Post findings as GitHub PR review comments.

    Best-effort: every failure path emits a warning and returns, so a
    broken GitHub integration never aborts the surrounding scan.
    """
    # Token: explicit argument first, then the GITHUB_TOKEN env var.
    token = github_token or os.environ.get("GITHUB_TOKEN")
    if not token:
        console.print("[warning]PR comment requested but no GitHub token provided[/warning]")
        return

    # PR number: explicit argument, else the GitHub Actions event payload.
    number = pr_number if pr_number is not None else _detect_pr_number()
    if number is None:
        console.print("[warning]PR comment requested but no PR number detected[/warning]")
        return

    owner, repo = _resolve_github_repo(github_repo)
    if not owner or not repo:
        console.print("[warning]PR comment requested but repository not detected[/warning]")
        return

    try:
        from .github import GitHubConfig
        from .github import post_pr_comments as _post_comments

        result = _post_comments(
            findings,
            GitHubConfig(
                token=token,
                owner=owner,
                repo=repo,
                pr_number=number,
            ),
            max_comments=max_comments,
            min_severity=min_severity,
        )
        if result.success:
            console.print(f"[success]Posted {result.comments_posted} PR comment(s)[/success]")
            if result.review_url:
                console.print(f" Review: [link]{result.review_url}[/link]")
        else:
            for err in result.errors:
                console.print(f"[warning]PR comment error: {sanitize_error(err)}[/warning]")
    except Exception as e:
        console.print(f"[warning]Failed to post PR comments: {sanitize_error(str(e))}[/warning]")
631
+
632
+
633
+ def _detect_pr_number() -> int | None:
634
+ """Auto-detect PR number from GitHub Actions environment."""
635
+ import json as _json
636
+
637
+ event_path = os.environ.get("GITHUB_EVENT_PATH")
638
+ if not event_path:
639
+ return None
640
+
641
+ try:
642
+ with open(event_path) as f:
643
+ event: dict[str, dict[str, int]] = _json.load(f)
644
+ pr = event.get("pull_request", {})
645
+ return pr.get("number")
646
+ except (OSError, ValueError, KeyError):
647
+ return None
648
+
649
+
650
+ def _resolve_github_repo(override: str | None) -> tuple[str | None, str | None]:
651
+ """Resolve GitHub owner/repo from override or environment."""
652
+ if override and "/" in override:
653
+ parts = override.split("/", 1)
654
+ return parts[0], parts[1]
655
+
656
+ # Try GITHUB_REPOSITORY env var (set in GitHub Actions)
657
+ repo_env = os.environ.get("GITHUB_REPOSITORY")
658
+ if repo_env and "/" in repo_env:
659
+ parts = repo_env.split("/", 1)
660
+ return parts[0], parts[1]
661
+
662
+ return None, None
663
+
664
+
665
def _create_scanner(
    name: str,
    zap_target_url: str | None = None,
    zap_allow_private_ips: bool = False,
    zap_allowed_domains: list[str] | None = None,
    falco_enabled: bool = False,
) -> Scanner | None:
    """Instantiate a scanner by name, or return None for an unknown name.

    Core SAST/SCA scanners come from SCANNER_REGISTRY and take no
    configuration; the optional DAST/runtime scanners (zap, falco) are
    built through their factory functions with extra settings.
    """
    registered = SCANNER_REGISTRY.get(name)
    if registered is not None:
        core: Scanner = registered()
        return core

    if name == "zap":
        dast: Scanner = create_zap_scanner(
            target_url=zap_target_url,
            allow_private_ips=zap_allow_private_ips,
            allowed_domains=zap_allowed_domains or [],
        )
        return dast

    if name == "falco":
        runtime: Scanner = create_falco_scanner(enabled=falco_enabled)
        return runtime

    # Defensive fallback for any optional scanner without special config.
    if name in OPTIONAL_SCANNERS:
        extra: Scanner = OPTIONAL_SCANNERS[name]()
        return extra

    return None
701
+
702
+
703
+ def _resolve_dojo_config(
704
+ settings: DojoSettings | None,
705
+ url_override: str | None,
706
+ api_key_override: str | None,
707
+ ) -> DojoConfig | None:
708
+ base_url = url_override or os.environ.get("KEKKAI_DOJO_URL")
709
+ api_key = api_key_override or os.environ.get("KEKKAI_DOJO_API_KEY")
710
+
711
+ if settings:
712
+ base_url = base_url or settings.base_url
713
+ api_key = api_key or settings.api_key
714
+ return DojoConfig(
715
+ base_url=base_url or "http://localhost:8080",
716
+ api_key=api_key or "",
717
+ product_name=settings.product_name,
718
+ engagement_name=settings.engagement_name,
719
+ )
720
+
721
+ if base_url or api_key:
722
+ return DojoConfig(
723
+ base_url=base_url or "http://localhost:8080",
724
+ api_key=api_key or "",
725
+ )
726
+ return None
727
+
728
+
729
+ def _get_commit_sha(repo_path: Path) -> str | None:
730
+ import shutil
731
+ import subprocess # nosec B404
732
+
733
+ git = shutil.which("git")
734
+ if not git:
735
+ return None
736
+ try:
737
+ result = subprocess.run( # noqa: S603 # nosec B603
738
+ [git, "rev-parse", "HEAD"],
739
+ cwd=str(repo_path),
740
+ capture_output=True,
741
+ text=True,
742
+ timeout=10,
743
+ check=False,
744
+ )
745
+ if result.returncode == 0:
746
+ return result.stdout.strip()
747
+ except (OSError, subprocess.SubprocessError):
748
+ return None
749
+ return None
750
+
751
+
752
def _command_dojo(parsed: argparse.Namespace) -> int:
    """Handle the `kekkai dojo` subcommands: up, down, status, open.

    Args:
        parsed: Namespace from the `dojo` subparser; `dojo_command`
            selects the action.

    Returns:
        0 on success, 1 on a RuntimeError from compose or an unknown
        subcommand.
    """
    compose_root = dojo.compose_dir(_resolve_dojo_compose_dir(parsed))
    project_name = _resolve_dojo_project_name(parsed)

    if parsed.dojo_command == "up":
        port = _resolve_dojo_port(parsed)
        tls_port = _resolve_dojo_tls_port(parsed, port)
        try:
            # compose_up returns the generated environment (admin creds etc.).
            env = dojo.compose_up(
                compose_root=compose_root,
                project_name=project_name,
                port=port,
                tls_port=tls_port,
                wait=bool(parsed.wait),
                open_browser=bool(parsed.open),
            )
        except RuntimeError as exc:
            print(str(exc))
            return 1
        print(f"DefectDojo is starting at http://localhost:{port}/")
        print(f"Admin user: {env.get('DD_ADMIN_USER', 'admin')}")
        print("Admin password stored in .env")
        return 0

    if parsed.dojo_command == "down":
        try:
            dojo.compose_down(compose_root=compose_root, project_name=project_name)
        except RuntimeError as exc:
            print(str(exc))
            return 1
        print("DefectDojo stack stopped")
        return 0

    if parsed.dojo_command == "status":
        try:
            statuses = dojo.compose_status(compose_root=compose_root, project_name=project_name)
        except RuntimeError as exc:
            print(str(exc))
            return 1
        if not statuses:
            print("No running services found. Run `kekkai dojo up`.")
            return 0
        # One line per service: state plus optional health and ports.
        for status in statuses:
            details = [status.state]
            if status.health:
                details.append(f"health={status.health}")
            if status.ports:
                details.append(f"ports={status.ports}")
            print(f"{status.name}: {' '.join(details)}")
        return 0

    if parsed.dojo_command == "open":
        port = _resolve_dojo_open_port(parsed, compose_root)
        dojo.open_ui(port)
        return 0

    print("Unknown dojo command. Use `kekkai dojo --help`.")
    return 1
810
+
811
+
812
def _command_threatflow(parsed: argparse.Namespace) -> int:
    """Run ThreatFlow threat model analysis.

    Resolves the target repository, builds a ThreatFlowConfig from CLI
    arguments (falling back to KEKKAI_THREATFLOW_* environment variables),
    runs the analysis, and prints a human-readable summary.

    Returns a process exit code: 0 on success, 1 on any error.
    """
    # Imported lazily so the heavy threatflow package only loads when used.
    from .threatflow import ThreatFlow, ThreatFlowConfig

    # Resolve repository path (defaults to the current working directory).
    repo_override = cast(str | None, getattr(parsed, "repo", None))
    repo_path = Path(repo_override) if repo_override else Path.cwd()
    repo_path = repo_path.expanduser().resolve()

    if not repo_path.exists() or not repo_path.is_dir():
        print(f"Error: Repository path not found: {repo_path}")
        return 1

    # Build config from CLI args and environment; CLI flags take precedence
    # over the corresponding KEKKAI_THREATFLOW_* variables.
    model_mode_raw = getattr(parsed, "model_mode", None) or os.environ.get("KEKKAI_THREATFLOW_MODE")
    model_mode: str = model_mode_raw if model_mode_raw else "local"
    model_path = getattr(parsed, "model_path", None) or os.environ.get(
        "KEKKAI_THREATFLOW_MODEL_PATH"
    )
    api_key = getattr(parsed, "api_key", None) or os.environ.get("KEKKAI_THREATFLOW_API_KEY")
    model_name = getattr(parsed, "model_name", None) or os.environ.get(
        "KEKKAI_THREATFLOW_MODEL_NAME"
    )

    config = ThreatFlowConfig(
        model_mode=model_mode,
        model_path=model_path,
        api_key=api_key,
        model_name=model_name,
        max_files=getattr(parsed, "max_files", 500),
        timeout_seconds=getattr(parsed, "timeout", 300),
        # Security controls default to ON; --no-redact/--no-sanitize turn them off.
        redact_secrets=not getattr(parsed, "no_redact", False),
        sanitize_content=not getattr(parsed, "no_sanitize", False),
    )

    # Resolve output directory (None lets ThreatFlow choose its default).
    output_dir_override = cast(str | None, getattr(parsed, "output_dir", None))
    output_dir = Path(output_dir_override) if output_dir_override else None

    # Display banner
    print(_threatflow_banner())
    print(f"Repository: {repo_path}")
    print(f"Model mode: {model_mode}")

    # Warn about remote mode: code content leaves the local machine.
    if model_mode in ("openai", "anthropic"):
        print(
            "\n*** WARNING: Using remote API. Code content will be sent to external service. ***\n"
        )
        if not api_key:
            print("Error: API key required for remote mode.")
            print(" Set --api-key or KEKKAI_THREATFLOW_API_KEY")
            return 1

    # Warn about disabled security controls so the operator sees the risk.
    if config.redact_secrets is False:
        print("*** WARNING: Secret redaction is DISABLED. Secrets may be sent to LLM. ***")
    if config.sanitize_content is False:
        print("*** WARNING: Prompt sanitization is DISABLED. Injection attacks possible. ***")

    print("\nAnalyzing repository...")

    # Run analysis
    tf = ThreatFlow(config=config)
    result = tf.analyze(repo_path=repo_path, output_dir=output_dir)

    if not result.success:
        print(f"\nAnalysis failed: {result.error}")
        return 1

    # Print results
    print(f"\nAnalysis complete in {result.duration_ms}ms")
    print(f"Files processed: {result.files_processed}")
    print(f"Files skipped: {result.files_skipped}")

    if result.warnings:
        print("\nWarnings:")
        for w in result.warnings:
            print(f" - {w}")

    if result.injection_warnings:
        print("\nInjection patterns detected (sanitized):")
        for w in result.injection_warnings[:5]:  # Limit output
            print(f" - {w}")
        if len(result.injection_warnings) > 5:
            print(f" ... and {len(result.injection_warnings) - 5} more")

    print("\nOutput files:")
    for path in result.output_files:
        print(f" - {path}")

    # Print threat summary if available; buckets reported in descending
    # severity order, skipping empty ones.
    if result.artifacts:
        counts = result.artifacts.threat_count_by_risk()
        total = len(result.artifacts.threats)
        print(f"\nThreats identified: {total}")
        for level in ["critical", "high", "medium", "low"]:
            if counts.get(level, 0) > 0:
                print(f" - {level.capitalize()}: {counts[level]}")

    return 0
913
+
914
+
915
+ def _threatflow_banner() -> str:
916
+ """Return ThreatFlow banner."""
917
+ return (
918
+ "\n"
919
+ "ThreatFlow — AI-Assisted Threat Modeling\n"
920
+ "=========================================\n"
921
+ "STRIDE analysis powered by local-first LLM\n"
922
+ )
923
+
924
+
925
def _command_triage(parsed: argparse.Namespace) -> int:
    """Launch the interactive triage TUI and return its exit code."""
    from .triage import run_triage

    def _as_resolved_path(attr: str) -> Path | None:
        # Expand ~ and resolve to an absolute path; None when flag absent/empty.
        raw = cast(str | None, getattr(parsed, attr, None))
        return Path(raw).expanduser().resolve() if raw else None

    input_path = _as_resolved_path("input")
    output_path = _as_resolved_path("output")

    if input_path is not None and not input_path.exists():
        console.print(f"[danger]Error:[/danger] Input file not found: {input_path}")
        return 1

    for banner_line in (
        "[bold cyan]Kekkai Triage[/bold cyan] - Interactive Finding Review",
        "Use j/k to navigate, f=false positive, c=confirmed, d=deferred",
        "Press Ctrl+S to save, q to quit\n",
    ):
        console.print(banner_line)

    return run_triage(input_path=input_path, output_path=output_path)
944
+
945
+
946
+ def _resolve_dojo_compose_dir(parsed: argparse.Namespace) -> str | None:
947
+ compose_dir = cast(str | None, getattr(parsed, "compose_dir", None))
948
+ if compose_dir:
949
+ return compose_dir
950
+ return os.environ.get("KEKKAI_DOJO_COMPOSE_DIR")
951
+
952
+
953
+ def _resolve_dojo_project_name(parsed: argparse.Namespace) -> str:
954
+ project_name = cast(str | None, getattr(parsed, "project_name", None))
955
+ if project_name:
956
+ return project_name
957
+ return os.environ.get("KEKKAI_DOJO_PROJECT_NAME", dojo.DEFAULT_PROJECT_NAME)
958
+
959
+
960
+ def _resolve_dojo_port(parsed: argparse.Namespace) -> int:
961
+ port = cast(int | None, getattr(parsed, "port", None))
962
+ if port is not None:
963
+ return port
964
+ if env_port := os.environ.get("KEKKAI_DOJO_PORT"):
965
+ return int(env_port)
966
+ return dojo.DEFAULT_PORT
967
+
968
+
969
+ def _resolve_dojo_tls_port(parsed: argparse.Namespace, port: int) -> int:
970
+ tls_port = cast(int | None, getattr(parsed, "tls_port", None))
971
+ if tls_port is not None:
972
+ return tls_port
973
+ if env_port := os.environ.get("KEKKAI_DOJO_TLS_PORT"):
974
+ return int(env_port)
975
+ return dojo.DEFAULT_TLS_PORT if port != dojo.DEFAULT_TLS_PORT else port + 1
976
+
977
+
978
+ def _resolve_dojo_open_port(parsed: argparse.Namespace, compose_root: Path) -> int:
979
+ port = cast(int | None, getattr(parsed, "port", None))
980
+ if port is not None:
981
+ return port
982
+ env = dojo.load_env_file(compose_root / ".env")
983
+ if value := env.get("DD_PORT"):
984
+ return int(value)
985
+ if env_port := os.environ.get("KEKKAI_DOJO_PORT"):
986
+ return int(env_port)
987
+ return dojo.DEFAULT_PORT
988
+
989
+
990
+ def _resolve_config_path(config_override: str | None) -> Path:
991
+ if config_override:
992
+ return Path(config_override).expanduser().resolve()
993
+ return config_path()
994
+
995
+
996
+ def _resolve_repo_path(repo_path: Path) -> Path:
997
+ if repo_path.is_absolute():
998
+ return repo_path.resolve()
999
+ return (Path.cwd() / repo_path).resolve()
1000
+
1001
+
1002
+ def _resolve_run_id(override: str | None) -> str:
1003
+ return override or os.environ.get("KEKKAI_RUN_ID") or _generate_run_id()
1004
+
1005
+
1006
+ def _resolve_run_dir(
1007
+ base_dir: Path,
1008
+ run_base_dir: Path,
1009
+ run_id: str,
1010
+ run_dir_override: str | None,
1011
+ ) -> Path:
1012
+ env_override = os.environ.get("KEKKAI_RUN_DIR")
1013
+ override = run_dir_override or env_override
1014
+ if override:
1015
+ return Path(override).expanduser().resolve()
1016
+
1017
+ resolved_base = run_base_dir.expanduser()
1018
+ if not resolved_base.is_absolute():
1019
+ resolved_base = (base_dir / resolved_base).resolve()
1020
+
1021
+ if is_within_base(base_dir, resolved_base):
1022
+ return safe_join(resolved_base, run_id)
1023
+ return (resolved_base / run_id).resolve()
1024
+
1025
+
1026
+ def _generate_run_id() -> str:
1027
+ timestamp = datetime.now(UTC).strftime("%Y%m%d-%H%M%S")
1028
+ return f"run-{timestamp}"
1029
+
1030
+
1031
+ def _now_iso() -> str:
1032
+ return datetime.now(UTC).isoformat()
1033
+
1034
+
1035
def load_config_text(base_dir: Path) -> str:
    """Return the default kekkai configuration text rendered for *base_dir*.

    The import is deferred to call time to keep module import cheap and to
    avoid an import cycle with kekkai.config.
    """
    from .config import default_config_text

    return default_config_text(base_dir)