kekkai-cli 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kekkai/__init__.py +7 -0
- kekkai/cli.py +1038 -0
- kekkai/config.py +403 -0
- kekkai/dojo.py +419 -0
- kekkai/dojo_import.py +213 -0
- kekkai/github/__init__.py +16 -0
- kekkai/github/commenter.py +198 -0
- kekkai/github/models.py +56 -0
- kekkai/github/sanitizer.py +112 -0
- kekkai/installer/__init__.py +39 -0
- kekkai/installer/errors.py +23 -0
- kekkai/installer/extract.py +161 -0
- kekkai/installer/manager.py +252 -0
- kekkai/installer/manifest.py +189 -0
- kekkai/installer/verify.py +86 -0
- kekkai/manifest.py +77 -0
- kekkai/output.py +218 -0
- kekkai/paths.py +46 -0
- kekkai/policy.py +326 -0
- kekkai/runner.py +70 -0
- kekkai/scanners/__init__.py +67 -0
- kekkai/scanners/backends/__init__.py +14 -0
- kekkai/scanners/backends/base.py +73 -0
- kekkai/scanners/backends/docker.py +178 -0
- kekkai/scanners/backends/native.py +240 -0
- kekkai/scanners/base.py +110 -0
- kekkai/scanners/container.py +144 -0
- kekkai/scanners/falco.py +237 -0
- kekkai/scanners/gitleaks.py +237 -0
- kekkai/scanners/semgrep.py +227 -0
- kekkai/scanners/trivy.py +246 -0
- kekkai/scanners/url_policy.py +163 -0
- kekkai/scanners/zap.py +340 -0
- kekkai/threatflow/__init__.py +94 -0
- kekkai/threatflow/artifacts.py +476 -0
- kekkai/threatflow/chunking.py +361 -0
- kekkai/threatflow/core.py +438 -0
- kekkai/threatflow/mermaid.py +374 -0
- kekkai/threatflow/model_adapter.py +491 -0
- kekkai/threatflow/prompts.py +277 -0
- kekkai/threatflow/redaction.py +228 -0
- kekkai/threatflow/sanitizer.py +643 -0
- kekkai/triage/__init__.py +33 -0
- kekkai/triage/app.py +168 -0
- kekkai/triage/audit.py +203 -0
- kekkai/triage/ignore.py +269 -0
- kekkai/triage/models.py +185 -0
- kekkai/triage/screens.py +341 -0
- kekkai/triage/widgets.py +169 -0
- kekkai_cli-1.0.0.dist-info/METADATA +135 -0
- kekkai_cli-1.0.0.dist-info/RECORD +90 -0
- kekkai_cli-1.0.0.dist-info/WHEEL +5 -0
- kekkai_cli-1.0.0.dist-info/entry_points.txt +3 -0
- kekkai_cli-1.0.0.dist-info/top_level.txt +3 -0
- kekkai_core/__init__.py +3 -0
- kekkai_core/ci/__init__.py +11 -0
- kekkai_core/ci/benchmarks.py +354 -0
- kekkai_core/ci/metadata.py +104 -0
- kekkai_core/ci/validators.py +92 -0
- kekkai_core/docker/__init__.py +17 -0
- kekkai_core/docker/metadata.py +153 -0
- kekkai_core/docker/sbom.py +173 -0
- kekkai_core/docker/security.py +158 -0
- kekkai_core/docker/signing.py +135 -0
- kekkai_core/redaction.py +84 -0
- kekkai_core/slsa/__init__.py +13 -0
- kekkai_core/slsa/verify.py +121 -0
- kekkai_core/windows/__init__.py +29 -0
- kekkai_core/windows/chocolatey.py +335 -0
- kekkai_core/windows/installer.py +256 -0
- kekkai_core/windows/scoop.py +165 -0
- kekkai_core/windows/validators.py +220 -0
- portal/__init__.py +19 -0
- portal/api.py +155 -0
- portal/auth.py +103 -0
- portal/enterprise/__init__.py +32 -0
- portal/enterprise/audit.py +435 -0
- portal/enterprise/licensing.py +342 -0
- portal/enterprise/rbac.py +276 -0
- portal/enterprise/saml.py +595 -0
- portal/ops/__init__.py +53 -0
- portal/ops/backup.py +553 -0
- portal/ops/log_shipper.py +469 -0
- portal/ops/monitoring.py +517 -0
- portal/ops/restore.py +469 -0
- portal/ops/secrets.py +408 -0
- portal/ops/upgrade.py +591 -0
- portal/tenants.py +340 -0
- portal/uploads.py +259 -0
- portal/web.py +384 -0
kekkai/scanners/zap.py
ADDED
|
@@ -0,0 +1,340 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
from typing import Any
|
|
6
|
+
|
|
7
|
+
from .backends import (
|
|
8
|
+
BackendType,
|
|
9
|
+
NativeBackend,
|
|
10
|
+
ToolNotFoundError,
|
|
11
|
+
ToolVersionError,
|
|
12
|
+
detect_tool,
|
|
13
|
+
docker_available,
|
|
14
|
+
)
|
|
15
|
+
from .base import Finding, ScanContext, ScanResult, Severity
|
|
16
|
+
from .container import ContainerConfig, run_container
|
|
17
|
+
from .url_policy import UrlPolicy, UrlPolicyError, validate_target_url
|
|
18
|
+
|
|
19
|
+
ZAP_IMAGE = "ghcr.io/zaproxy/zaproxy"
|
|
20
|
+
ZAP_DIGEST = "sha256:a1b2c3d4e5f6" # Placeholder - update with real digest
|
|
21
|
+
SCAN_TYPE = "ZAP Scan"
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class ZapScanner:
    """OWASP ZAP baseline scanner adapter.

    DAST scanner that requires explicit target URL and enforces URL policy.
    By default, blocks scanning of private/internal networks (SSRF protection).

    Native mode support (opt-in):
    - Requires zap-cli or zap.sh to be installed and in PATH
    - URL policy enforcement is still applied
    """

    def __init__(
        self,
        target_url: str | None = None,
        policy: UrlPolicy | None = None,
        image: str = ZAP_IMAGE,
        digest: str | None = ZAP_DIGEST,
        timeout_seconds: int = 900,
        backend: BackendType | None = None,
    ) -> None:
        # target_url may be None here; validate_target() rejects it at scan time.
        self._target_url = target_url
        # Fall back to the default UrlPolicy (which, per the class docstring,
        # blocks private/internal networks).
        self._policy = policy or UrlPolicy()
        self._image = image
        # digest=None disables image pinning; ZAP_DIGEST is a placeholder value.
        self._digest = digest
        self._timeout = timeout_seconds
        # Set by validate_target() after the URL passes policy checks.
        self._validated_url: str | None = None
        # Explicit backend choice; None means auto-detect in _select_backend().
        self._backend = backend
        # Recorded by run() so callers can inspect which backend actually ran.
        self._resolved_backend: BackendType | None = None

    @property
    def name(self) -> str:
        # Stable scanner identifier used in ScanResult/Finding records.
        return "zap"

    @property
    def scan_type(self) -> str:
        return SCAN_TYPE

    @property
    def backend_used(self) -> BackendType | None:
        """Return the backend used for the last scan."""
        return self._resolved_backend

    def _select_backend(self) -> BackendType:
        """Select backend: explicit choice, or auto-detect (Docker preferred for ZAP)."""
        if self._backend is not None:
            return self._backend

        available, _ = docker_available()
        if available:
            return BackendType.DOCKER

        try:
            detect_tool("zap-cli", min_version=(0, 10, 0))
            return BackendType.NATIVE
        except (ToolNotFoundError, ToolVersionError):
            # Neither Docker nor a usable zap-cli was found; default to Docker
            # and let the Docker run surface the error to the caller.
            return BackendType.DOCKER

    def validate_target(self) -> str:
        """Validate and return the target URL.

        Caches the policy-validated URL in ``self._validated_url``.

        Raises:
            UrlPolicyError: If target URL is missing or invalid
        """
        if not self._target_url:
            raise UrlPolicyError("ZAP requires explicit --target-url")
        self._validated_url = validate_target_url(self._target_url, self._policy)
        return self._validated_url

    def run(self, ctx: ScanContext) -> ScanResult:
        """Run the baseline scan via the selected backend and return its result."""
        backend = self._select_backend()
        self._resolved_backend = backend

        if backend == BackendType.NATIVE:
            return self._run_native(ctx)
        return self._run_docker(ctx)

    def _run_docker(self, ctx: ScanContext) -> ScanResult:
        """Run the ZAP baseline scan inside a Docker container."""
        # Validate target URL before running
        try:
            validated_url = self.validate_target()
        except UrlPolicyError as e:
            # Policy failures are reported as a failed ScanResult, not raised.
            return ScanResult(
                scanner=self.name,
                success=False,
                findings=[],
                error=f"URL policy violation: {e}",
                duration_ms=0,
            )

        config = ContainerConfig(
            image=self._image,
            image_digest=self._digest,
            read_only=False,  # ZAP needs to write its own files
            network_disabled=False,  # ZAP needs network to scan target
            no_new_privileges=True,
            memory_limit="4g",  # ZAP can be memory-hungry
            cpu_limit="2",
        )

        # ZAP baseline scan command
        # Uses zap-baseline.py which is designed for CI/CD
        command = [
            "zap-baseline.py",
            "-t",
            validated_url,
            "-J",
            "/zap/wrk/zap-results.json",
            "-I",  # Don't fail on warnings
            "-d",  # Show debug messages
        ]

        result = run_container(
            config=config,
            repo_path=ctx.repo_path,  # Not really used for ZAP
            output_path=ctx.output_dir,
            command=command,
            timeout_seconds=self._timeout,
            workdir="/zap/wrk",
            output_mount="/zap/wrk",
            skip_repo_mount=True,  # ZAP doesn't need repo
            user=None,  # ZAP container has its own user setup
        )

        return self._process_result(
            result.timed_out, result.duration_ms, result.stderr, ctx.output_dir
        )

    def _run_native(self, ctx: ScanContext) -> ScanResult:
        """Run ZAP natively using zap-cli.

        Note: Native ZAP execution requires zap-cli and a running ZAP daemon.
        This is primarily for environments where Docker is not available.
        """
        try:
            validated_url = self.validate_target()
        except UrlPolicyError as e:
            return ScanResult(
                scanner=self.name,
                success=False,
                findings=[],
                error=f"URL policy violation: {e}",
                duration_ms=0,
            )

        # Re-detect the tool here: run() may have been forced into native mode
        # explicitly, so _select_backend()'s detection cannot be relied upon.
        try:
            tool_info = detect_tool("zap-cli", min_version=(0, 10, 0))
        except (ToolNotFoundError, ToolVersionError) as e:
            return ScanResult(
                scanner=self.name,
                success=False,
                findings=[],
                error=f"ZAP native mode unavailable: {e}",
                duration_ms=0,
            )

        output_file = ctx.output_dir / "zap-results.json"
        backend = NativeBackend()

        args = [
            "quick-scan",
            "--self-contained",
            "-o",
            str(output_file),
            "-f",
            "json",
            validated_url,
        ]

        result = backend.execute(
            tool=tool_info.path,
            args=args,
            repo_path=ctx.repo_path,
            output_path=ctx.output_dir,
            timeout_seconds=self._timeout,
            network_required=True,
        )

        return self._process_result(
            result.timed_out, result.duration_ms, result.stderr, ctx.output_dir
        )

    def _process_result(
        self, timed_out: bool, duration_ms: int, stderr: str, output_dir: Path
    ) -> ScanResult:
        """Process scan result from either backend.

        Locates the JSON report, parses it into Findings, and wraps timeouts,
        missing output, and parse failures as unsuccessful ScanResults.
        """
        if timed_out:
            return ScanResult(
                scanner=self.name,
                success=False,
                findings=[],
                error="ZAP scan timed out",
                duration_ms=duration_ms,
            )

        zap_output = output_dir / "zap-results.json"
        if not zap_output.exists():
            # Some ZAP setups write under a "wrk" subdirectory of the mount;
            # fall back to that location before giving up.
            alt_output = output_dir / "wrk" / "zap-results.json"
            if alt_output.exists():
                zap_output = alt_output

        if not zap_output.exists():
            return ScanResult(
                scanner=self.name,
                success=False,
                findings=[],
                error=stderr or "No output file produced",
                duration_ms=duration_ms,
            )

        try:
            findings = self.parse(zap_output.read_text())
        except (json.JSONDecodeError, KeyError) as exc:
            # Keep the raw report path so the malformed output can be inspected.
            return ScanResult(
                scanner=self.name,
                success=False,
                findings=[],
                raw_output_path=zap_output,
                error=f"Parse error: {exc}",
                duration_ms=duration_ms,
            )

        return ScanResult(
            scanner=self.name,
            success=True,
            findings=findings,
            raw_output_path=zap_output,
            duration_ms=duration_ms,
        )

    def parse(self, raw_output: str) -> list[Finding]:
        """Parse ZAP JSON output to Finding objects."""
        data = json.loads(raw_output)
        findings: list[Finding] = []

        # ZAP baseline outputs alerts in "site" -> "alerts" structure
        for site in data.get("site", []):
            site_name = site.get("@name", "")
            for alert in site.get("alerts", []):
                findings.append(self._parse_alert(alert, site_name))

        return findings

    def _parse_alert(self, alert: dict[str, Any], site: str) -> Finding:
        """Parse a single ZAP alert to a Finding."""
        # Map ZAP risk levels to our severity
        risk = alert.get("riskcode", "0")
        severity = self._map_risk_to_severity(risk)

        # Get CWE if available
        cwe = None
        if cweid := alert.get("cweid"):
            cwe = f"CWE-{cweid}"

        # Build description from multiple fields
        desc_parts = [alert.get("desc", "")]
        if solution := alert.get("solution"):
            desc_parts.append(f"Solution: {solution}")
        if reference := alert.get("reference"):
            desc_parts.append(f"Reference: {reference}")

        # Get affected instances; fall back to the site name when the alert
        # carries no per-URL instances.
        instances = alert.get("instances", [])
        affected_url = instances[0].get("uri", site) if instances else site

        return Finding(
            scanner=self.name,
            title=alert.get("name", "ZAP Alert"),
            severity=severity,
            description="\n\n".join(desc_parts),
            file_path=affected_url,  # DAST has no file; the URL stands in
            rule_id=alert.get("pluginid"),
            cwe=cwe,
            extra={
                "confidence": alert.get("confidence", ""),
                "count": str(alert.get("count", len(instances))),
                "site": site,
            },
        )

    def _map_risk_to_severity(self, risk: str | int) -> Severity:
        """Map ZAP risk code to Severity."""
        # ZAP reports riskcode as a string in JSON output; normalize to int.
        risk_int = int(risk) if isinstance(risk, str) else risk
        mapping = {
            3: Severity.HIGH,
            2: Severity.MEDIUM,
            1: Severity.LOW,
            0: Severity.INFO,
        }
        return mapping.get(risk_int, Severity.UNKNOWN)
|
|
313
|
+
|
|
314
|
+
|
|
315
|
+
def create_zap_scanner(
    target_url: str | None = None,
    allow_private_ips: bool = False,
    allowed_domains: list[str] | None = None,
    timeout_seconds: int = 900,
) -> ZapScanner:
    """Build a ZapScanner wired to a UrlPolicy.

    Args:
        target_url: The URL to scan (required)
        allow_private_ips: Whether to allow scanning private IPs (default: False)
        allowed_domains: Optional allowlist of domains
        timeout_seconds: Scan timeout

    Returns:
        Configured ZapScanner instance
    """
    # An absent/empty allowlist means "no domain restriction".
    domains = frozenset(allowed_domains) if allowed_domains else frozenset()
    scan_policy = UrlPolicy(
        allow_private_ips=allow_private_ips,
        allowed_domains=domains,
    )
    return ZapScanner(
        target_url=target_url,
        policy=scan_policy,
        timeout_seconds=timeout_seconds,
    )
|
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
"""ThreatFlow - Agentic threat modeling with local-first LLM support.
|
|
2
|
+
|
|
3
|
+
Security-conscious threat modeling that:
|
|
4
|
+
- Never executes repository code
|
|
5
|
+
- Redacts secrets before LLM processing
|
|
6
|
+
- Defends against prompt injection
|
|
7
|
+
- Supports local models by default
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
from __future__ import annotations
|
|
11
|
+
|
|
12
|
+
from .artifacts import (
|
|
13
|
+
ArtifactGenerator,
|
|
14
|
+
DataFlowEntry,
|
|
15
|
+
ThreatEntry,
|
|
16
|
+
ThreatModelArtifacts,
|
|
17
|
+
)
|
|
18
|
+
from .chunking import ChunkingConfig, FileChunk, chunk_files
|
|
19
|
+
from .core import ThreatFlow, ThreatFlowConfig, ThreatFlowResult
|
|
20
|
+
from .mermaid import (
|
|
21
|
+
MermaidDFDGenerator,
|
|
22
|
+
MermaidEdge,
|
|
23
|
+
MermaidNode,
|
|
24
|
+
NodeType,
|
|
25
|
+
generate_dfd_mermaid,
|
|
26
|
+
)
|
|
27
|
+
from .model_adapter import (
|
|
28
|
+
LocalModelAdapter,
|
|
29
|
+
MockModelAdapter,
|
|
30
|
+
ModelAdapter,
|
|
31
|
+
ModelResponse,
|
|
32
|
+
RemoteModelAdapter,
|
|
33
|
+
)
|
|
34
|
+
from .prompts import PromptBuilder, STRIDECategory
|
|
35
|
+
from .redaction import ThreatFlowRedactor
|
|
36
|
+
from .sanitizer import (
|
|
37
|
+
ClassifierResult,
|
|
38
|
+
DefenseLayer,
|
|
39
|
+
InjectionClassifier,
|
|
40
|
+
InjectionPattern,
|
|
41
|
+
InjectionRisk,
|
|
42
|
+
OutputValidationResult,
|
|
43
|
+
SanitizeConfig,
|
|
44
|
+
Sanitizer,
|
|
45
|
+
SanitizeResult,
|
|
46
|
+
TieredSanitizer,
|
|
47
|
+
TieredSanitizeResult,
|
|
48
|
+
)
|
|
49
|
+
|
|
50
|
+
# Public API of the threatflow package; names are grouped by submodule.
__all__ = [
    # Core pipeline entry points
    "ThreatFlow",
    "ThreatFlowConfig",
    "ThreatFlowResult",
    # Artifact generation (threat-model outputs)
    "ArtifactGenerator",
    "ThreatModelArtifacts",
    "ThreatEntry",
    "DataFlowEntry",
    # Repository chunking for LLM context windows
    "ChunkingConfig",
    "FileChunk",
    "chunk_files",
    # Model adapters (local-first, with remote and mock variants)
    "ModelAdapter",
    "ModelResponse",
    "LocalModelAdapter",
    "RemoteModelAdapter",
    "MockModelAdapter",
    # Prompt construction (STRIDE-based)
    "PromptBuilder",
    "STRIDECategory",
    # Secret redaction before LLM processing
    "ThreatFlowRedactor",
    # Prompt-injection sanitizer
    "Sanitizer",
    "SanitizeResult",
    "InjectionPattern",
    "InjectionRisk",
    # Tiered Sanitizer (Milestone 5)
    "TieredSanitizer",
    "TieredSanitizeResult",
    "SanitizeConfig",
    "DefenseLayer",
    "InjectionClassifier",
    "ClassifierResult",
    "OutputValidationResult",
    # Mermaid DFD (Milestone 3)
    "MermaidDFDGenerator",
    "MermaidNode",
    "MermaidEdge",
    "NodeType",
    "generate_dfd_mermaid",
]
|