kekkai-cli 1.0.5__py3-none-any.whl → 1.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kekkai/cli.py +789 -19
- kekkai/compliance/__init__.py +68 -0
- kekkai/compliance/hipaa.py +235 -0
- kekkai/compliance/mappings.py +136 -0
- kekkai/compliance/owasp.py +517 -0
- kekkai/compliance/owasp_agentic.py +267 -0
- kekkai/compliance/pci_dss.py +205 -0
- kekkai/compliance/soc2.py +209 -0
- kekkai/dojo.py +91 -14
- kekkai/dojo_import.py +9 -1
- kekkai/fix/__init__.py +47 -0
- kekkai/fix/audit.py +278 -0
- kekkai/fix/differ.py +427 -0
- kekkai/fix/engine.py +500 -0
- kekkai/fix/prompts.py +251 -0
- kekkai/output.py +10 -12
- kekkai/report/__init__.py +41 -0
- kekkai/report/compliance_matrix.py +98 -0
- kekkai/report/generator.py +365 -0
- kekkai/report/html.py +69 -0
- kekkai/report/pdf.py +63 -0
- kekkai/report/unified.py +226 -0
- kekkai/scanners/container.py +33 -3
- kekkai/scanners/gitleaks.py +3 -1
- kekkai/scanners/semgrep.py +1 -1
- kekkai/scanners/trivy.py +1 -1
- kekkai/threatflow/model_adapter.py +143 -1
- kekkai/triage/__init__.py +54 -1
- kekkai/triage/loader.py +196 -0
- kekkai_cli-1.1.1.dist-info/METADATA +379 -0
- {kekkai_cli-1.0.5.dist-info → kekkai_cli-1.1.1.dist-info}/RECORD +34 -33
- {kekkai_cli-1.0.5.dist-info → kekkai_cli-1.1.1.dist-info}/entry_points.txt +0 -1
- {kekkai_cli-1.0.5.dist-info → kekkai_cli-1.1.1.dist-info}/top_level.txt +0 -1
- kekkai_cli-1.0.5.dist-info/METADATA +0 -135
- portal/__init__.py +0 -19
- portal/api.py +0 -155
- portal/auth.py +0 -103
- portal/enterprise/__init__.py +0 -32
- portal/enterprise/audit.py +0 -435
- portal/enterprise/licensing.py +0 -342
- portal/enterprise/rbac.py +0 -276
- portal/enterprise/saml.py +0 -595
- portal/ops/__init__.py +0 -53
- portal/ops/backup.py +0 -553
- portal/ops/log_shipper.py +0 -469
- portal/ops/monitoring.py +0 -517
- portal/ops/restore.py +0 -469
- portal/ops/secrets.py +0 -408
- portal/ops/upgrade.py +0 -591
- portal/tenants.py +0 -340
- portal/uploads.py +0 -259
- portal/web.py +0 -384
- {kekkai_cli-1.0.5.dist-info → kekkai_cli-1.1.1.dist-info}/WHEEL +0 -0
|
@@ -0,0 +1,365 @@
|
|
|
1
|
+
"""Report generation orchestration.
|
|
2
|
+
|
|
3
|
+
Handles report generation workflow including compliance mapping,
|
|
4
|
+
format selection, and output management.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
import hashlib
|
|
10
|
+
import json
|
|
11
|
+
from dataclasses import dataclass, field
|
|
12
|
+
from datetime import UTC, datetime
|
|
13
|
+
from enum import Enum
|
|
14
|
+
from pathlib import Path
|
|
15
|
+
from typing import TYPE_CHECKING, Any
|
|
16
|
+
|
|
17
|
+
if TYPE_CHECKING:
|
|
18
|
+
from collections.abc import Sequence
|
|
19
|
+
|
|
20
|
+
from kekkai.compliance.mappings import ComplianceMappingResult
|
|
21
|
+
from kekkai.scanners.base import Finding
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class ReportFormat(str, Enum):
    """Available report formats."""

    HTML = "html"  # standalone HTML document
    PDF = "pdf"  # HTML rendered to PDF (requires weasyprint; falls back to HTML)
    COMPLIANCE = "compliance"  # compliance-framework matrix output
    JSON = "json"  # machine-readable JSON dump of the report data
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
@dataclass
class ReportConfig:
    """Configuration for report generation."""

    # Output formats to produce; defaults to HTML only.
    formats: list[ReportFormat] = field(default_factory=lambda: [ReportFormat.HTML])
    # Compliance frameworks to map findings onto.
    frameworks: list[str] = field(default_factory=lambda: ["PCI-DSS", "SOC2", "OWASP", "HIPAA"])
    # Minimum severity to include; findings below this are filtered out.
    min_severity: str = "info"
    # Whether to include the executive-summary section.
    include_executive_summary: bool = True
    # Whether to include the remediation-timeline section.
    include_remediation_timeline: bool = True
    # Report title (presumably rendered by the templates — TODO confirm).
    title: str = "Security Scan Report"
    # Optional organization name.
    organization: str = ""
    # Optional project name.
    project_name: str = ""
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
@dataclass
class ReportResult:
    """Result of report generation."""

    # True unless any configured format failed to generate.
    success: bool
    # Paths of the files that were written.
    output_files: list[Path] = field(default_factory=list)
    # Error messages, one per failed format.
    errors: list[str] = field(default_factory=list)
    # Non-fatal issues encountered during generation.
    warnings: list[str] = field(default_factory=list)
    # Wall-clock generation time in milliseconds.
    generation_time_ms: int = 0
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
@dataclass
class ReportMetadata:
    """Metadata included in generated reports."""

    # ISO-8601 UTC timestamp of when the report was generated.
    generated_at: str
    # Version string of the generator that produced the report.
    generator_version: str
    # Number of findings after severity filtering.
    findings_count: int
    # Names of the compliance frameworks that were mapped.
    frameworks_mapped: list[str]
    # Truncated SHA-256 over the findings' dedupe hashes (integrity check).
    content_hash: str
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
def generate_report(
    findings: Sequence[Finding],
    output_dir: Path,
    config: ReportConfig | None = None,
) -> ReportResult:
    """Generate reports in the formats selected by *config*.

    Args:
        findings: Security findings to include.
        output_dir: Directory for output files.
        config: Report configuration; a default ``ReportConfig`` is used
            when omitted.

    Returns:
        ReportResult with output files and status.
    """
    effective_config = ReportConfig() if config is None else config
    return ReportGenerator(effective_config).generate(findings, output_dir)
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
class ReportGenerator:
|
|
89
|
+
"""Orchestrates report generation across formats."""
|
|
90
|
+
|
|
91
|
+
def __init__(self, config: ReportConfig) -> None:
|
|
92
|
+
self.config = config
|
|
93
|
+
|
|
94
|
+
def generate(
|
|
95
|
+
self,
|
|
96
|
+
findings: Sequence[Finding],
|
|
97
|
+
output_dir: Path,
|
|
98
|
+
) -> ReportResult:
|
|
99
|
+
"""Generate reports for all configured formats."""
|
|
100
|
+
import time
|
|
101
|
+
|
|
102
|
+
start_time = time.monotonic()
|
|
103
|
+
result = ReportResult(success=True)
|
|
104
|
+
|
|
105
|
+
# Ensure output directory exists
|
|
106
|
+
output_dir = output_dir.expanduser().resolve()
|
|
107
|
+
output_dir.mkdir(parents=True, exist_ok=True)
|
|
108
|
+
|
|
109
|
+
# Filter findings by severity
|
|
110
|
+
filtered = self._filter_by_severity(list(findings))
|
|
111
|
+
|
|
112
|
+
# Map to compliance frameworks
|
|
113
|
+
from kekkai.compliance import map_findings_to_all_frameworks
|
|
114
|
+
|
|
115
|
+
compliance_result = map_findings_to_all_frameworks(filtered)
|
|
116
|
+
|
|
117
|
+
# Build report data
|
|
118
|
+
report_data = self._build_report_data(filtered, compliance_result)
|
|
119
|
+
|
|
120
|
+
# Generate each format
|
|
121
|
+
for fmt in self.config.formats:
|
|
122
|
+
try:
|
|
123
|
+
output_path = self._generate_format(fmt, report_data, output_dir)
|
|
124
|
+
if output_path:
|
|
125
|
+
result.output_files.append(output_path)
|
|
126
|
+
except Exception as e:
|
|
127
|
+
result.errors.append(f"Failed to generate {fmt.value}: {e}")
|
|
128
|
+
result.success = False
|
|
129
|
+
|
|
130
|
+
result.generation_time_ms = int((time.monotonic() - start_time) * 1000)
|
|
131
|
+
return result
|
|
132
|
+
|
|
133
|
+
def _filter_by_severity(self, findings: list[Finding]) -> list[Finding]:
|
|
134
|
+
"""Filter findings by minimum severity."""
|
|
135
|
+
from kekkai.scanners.base import Severity
|
|
136
|
+
|
|
137
|
+
severity_order = [
|
|
138
|
+
Severity.CRITICAL,
|
|
139
|
+
Severity.HIGH,
|
|
140
|
+
Severity.MEDIUM,
|
|
141
|
+
Severity.LOW,
|
|
142
|
+
Severity.INFO,
|
|
143
|
+
Severity.UNKNOWN,
|
|
144
|
+
]
|
|
145
|
+
min_sev = Severity.from_string(self.config.min_severity)
|
|
146
|
+
try:
|
|
147
|
+
min_index = severity_order.index(min_sev)
|
|
148
|
+
except ValueError:
|
|
149
|
+
min_index = len(severity_order) - 1
|
|
150
|
+
|
|
151
|
+
return [f for f in findings if severity_order.index(f.severity) <= min_index]
|
|
152
|
+
|
|
153
|
+
def _build_report_data(
|
|
154
|
+
self,
|
|
155
|
+
findings: list[Finding],
|
|
156
|
+
compliance_result: ComplianceMappingResult,
|
|
157
|
+
) -> dict[str, Any]:
|
|
158
|
+
"""Build unified report data structure."""
|
|
159
|
+
from kekkai.output import VERSION
|
|
160
|
+
|
|
161
|
+
# Calculate content hash for integrity
|
|
162
|
+
content = json.dumps(
|
|
163
|
+
[f.dedupe_hash() for f in findings],
|
|
164
|
+
sort_keys=True,
|
|
165
|
+
)
|
|
166
|
+
content_hash = hashlib.sha256(content.encode()).hexdigest()[:16]
|
|
167
|
+
|
|
168
|
+
metadata = ReportMetadata(
|
|
169
|
+
generated_at=datetime.now(UTC).isoformat(),
|
|
170
|
+
generator_version=VERSION,
|
|
171
|
+
findings_count=len(findings),
|
|
172
|
+
frameworks_mapped=list(compliance_result.framework_summary.keys()),
|
|
173
|
+
content_hash=content_hash,
|
|
174
|
+
)
|
|
175
|
+
|
|
176
|
+
# Severity counts
|
|
177
|
+
severity_counts = self._count_by_severity(findings)
|
|
178
|
+
|
|
179
|
+
# Executive summary
|
|
180
|
+
executive_summary = self._build_executive_summary(findings, compliance_result)
|
|
181
|
+
|
|
182
|
+
# Remediation timeline
|
|
183
|
+
remediation_timeline = self._build_remediation_timeline(findings)
|
|
184
|
+
|
|
185
|
+
return {
|
|
186
|
+
"metadata": metadata,
|
|
187
|
+
"config": self.config,
|
|
188
|
+
"findings": findings,
|
|
189
|
+
"compliance": compliance_result,
|
|
190
|
+
"severity_counts": severity_counts,
|
|
191
|
+
"executive_summary": executive_summary,
|
|
192
|
+
"remediation_timeline": remediation_timeline,
|
|
193
|
+
}
|
|
194
|
+
|
|
195
|
+
def _count_by_severity(self, findings: list[Finding]) -> dict[str, int]:
|
|
196
|
+
"""Count findings by severity."""
|
|
197
|
+
counts: dict[str, int] = {
|
|
198
|
+
"critical": 0,
|
|
199
|
+
"high": 0,
|
|
200
|
+
"medium": 0,
|
|
201
|
+
"low": 0,
|
|
202
|
+
"info": 0,
|
|
203
|
+
}
|
|
204
|
+
for f in findings:
|
|
205
|
+
key = f.severity.value
|
|
206
|
+
if key in counts:
|
|
207
|
+
counts[key] += 1
|
|
208
|
+
return counts
|
|
209
|
+
|
|
210
|
+
def _build_executive_summary(
|
|
211
|
+
self,
|
|
212
|
+
findings: list[Finding],
|
|
213
|
+
compliance_result: ComplianceMappingResult,
|
|
214
|
+
) -> dict[str, Any]:
|
|
215
|
+
"""Build executive summary section."""
|
|
216
|
+
severity_counts = self._count_by_severity(findings)
|
|
217
|
+
|
|
218
|
+
# Risk score (simple weighted calculation)
|
|
219
|
+
risk_score = (
|
|
220
|
+
severity_counts["critical"] * 10
|
|
221
|
+
+ severity_counts["high"] * 5
|
|
222
|
+
+ severity_counts["medium"] * 2
|
|
223
|
+
+ severity_counts["low"] * 1
|
|
224
|
+
)
|
|
225
|
+
max_possible = len(findings) * 10 if findings else 1
|
|
226
|
+
risk_percentage = min(100, int((risk_score / max_possible) * 100))
|
|
227
|
+
|
|
228
|
+
# Risk level
|
|
229
|
+
if risk_percentage >= 70:
|
|
230
|
+
risk_level = "Critical"
|
|
231
|
+
elif risk_percentage >= 50:
|
|
232
|
+
risk_level = "High"
|
|
233
|
+
elif risk_percentage >= 30:
|
|
234
|
+
risk_level = "Medium"
|
|
235
|
+
elif risk_percentage > 0:
|
|
236
|
+
risk_level = "Low"
|
|
237
|
+
else:
|
|
238
|
+
risk_level = "None"
|
|
239
|
+
|
|
240
|
+
return {
|
|
241
|
+
"total_findings": len(findings),
|
|
242
|
+
"severity_counts": severity_counts,
|
|
243
|
+
"risk_score": risk_score,
|
|
244
|
+
"risk_percentage": risk_percentage,
|
|
245
|
+
"risk_level": risk_level,
|
|
246
|
+
"frameworks_impacted": compliance_result.framework_summary,
|
|
247
|
+
"top_issues": self._get_top_issues(findings),
|
|
248
|
+
}
|
|
249
|
+
|
|
250
|
+
def _get_top_issues(self, findings: list[Finding], limit: int = 5) -> list[dict[str, Any]]:
|
|
251
|
+
"""Get top issues by severity."""
|
|
252
|
+
sorted_findings = sorted(
|
|
253
|
+
findings,
|
|
254
|
+
key=lambda f: (
|
|
255
|
+
["critical", "high", "medium", "low", "info", "unknown"].index(f.severity.value),
|
|
256
|
+
f.title,
|
|
257
|
+
),
|
|
258
|
+
)
|
|
259
|
+
return [
|
|
260
|
+
{
|
|
261
|
+
"title": f.title,
|
|
262
|
+
"severity": f.severity.value,
|
|
263
|
+
"file": f.file_path,
|
|
264
|
+
"rule_id": f.rule_id,
|
|
265
|
+
}
|
|
266
|
+
for f in sorted_findings[:limit]
|
|
267
|
+
]
|
|
268
|
+
|
|
269
|
+
def _build_remediation_timeline(self, findings: list[Finding]) -> dict[str, Any]:
|
|
270
|
+
"""Build remediation timeline recommendations."""
|
|
271
|
+
severity_counts = self._count_by_severity(findings)
|
|
272
|
+
|
|
273
|
+
# SLA recommendations based on industry standards
|
|
274
|
+
return {
|
|
275
|
+
"immediate": {
|
|
276
|
+
"description": "Address within 24 hours",
|
|
277
|
+
"count": severity_counts["critical"],
|
|
278
|
+
"severity": "critical",
|
|
279
|
+
},
|
|
280
|
+
"urgent": {
|
|
281
|
+
"description": "Address within 7 days",
|
|
282
|
+
"count": severity_counts["high"],
|
|
283
|
+
"severity": "high",
|
|
284
|
+
},
|
|
285
|
+
"standard": {
|
|
286
|
+
"description": "Address within 30 days",
|
|
287
|
+
"count": severity_counts["medium"],
|
|
288
|
+
"severity": "medium",
|
|
289
|
+
},
|
|
290
|
+
"planned": {
|
|
291
|
+
"description": "Address within 90 days",
|
|
292
|
+
"count": severity_counts["low"],
|
|
293
|
+
"severity": "low",
|
|
294
|
+
},
|
|
295
|
+
"informational": {
|
|
296
|
+
"description": "Review and document",
|
|
297
|
+
"count": severity_counts["info"],
|
|
298
|
+
"severity": "info",
|
|
299
|
+
},
|
|
300
|
+
}
|
|
301
|
+
|
|
302
|
+
def _generate_format(
|
|
303
|
+
self,
|
|
304
|
+
fmt: ReportFormat,
|
|
305
|
+
report_data: dict[str, Any],
|
|
306
|
+
output_dir: Path,
|
|
307
|
+
) -> Path | None:
|
|
308
|
+
"""Generate a specific report format."""
|
|
309
|
+
if fmt == ReportFormat.HTML:
|
|
310
|
+
from .html import HTMLReportGenerator
|
|
311
|
+
|
|
312
|
+
html_gen = HTMLReportGenerator()
|
|
313
|
+
return html_gen.generate(report_data, output_dir)
|
|
314
|
+
|
|
315
|
+
if fmt == ReportFormat.PDF:
|
|
316
|
+
from .pdf import PDFReportGenerator
|
|
317
|
+
|
|
318
|
+
pdf_gen = PDFReportGenerator()
|
|
319
|
+
return pdf_gen.generate(report_data, output_dir)
|
|
320
|
+
|
|
321
|
+
if fmt == ReportFormat.COMPLIANCE:
|
|
322
|
+
from .compliance_matrix import generate_compliance_matrix
|
|
323
|
+
|
|
324
|
+
return generate_compliance_matrix(report_data, output_dir)
|
|
325
|
+
|
|
326
|
+
if fmt == ReportFormat.JSON:
|
|
327
|
+
return self._generate_json(report_data, output_dir)
|
|
328
|
+
|
|
329
|
+
return None
|
|
330
|
+
|
|
331
|
+
def _generate_json(self, report_data: dict[str, Any], output_dir: Path) -> Path:
|
|
332
|
+
"""Generate JSON report."""
|
|
333
|
+
output_path = output_dir / "report.json"
|
|
334
|
+
|
|
335
|
+
# Convert dataclasses to dicts for JSON serialization
|
|
336
|
+
json_data = {
|
|
337
|
+
"metadata": {
|
|
338
|
+
"generated_at": report_data["metadata"].generated_at,
|
|
339
|
+
"generator_version": report_data["metadata"].generator_version,
|
|
340
|
+
"findings_count": report_data["metadata"].findings_count,
|
|
341
|
+
"frameworks_mapped": report_data["metadata"].frameworks_mapped,
|
|
342
|
+
"content_hash": report_data["metadata"].content_hash,
|
|
343
|
+
},
|
|
344
|
+
"executive_summary": report_data["executive_summary"],
|
|
345
|
+
"remediation_timeline": report_data["remediation_timeline"],
|
|
346
|
+
"severity_counts": report_data["severity_counts"],
|
|
347
|
+
"compliance_summary": report_data["compliance"].framework_summary,
|
|
348
|
+
"findings": [
|
|
349
|
+
{
|
|
350
|
+
"title": f.title,
|
|
351
|
+
"severity": f.severity.value,
|
|
352
|
+
"scanner": f.scanner,
|
|
353
|
+
"file_path": f.file_path,
|
|
354
|
+
"line": f.line,
|
|
355
|
+
"rule_id": f.rule_id,
|
|
356
|
+
"cwe": f.cwe,
|
|
357
|
+
"cve": f.cve,
|
|
358
|
+
"description": f.description,
|
|
359
|
+
}
|
|
360
|
+
for f in report_data["findings"]
|
|
361
|
+
],
|
|
362
|
+
}
|
|
363
|
+
|
|
364
|
+
output_path.write_text(json.dumps(json_data, indent=2))
|
|
365
|
+
return output_path
|
kekkai/report/html.py
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
"""HTML report generator.
|
|
2
|
+
|
|
3
|
+
Uses Jinja2 for templating with autoescaping enabled for XSS prevention.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from __future__ import annotations
|
|
7
|
+
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
from typing import Any
|
|
10
|
+
|
|
11
|
+
from jinja2 import Environment, PackageLoader, select_autoescape
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class HTMLReportGenerator:
    """Generates HTML security reports."""

    def __init__(self) -> None:
        env = Environment(
            loader=PackageLoader("kekkai.report", "templates"),
            autoescape=select_autoescape(["html", "xml"]),
            trim_blocks=True,
            lstrip_blocks=True,
        )
        # Register the severity helpers as template filters.
        env.filters["severity_class"] = self._severity_class
        env.filters["severity_badge"] = self._severity_badge
        self.env = env

    def generate(self, report_data: dict[str, Any], output_dir: Path) -> Path:
        """Render the report template and write ``report.html`` to *output_dir*."""
        template = self.env.get_template("report.html")

        context = {
            key: report_data[key]
            for key in (
                "metadata",
                "config",
                "findings",
                "compliance",
                "severity_counts",
                "executive_summary",
                "remediation_timeline",
            )
        }
        rendered = template.render(**context)

        destination = output_dir / "report.html"
        destination.write_text(rendered, encoding="utf-8")
        return destination

    @staticmethod
    def _severity_class(severity: str) -> str:
        """Return the CSS class for a severity level."""
        normalized = severity.lower()
        if normalized in ("critical", "high", "medium", "low", "info"):
            return f"severity-{normalized}"
        return "severity-unknown"

    @staticmethod
    def _severity_badge(severity: str) -> str:
        """Return badge HTML for a severity level."""
        palette = {
            "critical": "#dc3545",
            "high": "#fd7e14",
            "medium": "#ffc107",
            "low": "#17a2b8",
            "info": "#6c757d",
        }
        color = palette.get(severity.lower(), "#6c757d")
        return f'<span class="badge" style="background-color: {color};">{severity.upper()}</span>'
|
kekkai/report/pdf.py
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
"""PDF report generator.
|
|
2
|
+
|
|
3
|
+
Uses weasyprint for HTML-to-PDF conversion if available.
|
|
4
|
+
Falls back to HTML-only with warning if weasyprint is not installed.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
from typing import Any
|
|
11
|
+
|
|
12
|
+
from .html import HTMLReportGenerator
|
|
13
|
+
|
|
14
|
+
# Check weasyprint availability once at import time; weasyprint is an
# optional dependency, so its absence must not break importing this module.
_WEASYPRINT_AVAILABLE = False
try:
    from weasyprint import HTML as WeasyprintHTML  # type: ignore[import-not-found]

    _WEASYPRINT_AVAILABLE = True
except ImportError:
    # Keep the name defined so call sites can reference it unconditionally;
    # the availability flag stays False.
    WeasyprintHTML = None
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class PDFReportGenerator:
    """Generates PDF security reports.

    Requires weasyprint to be installed. If not available,
    falls back to HTML generation with a warning.
    """

    def __init__(self) -> None:
        self.html_generator = HTMLReportGenerator()

    @property
    def is_available(self) -> bool:
        """Check if PDF generation is available."""
        return _WEASYPRINT_AVAILABLE

    def generate(self, report_data: dict[str, Any], output_dir: Path) -> Path:
        """Generate PDF report file.

        If weasyprint is not available, generates HTML instead.
        """
        # The HTML is rendered first: it is both the fallback artifact and
        # the input for the PDF conversion.
        rendered_html = self.html_generator.generate(report_data, output_dir)

        if not _WEASYPRINT_AVAILABLE:
            # Return HTML path with warning - caller should handle
            return rendered_html

        target = output_dir / "report.pdf"

        document = WeasyprintHTML(
            string=rendered_html.read_text(encoding="utf-8"),
            base_url=str(output_dir),
        )
        document.write_pdf(target)

        return target
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
def is_pdf_available() -> bool:
    """Report whether the optional weasyprint dependency was importable."""
    return bool(_WEASYPRINT_AVAILABLE)
|