pandoraspec 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pandoraspec/__init__.py +0 -0
- pandoraspec/cli.py +70 -0
- pandoraspec/core.py +354 -0
- pandoraspec/reporting.py +209 -0
- pandoraspec-0.1.1.dist-info/METADATA +72 -0
- pandoraspec-0.1.1.dist-info/RECORD +9 -0
- pandoraspec-0.1.1.dist-info/WHEEL +5 -0
- pandoraspec-0.1.1.dist-info/entry_points.txt +2 -0
- pandoraspec-0.1.1.dist-info/top_level.txt +1 -0
pandoraspec/__init__.py
ADDED
|
File without changes
|
pandoraspec/cli.py
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
1
|
+
import typer
|
|
2
|
+
from rich.console import Console
|
|
3
|
+
from rich.table import Table
|
|
4
|
+
from rich.panel import Panel
|
|
5
|
+
from rich.text import Text
|
|
6
|
+
from .core import AuditEngine
|
|
7
|
+
from .reporting import generate_report
|
|
8
|
+
|
|
9
|
+
# Typer application exposed as the package's console-script entry point.
# FIX: the help text previously said "OpenAI Specs"; the tool audits
# OpenAPI specifications (see README and module docstrings), so the typo
# is corrected.
app = typer.Typer(help="DORA Audit CLI - Verify Compliance of OpenAPI Specs")
# Shared Rich console used by all commands for styled terminal output.
console = Console()
|
|
11
|
+
|
|
12
|
+
def _summarize(findings):
    """Return (pass_count, fail_count, rich_status_markup) for one module's findings.

    A finding counts as passed only when its "status" is exactly "PASS";
    anything else (FAIL, missing status, ...) counts as a failure, matching
    how the audit modules report results.
    """
    passed = sum(1 for r in findings if r.get("status") == "PASS")
    failed = len(findings) - passed
    status = "[bold red]FAIL[/bold red]" if failed > 0 else "[bold green]PASS[/bold green]"
    return passed, failed, status


@app.command(name="scan")
def scan(
    schema_url: str = typer.Argument(..., help="URL or path to OpenAPI schema"),
    api_key: str = typer.Option(None, "--key", "-k", help="API Key for authenticated endpoints"),
    vendor: str = typer.Option("Vendor", "--vendor", "-v", help="Vendor name for the report")
):
    """
    Run a DORA audit against an OpenAPI schema.

    Loads the schema, runs the three audit modules (integrity, resilience,
    security), prints a Rich summary table and writes a PDF report.
    Exits with code 1 on any error.
    """
    console.print(Panel(f"[bold blue]Starting DORA Audit for {vendor}[/bold blue]", border_style="blue"))
    # FIX: restored mojibake character ("š") to a proper emoji.
    console.print(f"🔍 Scanning [bold]{schema_url}[/bold]...")

    try:
        engine = AuditEngine(schema_url=schema_url, api_key=api_key)

        # AuditEngine is synchronous and prints its own "AUDIT LOG" lines, so
        # we simply let its output interleave with the Rich summary below.
        results = engine.run_full_audit()

        # Display Summary Table: one row per audit module.
        # FIX: the per-module pass/fail counting was copy-pasted three times;
        # it is now computed once by _summarize().
        table = Table(title="Audit Summary")
        table.add_column("Module", style="cyan", no_wrap=True)
        table.add_column("Status", style="bold")
        table.add_column("Issues (Pass/Fail)", style="magenta")

        for label, key in (
            ("Module A: Integrity", "drift_check"),
            ("Module B: Resilience", "resilience"),
            ("Module C: Security", "security"),
        ):
            passed, failed, status = _summarize(results[key])
            table.add_row(label, status, f"{passed} / {failed}")

        console.print(table)

        # Generate the PDF deliverable (Module D).
        report_path = generate_report(vendor, results)

        # FIX: restored mojibake character ("š") to a proper emoji.
        console.print(Panel(f"[bold green]Audit Complete![/bold green]\n📄 Report generated: [link={report_path}]{report_path}[/link]", border_style="green"))

    except Exception as e:
        # Top-level CLI boundary: show a friendly message and signal failure
        # to the shell instead of dumping a traceback.
        console.print(f"[bold red]Error:[/bold red] {str(e)}")
        raise typer.Exit(code=1)
|
|
68
|
+
|
|
69
|
+
# Allow running this module directly (python -m pandoraspec.cli) in addition
# to the installed console-script entry point.
if __name__ == "__main__":
    app()
|
pandoraspec/core.py
ADDED
|
@@ -0,0 +1,354 @@
|
|
|
1
|
+
import schemathesis
|
|
2
|
+
from typing import List, Dict
|
|
3
|
+
import requests
|
|
4
|
+
from schemathesis import checks
|
|
5
|
+
from schemathesis.specs.openapi import checks as oai_checks
|
|
6
|
+
from schemathesis.checks import CheckContext, ChecksConfig
|
|
7
|
+
import html
|
|
8
|
+
import os
|
|
9
|
+
|
|
10
|
+
class AuditEngine:
    """Runs the three technical DORA audit modules against an OpenAPI schema.

    Module A (run_drift_check) compares the live API against the spec with
    schemathesis checks; Module B (run_resilience_tests) floods one endpoint
    to probe rate limiting; Module C (run_security_hygiene) inspects TLS use
    and URL-based auth leakage. All findings are plain dicts consumed by the
    CLI and by reporting.generate_report.

    NOTE(review): progress/diagnostics go to stdout via print(); a logger
    would be preferable, but the CLI currently relies on this output.
    """

    def __init__(self, schema_url: str, base_url: str = None, api_key: str = None):
        """Load the schema and resolve the base URL to test against.

        schema_url may be a local file path or an HTTP(S) URL. base_url, when
        given, overrides any server URL found in the spec. api_key, when
        given, is sent as a Bearer Authorization header on every request.
        """
        self.schema_url = schema_url
        self.api_key = api_key

        # --- FIXED LOCALHOST HANDLING ---
        # If running in Docker (implied by this environment), 'localhost' refers to the container.
        # We need to try to reach the host machine.
        working_schema_url = schema_url
        if "localhost" in schema_url or "127.0.0.1" in schema_url:
            # Try host.docker.internal first (standard for Docker Desktop)
            # We DON'T change self.schema_url so the report still shows what the user entered.
            try:
                print(f"DEBUG: Attempting to resolve localhost URL using host.docker.internal")
                test_url = schema_url.replace("localhost", "host.docker.internal").replace("127.0.0.1", "host.docker.internal")
                requests.head(test_url, timeout=2) # Quick check
                working_schema_url = test_url
                print(f"DEBUG: Successfully resolved to {working_schema_url}")
            except Exception:
                # Fall back to the URL exactly as the user supplied it.
                print(f"DEBUG: Failed to reach host.docker.internal, trying original")
                pass

        try:
            # A path that exists on disk is loaded as a file; anything else is
            # treated as a URL.
            if os.path.exists(working_schema_url) and os.path.isfile(working_schema_url):
                print(f"DEBUG: Loading schema from local file: {working_schema_url}")
                self.schema = schemathesis.openapi.from_path(working_schema_url)
            else:
                self.schema = schemathesis.openapi.from_url(working_schema_url)

            # 1. Use explicitly provided base_url if available
            if base_url:
                self.schema.base_url = base_url
                self.base_url = base_url
            else:
                # 2. Priority 1: Extract from the 'servers' field in the spec
                resolved_url = None
                if hasattr(self.schema, "raw_schema"):
                    servers = self.schema.raw_schema.get("servers", [])
                    if servers and isinstance(servers, list) and len(servers) > 0:
                        spec_server_url = servers[0].get("url")
                        if spec_server_url:
                            resolved_url = spec_server_url
                            print(f"DEBUG: Found server URL in specification: {resolved_url}")

                # 3. Priority 2: Use whatever schemathesis resolved automatically (fallback)
                if not resolved_url:
                    resolved_url = getattr(self.schema, "base_url", None)
                    print(f"DEBUG: Falling back to Schemathesis resolved base_url: {resolved_url}")

                if not resolved_url and self.schema_url:
                    # Fallback: Derive from schema_url (e.g., remove swagger.json)
                    try:
                        from urllib.parse import urlparse, urlunparse
                        parsed = urlparse(self.schema_url)
                        path_parts = parsed.path.split('/')
                        # Simple heuristic: remove the last segment (e.g. swagger.json) to get base
                        if '.' in path_parts[-1]:
                            path_parts.pop()
                        new_path = '/'.join(path_parts)
                        resolved_url = urlunparse(parsed._replace(path=new_path))
                        print(f"DEBUG: Derived base_url from schema_url: {resolved_url}")
                    except Exception as e:
                        print(f"DEBUG: Failed to derive base_url from schema_url: {e}")

                print(f"DEBUG: Final resolved base_url for engine: {resolved_url}")

                # Fix base_url if it's localhost as well
                if resolved_url and ("localhost" in resolved_url or "127.0.0.1" in resolved_url):
                    print(f"DEBUG: Adjusting base_url '{resolved_url}' for Docker environment")
                    resolved_url = resolved_url.replace("localhost", "host.docker.internal").replace("127.0.0.1", "host.docker.internal")

                # self.base_url may legitimately end up None; run_drift_check
                # assumes it is set (it calls .rstrip on it) -- see NOTE there.
                self.base_url = resolved_url
                if resolved_url:
                    try:
                        self.schema.base_url = resolved_url
                    except Exception:
                        pass

        except Exception as e:
            # A base_url AttributeError from schemathesis is tolerated (some
            # schema objects expose base_url read-only); anything else is a
            # hard failure surfaced to the caller.
            if isinstance(e, AttributeError) and "base_url" in str(e):
                self.base_url = None
            else:
                raise ValueError(f"Failed to load OpenAPI schema from {schema_url}. Error: {str(e)}")

    def run_drift_check(self) -> List[Dict]:
        """
        Module A: The 'Docs vs. Code' Drift Check (The Integrity Test)
        Uses schemathesis to verify if the API implementation matches the spec.

        Returns one finding dict per (endpoint, check) pair that was executed:
        PASS entries for conforming checks, FAIL entries (severity HIGH) for
        drift or check-execution errors. Endpoints for which no test case
        could be generated are skipped silently.
        """
        results = []
        # Mapping check names to actual functions
        check_map = {
            "not_a_server_error": checks.not_a_server_error,
            "status_code_conformance": oai_checks.status_code_conformance,
            "response_schema_conformance": oai_checks.response_schema_conformance
        }
        check_names = list(check_map.keys())

        # Schemathesis 4.x checks require a context object
        checks_config = ChecksConfig()
        check_ctx = CheckContext(
            override=None,
            auth=None,
            headers=None,
            config=checks_config,
            transport_kwargs=None,
        )

        for op in self.schema.get_all_operations():
            # Handle Result type (Ok/Err) wrapping if present
            operation = op.ok() if hasattr(op, "ok") else op

            operation_path = f"{operation.method.upper()} {operation.path}"
            print(f"AUDIT LOG: Testing endpoint {operation_path}")

            try:
                # Generate test case
                # NOTE(review): 'except (AttributeError, Exception)' is
                # equivalent to 'except Exception'; kept byte-identical here.
                try:
                    case = operation.as_strategy().example()
                except (AttributeError, Exception):
                    try:
                        cases = list(operation.make_case())
                        case = cases[0] if cases else None
                    except (AttributeError, Exception):
                        case = None

                if not case:
                    continue

                # Prepare headers
                headers = {}
                if self.api_key:
                    # Accept either a raw key or a pre-formatted "Bearer ..." value.
                    auth_header = self.api_key if self.api_key.lower().startswith("bearer ") else f"Bearer {self.api_key}"
                    headers["Authorization"] = auth_header

                # Call the API
                # NOTE(review): target_url is only used for logging; the actual
                # request URL is built by case.call from base_url. Also assumes
                # self.base_url is not None -- a None base_url raises here and
                # is swallowed by the outer except below.
                target_url = f"{self.base_url.rstrip('/')}/{operation.path.lstrip('/')}"
                print(f"AUDIT LOG: Calling {operation.method.upper()} {target_url}")

                response = case.call(base_url=self.base_url, headers=headers)
                print(f"AUDIT LOG: Response Status Code: {response.status_code}")

                # --- FIXED VALIDATION LOGIC ---
                # We manually call the check function to ensure arguments are passed correctly.
                for check_name in check_names:
                    check_func = check_map[check_name]
                    try:
                        # Direct call: check_func(ctx, response, case)
                        check_func(check_ctx, response, case)

                        # If we get here, the check passed
                        results.append({
                            "module": "A",
                            "endpoint": f"{operation.method.upper()} {operation.path}",
                            "issue": f"{check_name} - Passed",
                            "status": "PASS",
                            "severity": "INFO",
                            "details": f"Status: {response.status_code}"
                        })

                    except AssertionError as e:
                        # This catches actual drift (e.g., Schema validation failed)
                        # Capture and format detailed error info
                        validation_errors = []

                        # Safely get causes if they exist and are iterable
                        causes = getattr(e, "causes", None)
                        if causes:
                            for cause in causes:
                                if hasattr(cause, "message"):
                                    validation_errors.append(cause.message)
                                else:
                                    validation_errors.append(str(cause))

                        if not validation_errors:
                            validation_errors.append(str(e) or "Validation failed")

                        # Details are HTML fragments consumed verbatim by the
                        # PDF report; user-controlled text is escaped.
                        err_msg = "<br>".join(validation_errors)
                        safe_err = html.escape(err_msg)

                        # Add helpful context (Status & Body Preview)
                        context_msg = f"Status: {response.status_code}"
                        try:
                            if response.content:
                                preview = response.text[:500]
                                safe_preview = html.escape(preview)
                                context_msg += f"<br>Response: {safe_preview}"
                        except Exception:
                            pass

                        full_details = f"<strong>Error:</strong> {safe_err}<br><br><strong>Context:</strong><br>{context_msg}"

                        print(f"AUDIT LOG: Validation {check_name} failed: {err_msg}")
                        results.append({
                            "module": "A",
                            "endpoint": f"{operation.method.upper()} {operation.path}",
                            "issue": f"Schema Drift Detected ({check_name})",
                            "status": "FAIL",
                            "details": full_details,
                            "severity": "HIGH"
                        })
                    except Exception as e:
                        # This catches unexpected coding errors
                        print(f"AUDIT LOG: Error executing check {check_name}: {str(e)}")
                        results.append({
                            "module": "A",
                            "endpoint": f"{operation.method.upper()} {operation.path}",
                            "issue": f"Check Execution Error ({check_name})",
                            "status": "FAIL",
                            "details": str(e),
                            "severity": "HIGH"
                        })

            except Exception as e:
                # Endpoint-level failure (request error, missing base_url, ...)
                # is logged and the audit moves on to the next operation.
                print(f"AUDIT LOG: Critical Error during endpoint test: {str(e)}")
                continue

        return results

    def run_resilience_tests(self) -> List[Dict]:
        """
        Module B: The 'Resilience' Stress Test (Art. 24 & 25)
        Checks for Rate Limiting and Timeout gracefully handling.

        Floods the FIRST operation in the schema with 50 requests and
        inspects status codes: 429 means rate limiting works; 500 means the
        service degrades badly under load. Returns a list of finding dicts.
        """
        results = []
        ops = list(self.schema.get_all_operations())
        if not ops:
            return []

        operation = ops[0].ok() if hasattr(ops[0], "ok") else ops[0]

        # Simulate flooding
        responses = []
        for _ in range(50):
            # Same case-generation fallback chain as run_drift_check.
            try:
                case = operation.as_strategy().example()
            except (AttributeError, Exception):
                try:
                    cases = list(operation.make_case())
                    case = cases[0] if cases else None
                except (AttributeError, Exception):
                    case = None

            if case:
                headers = {}
                if self.api_key:
                    auth_header = self.api_key if self.api_key.lower().startswith("bearer ") else f"Bearer {self.api_key}"
                    headers["Authorization"] = auth_header

                responses.append(case.call(base_url=self.base_url, headers=headers))

        has_429 = any(r.status_code == 429 for r in responses)
        has_500 = any(r.status_code == 500 for r in responses)

        if not has_429 and has_500:
            results.append({
                "module": "B",
                "issue": "Poor Resilience: 500 Error during flood",
                "status": "FAIL",
                "details": "The API returned 500 Internal Server Error instead of 429 Too Many Requests when flooded.",
                "severity": "CRITICAL"
            })
        elif not has_429:
            results.append({
                "module": "B",
                "issue": "No Rate Limiting Enforced",
                "status": "FAIL",
                "details": "The API did not return 429 Too Many Requests during high volume testing.",
                "severity": "MEDIUM"
            })
        else:
            results.append({
                "module": "B",
                "issue": "Rate Limiting Functional",
                "status": "PASS",
                "details": "The API correctly returned 429 Too Many Requests when flooded.",
                "severity": "INFO"
            })

        if not has_500:
            results.append({
                "module": "B",
                "issue": "Stress Handling",
                "status": "PASS",
                "details": "No 500 Internal Server Errors were observed during stress testing.",
                "severity": "INFO"
            })

        return results

    def run_security_hygiene(self) -> List[Dict]:
        """
        Module C: Security Hygiene Check
        Checks for TLS and Auth leakage in URL.

        TLS: the resolved base URL must start with "https". Auth leakage:
        any endpoint path containing "key" or "token" is flagged as a risk
        of credentials being passed in URLs instead of headers.
        """
        results = []
        print(f"AUDIT LOG: Checking Security Hygiene for base URL: {self.base_url}")
        if self.base_url and not self.base_url.startswith("https"):
            results.append({
                "module": "C",
                "issue": "Insecure Connection (No TLS)",
                "status": "FAIL",
                "details": "The API base URL does not use HTTPS.",
                "severity": "CRITICAL"
            })
        else:
            # NOTE(review): a None base_url also lands here and is reported
            # as PASS -- verify this is the intended behavior.
            results.append({
                "module": "C",
                "issue": "Secure Connection (TLS)",
                "status": "PASS",
                "details": "The API uses HTTPS.",
                "severity": "INFO"
            })

        auth_leakage_found = False
        for op in self.schema.get_all_operations():
            operation = op.ok() if hasattr(op, "ok") else op
            endpoint = operation.path
            if "key" in endpoint.lower() or "token" in endpoint.lower():
                auth_leakage_found = True
                results.append({
                    "module": "C",
                    "issue": "Auth Leakage Risk",
                    "status": "FAIL",
                    "details": f"Endpoint '{endpoint}' indicates auth tokens might be passed in the URL.",
                    "severity": "HIGH"
                })

        if not auth_leakage_found:
            results.append({
                "module": "C",
                "issue": "No Auth Leakage in URLs",
                "status": "PASS",
                "details": "No endpoints found with 'key' or 'token' in the path, suggesting safe header-based auth.",
                "severity": "INFO"
            })

        return results

    def run_full_audit(self) -> Dict:
        """Run all three audit modules and return their findings keyed by module."""
        return {
            "drift_check": self.run_drift_check(),
            "resilience": self.run_resilience_tests(),
            "security": self.run_security_hygiene()
        }
|
pandoraspec/reporting.py
ADDED
|
@@ -0,0 +1,209 @@
|
|
|
1
|
+
import os
|
|
2
|
+
from datetime import datetime
|
|
3
|
+
from jinja2 import Environment, FileSystemLoader
|
|
4
|
+
from weasyprint import HTML
|
|
5
|
+
|
|
6
|
+
# Directory that would hold Jinja2 templates (currently unused by the
# inline-HTML report path below, but kept for compatibility).
TEMPLATE_DIR = os.path.join(os.path.dirname(__file__), "templates")
# All generated PDFs are written here, relative to the current working dir.
REPORTS_DIR = "reports"

# FIX: the previous "if not os.path.exists(...): os.makedirs(...)" pattern is
# a check-then-act race (TOCTOU); exist_ok=True creates the directory
# atomically and is a no-op when it already exists.
os.makedirs(REPORTS_DIR, exist_ok=True)
|
|
11
|
+
|
|
12
|
+
def generate_report(vendor_name: str, audit_results: dict) -> str:
    """
    Module D: The Compliance Report (The Deliverable)

    Renders the audit results into a styled HTML document and converts it to
    a branded PDF via WeasyPrint.

    Args:
        vendor_name: Display name of the audited vendor; also used (after
            sanitization) in the output file name.
        audit_results: Dict with "drift_check", "resilience" and "security"
            keys, each a list of finding dicts produced by AuditEngine.

    Returns:
        Path of the generated PDF file inside REPORTS_DIR.
    """
    # Calculate score (simple MVP logic): only non-PASS findings count
    # against each module's score, with module-specific penalties.
    drift_issues = [r for r in audit_results["drift_check"] if r.get("status") != "PASS"]
    resilience_issues = [r for r in audit_results["resilience"] if r.get("status") != "PASS"]
    security_issues = [r for r in audit_results["security"] if r.get("status") != "PASS"]

    drift_score = max(0, 100 - len(drift_issues) * 10)
    resilience_score = max(0, 100 - len(resilience_issues) * 15)
    security_score = max(0, 100 - len(security_issues) * 20)

    total_score = (drift_score + resilience_score + security_score) / 3

    # Pass/Fail based on score
    is_compliant = total_score >= 80

    # NOTE: an unused 'context' dict (leftover from an earlier Jinja2
    # template approach) was removed here; the report is built from inline
    # f-strings below.

    # Helper to render one module's findings as an HTML table.
    def render_findings_table(module_name, findings):
        if not findings:
            # FIX: restored mojibake character to a proper check mark.
            return f"<p class='no-issues'>✅ No issues found in {module_name}.</p>"

        rows = ""
        for f in findings:
            endpoint = f.get('endpoint', 'Global')
            status = f.get('status', 'FAIL')

            if status == "PASS":
                severity_class = "pass"
                severity_text = "PASS"
            else:
                # FIX: f.get('severity') without a default could render the
                # literal text "None" in the PDF; default to 'LOW' like the
                # CSS-class lookup on the previous line.
                severity_class = f.get('severity', 'LOW').lower()
                severity_text = f.get('severity', 'LOW')

            # NOTE: 'details' is trusted HTML built (and escaped) in core.py;
            # it is intentionally not re-escaped here.
            rows += f"""
            <tr>
                <td><span class="badge badge-{severity_class}">{severity_text}</span></td>
                <td><code>{endpoint}</code></td>
                <td><strong>{f.get('issue')}</strong></td>
                <td>{f.get('details')}</td>
            </tr>
            """

        return f"""
        <table>
            <thead>
                <tr>
                    <th style="width: 10%">Status</th>
                    <th style="width: 25%">Endpoint</th>
                    <th style="width: 25%">Issue</th>
                    <th>Technical Details</th>
                </tr>
            </thead>
            <tbody>
                {rows}
            </tbody>
        </table>
        """

    html_content = f"""
    <html>
    <head>
    <style>
        @page {{ margin: 50px; }}
        body {{ font-family: 'Inter', 'Helvetica Neue', Helvetica, Arial, sans-serif; color: #1e293b; line-height: 1.5; }}
        .header {{
            background: linear-gradient(135deg, #1e1b4b 0%, #4338ca 100%);
            color: white;
            padding: 40px;
            text-align: center;
            border-radius: 12px;
            margin-bottom: 40px;
        }}
        .header h1 {{ margin: 0; font-size: 28px; letter-spacing: -0.5px; }}
        .header p {{ margin: 10px 0 0; opacity: 0.8; font-size: 16px; }}

        .summary-grid {{ display: flex; justify-content: space-between; margin-bottom: 40px; }}
        .summary-card {{
            background: #f8fafc;
            padding: 20px;
            border-radius: 8px;
            width: 45%;
            border: 1px solid #e2e8f0;
        }}

        .score-big {{ font-size: 56px; font-weight: 800; margin: 10px 0; }}
        .status-badge {{
            display: inline-block;
            padding: 6px 16px;
            border-radius: 99px;
            font-weight: 700;
            text-transform: uppercase;
            font-size: 14px;
        }}
        .status-pass {{ background: #dcfce7; color: #166534; }}
        .status-fail {{ background: #fee2e2; color: #991b1b; }}

        .section {{ margin-top: 40px; page-break-inside: avoid; }}
        .section h3 {{ border-bottom: 2px solid #e2e8f0; padding-bottom: 10px; color: #0f172a; margin-bottom: 20px; }}

        table {{ width: 100%; border-collapse: collapse; margin-top: 10px; font-size: 12px; table-layout: fixed; word-wrap: break-word; }}
        th, td {{ padding: 12px; text-align: left; border-bottom: 1px solid #e2e8f0; vertical-align: top; }}
        td {{ word-break: break-word; white-space: pre-wrap; }}
        th {{ background-color: #f1f5f9; color: #475569; font-weight: 600; text-transform: uppercase; letter-spacing: 0.5px; }}

        .badge {{
            padding: 4px 8px;
            border-radius: 4px;
            font-size: 10px;
            font-weight: 700;
            color: white;
            text-transform: uppercase;
        }}
        .badge-critical {{ background: #ef4444; }}
        .badge-high {{ background: #f97316; }}
        .badge-medium {{ background: #eab308; }}
        .badge-low {{ background: #3b82f6; }}
        .badge-pass {{ background: #16a34a; }}

        .no-issues {{ color: #059669; font-weight: 500; padding: 10px 0; }}
        code {{ font-family: 'Courier New', monospace; background: #f1f5f9; padding: 2px 4px; border-radius: 3px; font-size: 11px; }}

        footer {{ margin-top: 50px; text-align: center; color: #94a3b8; font-size: 10px; border-top: 1px solid #e2e8f0; padding-top: 20px; }}
    </style>
    </head>
    <body>
        <div class="header">
            <h1>DORA ICT Third-Party Risk Assessment</h1>
            <p>Vendor Compliance Audit for <strong>{vendor_name}</strong></p>
            <p>Report Date: {datetime.now().strftime("%B %d, %Y")}</p>
        </div>

        <div class="summary-grid">
            <div class="summary-card">
                <p style="margin:0; color:#64748b; font-weight:600;">Overall Risk Score</p>
                <div class="score-big" style="color: {'#166534' if is_compliant else '#991b1b'}">{round(total_score)}<span style="font-size: 24px; color:#94a3b8; font-weight:400;">/100</span></div>
            </div>
            <div class="summary-card">
                <p style="margin:0; color:#64748b; font-weight:600;">Compliance Status</p>
                <div style="margin-top: 20px;">
                    <span class="status-badge {'status-pass' if is_compliant else 'status-fail'}">
                        {'COMPLIANT (PASS)' if is_compliant else 'NON-COMPLIANT (FAIL)'}
                    </span>
                </div>
            </div>
        </div>

        <div class="section">
            <h3>Technical Findings - Module A: Schema Integrity (Docs vs. Code)</h3>
            <p style="font-size: 13px; color: #64748b; margin-bottom: 15px;">
                This check verifies if the actual API implementation adheres to the provided OpenAPI specification.
                Discrepancies here indicate "Schema Drift," which violates DORA requirements for accurate ICT documentation.
            </p>
            {render_findings_table("Module A", audit_results['drift_check'])}
        </div>

        <div class="section">
            <h3>Technical Findings - Module B: Resilience Stress Test</h3>
            <p style="font-size: 13px; color: #64748b; margin-bottom: 15px;">
                Assesses high-load behavior and error handling (DORA Art. 24 & 25).
                Checks if the system gracefully handles request flooding with appropriate 429 status codes.
            </p>
            {render_findings_table("Module B", audit_results['resilience'])}
        </div>

        <div class="section">
            <h3>Technical Findings - Module C: Security Hygiene</h3>
            <p style="font-size: 13px; color: #64748b; margin-bottom: 15px;">
                Evaluates baseline security controls including TLS encryption and sensitive information leakage in URLs.
            </p>
            {render_findings_table("Module C", audit_results['security'])}
        </div>

        <footer>
            <p>CONFIDENTIAL - FOR INTERNAL AUDIT PURPOSES ONLY</p>
            <p>Generated by PanDoraSpec</p>
        </footer>
    </body>
    </html>
    """

    # FIX: previously only spaces in vendor_name were replaced, so a name
    # containing '/', '\\' or other special characters could break the path
    # or write outside REPORTS_DIR. Keep alphanumerics, replace the rest.
    safe_vendor = "".join(c if c.isalnum() else "_" for c in vendor_name) or "vendor"
    filename = f"{safe_vendor}_{datetime.now().strftime('%Y%m%d%H%M%S')}.pdf"
    filepath = os.path.join(REPORTS_DIR, filename)

    HTML(string=html_content).write_pdf(filepath)

    return filepath
|
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: pandoraspec
|
|
3
|
+
Version: 0.1.1
|
|
4
|
+
Summary: DORA Compliance Auditor for OpenAPI Specs
|
|
5
|
+
Author-email: Ulises Merlan <ulimerlan@gmail.com>
|
|
6
|
+
License: MIT
|
|
7
|
+
Requires-Python: >=3.9
|
|
8
|
+
Description-Content-Type: text/markdown
|
|
9
|
+
Requires-Dist: fastapi
|
|
10
|
+
Requires-Dist: schemathesis==4.9.1
|
|
11
|
+
Requires-Dist: typer[all]
|
|
12
|
+
Requires-Dist: rich
|
|
13
|
+
Requires-Dist: weasyprint
|
|
14
|
+
Requires-Dist: jinja2
|
|
15
|
+
Requires-Dist: requests
|
|
16
|
+
|
|
17
|
+
# PanDoraSpec
|
|
18
|
+
|
|
19
|
+
**The Open DORA Compliance Engine for OpenAPI Specs.**
|
|
20
|
+
|
|
21
|
+
PanDoraSpec is a CLI tool that performs deep technical due diligence on your APIs to verify compliance with **DORA (Digital Operational Resilience Act)** requirements. It compares your OpenAPI/Swagger specifications against real-world implementation to detect schema drift, resilience gaps, and security issues.
|
|
22
|
+
|
|
23
|
+
---
|
|
24
|
+
|
|
25
|
+
## 📦 Installation
|
|
26
|
+
|
|
27
|
+
```bash
|
|
28
|
+
pip install pandoraspec
|
|
29
|
+
```
|
|
30
|
+
|
|
31
|
+
## 🚀 Usage
|
|
32
|
+
|
|
33
|
+
Run the audit directly from your terminal.
|
|
34
|
+
|
|
35
|
+
### Basic Scan
|
|
36
|
+
```bash
|
|
37
|
+
pandoraspec https://petstore.swagger.io/v2/swagger.json
|
|
38
|
+
```
|
|
39
|
+
|
|
40
|
+
### With Options
|
|
41
|
+
```bash
|
|
42
|
+
pandoraspec https://api.example.com/spec.json --vendor "Stripe" --key "sk_live_..."
|
|
43
|
+
```
|
|
44
|
+
|
|
45
|
+
### Local File
|
|
46
|
+
```bash
|
|
47
|
+
pandoraspec ./openapi.yaml
|
|
48
|
+
```
|
|
49
|
+
|
|
50
|
+
---
|
|
51
|
+
|
|
52
|
+
## 🛡️ What It Checks
|
|
53
|
+
|
|
54
|
+
### Module A: The Integrity Test (Drift)
|
|
55
|
+
Checks if your API implementation matches your documentation.
|
|
56
|
+
- **Why?** DORA requires you to monitor if the service effectively supports your critical functions. If the API behaves differently than documented, it's a risk.
|
|
57
|
+
|
|
58
|
+
### Module B: The Resilience Test
|
|
59
|
+
Stress tests the API to ensure it handles invalid inputs gracefully (`4xx` vs `5xx`).
|
|
60
|
+
- **Why?** DORA Article 25 calls for "Digital operational resilience testing".
|
|
61
|
+
|
|
62
|
+
### Module C: Security Hygiene
|
|
63
|
+
Checks for common security headers and configurations.
|
|
64
|
+
|
|
65
|
+
### Module D: The Report
|
|
66
|
+
Generates a branded PDF report: **"DORA ICT Third-Party Technical Risk Assessment"**.
|
|
67
|
+
|
|
68
|
+
---
|
|
69
|
+
|
|
70
|
+
## 📄 License
|
|
71
|
+
|
|
72
|
+
MIT
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
pandoraspec/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
2
|
+
pandoraspec/cli.py,sha256=sJsVHMmQ_UHQf_lap--aLuf99_HUS_0rOAA5nTAxF54,3087
|
|
3
|
+
pandoraspec/core.py,sha256=bKoPYSfqQa4Yn7CxOX6QPCZXCLMLoiagD8aMfzLtC6o,16059
|
|
4
|
+
pandoraspec/reporting.py,sha256=aAFImWkhi5Ho6AQUCANJy-9MpIbzCJlsCWBSRmivOSQ,8804
|
|
5
|
+
pandoraspec-0.1.1.dist-info/METADATA,sha256=-YMhZl-uwnYuBS64UhgYZrr0oQgluAqCOiZR-w5Jq8k,1892
|
|
6
|
+
pandoraspec-0.1.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
|
|
7
|
+
pandoraspec-0.1.1.dist-info/entry_points.txt,sha256=gmXGBQNpfy0IeOjB_SqunmaitLbyFsUZdgfwQOto2P0,52
|
|
8
|
+
pandoraspec-0.1.1.dist-info/top_level.txt,sha256=8It7kimNf30-5ZUI7CZl6kCBeImIG8H49ZjSU26dRuc,12
|
|
9
|
+
pandoraspec-0.1.1.dist-info/RECORD,,
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
pandoraspec
|