pandoraspec 0.1.1__py3-none-any.whl → 0.2.7__py3-none-any.whl
This diff compares two package versions as publicly released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- pandoraspec/cli.py +28 -20
- pandoraspec/config.py +23 -0
- pandoraspec/constants.py +17 -0
- pandoraspec/core.py +52 -329
- pandoraspec/modules/__init__.py +0 -0
- pandoraspec/modules/drift.py +180 -0
- pandoraspec/modules/resilience.py +174 -0
- pandoraspec/modules/security.py +234 -0
- pandoraspec/orchestrator.py +69 -0
- pandoraspec/reporting/__init__.py +0 -0
- pandoraspec/reporting/generator.py +111 -0
- pandoraspec/{reporting.py → reporting/templates.py} +10 -88
- pandoraspec/seed.py +181 -0
- pandoraspec/utils/__init__.py +0 -0
- pandoraspec/utils/logger.py +21 -0
- pandoraspec/utils/parsing.py +35 -0
- pandoraspec/utils/url.py +23 -0
- pandoraspec-0.2.7.dist-info/METADATA +200 -0
- pandoraspec-0.2.7.dist-info/RECORD +23 -0
- pandoraspec-0.2.7.dist-info/entry_points.txt +2 -0
- pandoraspec-0.1.1.dist-info/METADATA +0 -72
- pandoraspec-0.1.1.dist-info/RECORD +0 -9
- pandoraspec-0.1.1.dist-info/entry_points.txt +0 -2
- {pandoraspec-0.1.1.dist-info → pandoraspec-0.2.7.dist-info}/WHEEL +0 -0
- {pandoraspec-0.1.1.dist-info → pandoraspec-0.2.7.dist-info}/top_level.txt +0 -0
pandoraspec/cli.py
CHANGED
@@ -2,35 +2,43 @@ import typer
 from rich.console import Console
 from rich.table import Table
 from rich.panel import Panel
-from
-from .core import AuditEngine
-from .reporting import generate_report
+from .orchestrator import run_dora_audit_logic

 app = typer.Typer(help="DORA Audit CLI - Verify Compliance of OpenAI Specs")
 console = Console()

-
-
-    schema_url: str = typer.Argument(..., help="URL or path to OpenAPI schema"),
+def run_audit(
+    target: str = typer.Argument(..., help="URL or path to OpenAPI schema"),
     api_key: str = typer.Option(None, "--key", "-k", help="API Key for authenticated endpoints"),
-    vendor: str = typer.Option("Vendor", "--vendor", "-v", help="Vendor name for the report")
+    vendor: str = typer.Option("Vendor", "--vendor", "-v", help="Vendor name for the report"),
+    config: str = typer.Option(None, "--config", "-c", help="Path to .yaml configuration file"),
+    base_url: str = typer.Option(None, "--base-url", "-b", help="Override API Base URL"),
+    output_format: str = typer.Option("pdf", "--format", "-f", help="Report format (pdf or json)"),
+    output_path: str = typer.Option(None, "--output", "-o", help="Custom path for the output report file")
 ):
     """
     Run a DORA audit against an OpenAPI schema.
     """
     console.print(Panel(f"[bold blue]Starting DORA Audit for {vendor}[/bold blue]", border_style="blue"))
-    console.print(f"🔎 Scanning [bold]{
+    console.print(f"🔎 Scanning [bold]{target}[/bold]...")

     try:
-
+        # Delegate to Orchestrator
+        audit_result = run_dora_audit_logic(
+            target=target,
+            vendor=vendor,
+            api_key=api_key,
+            config_path=config,
+            base_url=base_url,
+            output_format=output_format,
+            output_path=output_path
+        )

-
-
-        # The user requested "Rich terminal output".
-        # Let's run it.
-
-        results = engine.run_full_audit()
+        if audit_result.seed_count > 0:
+            console.print(f"[green]Loaded {audit_result.seed_count} seed values from config[/green]")

+        results = audit_result.results
+
         # Display Summary Table
         table = Table(title="Audit Summary")
         table.add_column("Module", style="cyan", no_wrap=True)
@@ -56,15 +64,15 @@ def scan(
         table.add_row("Module C: Security", sec_status, f"{sec_pass} / {sec_fail}")

         console.print(table)
-
-        # Generate Report
-        report_path = generate_report(vendor, results)

-        console.print(Panel(f"[bold green]Audit Complete![/bold green]\n📄 Report generated: [link={report_path}]{report_path}[/link]", border_style="green"))
+        console.print(Panel(f"[bold green]Audit Complete![/bold green]\n📄 Report generated: [link={audit_result.report_path}]{audit_result.report_path}[/link]", border_style="green"))

     except Exception as e:
         console.print(f"[bold red]Error:[/bold red] {str(e)}")
         raise typer.Exit(code=1)

+def main():
+    typer.run(run_audit)
+
 if __name__ == "__main__":
-
+    main()
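For orientation, here is a sketch of calling the new orchestrator layer directly from Python. The keyword arguments mirror the `run_dora_audit_logic(...)` call in the diff above, and the attributes read from the result (`seed_count`, `results`, `report_path`) are the ones the CLI itself uses; the example spec URL and the exact return type are assumptions.

```python
# Illustrative only: argument names come from the cli.py diff above,
# the spec URL is a placeholder, and the return object's type is assumed.
from pandoraspec.orchestrator import run_dora_audit_logic

audit_result = run_dora_audit_logic(
    target="https://api.example.com/openapi.json",  # URL or local path to the OpenAPI schema
    vendor="Example Vendor",
    api_key=None,         # API key for authenticated endpoints
    config_path=None,     # optional .yaml configuration file
    base_url=None,        # optional manual override of the API base URL
    output_format="pdf",  # "pdf" or "json"
    output_path=None,     # custom path for the output report file
)

print(audit_result.seed_count)   # seed values loaded from config
print(audit_result.report_path)  # where the report was written
results = audit_result.results   # per-module findings consumed by the summary table
```

The new `main()` wrapper simply hands `run_audit` to `typer.run`, which is what the console-script entry point invokes.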
pandoraspec/config.py
ADDED
@@ -0,0 +1,23 @@
+from pydantic import BaseModel, Field, ValidationError
+from typing import Dict, Any
+from .utils.logger import logger
+
+class PandoraConfig(BaseModel):
+    seed_data: Dict[str, Any] = Field(
+        default_factory=dict,
+        description="Seed data for API testing. Keys can be parameter names or endpoint definitions (METHOD /path)."
+    )
+
+def validate_config(config_dict: Dict[str, Any]) -> PandoraConfig:
+    """
+    Validates the configuration dictionary against the PandoraConfig schema.
+    Raises ValidationError if invalid.
+    """
+    try:
+        return PandoraConfig(**config_dict)
+    except ValidationError as e:
+        logger.error("Configuration validation failed!")
+        for err in e.errors():
+            loc = " -> ".join(str(l) for l in err['loc'])
+            logger.error(f" Field: {loc} | Error: {err['msg']}")
+        raise e
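Because `config.py` is added in full above, its usage is easy to show. The seed-data shapes follow the field description on `PandoraConfig` (plain parameter names or `METHOD /path` keys); the concrete keys below are invented examples.

```python
# Usage sketch for the new config module; the seed keys are made-up examples.
from pydantic import ValidationError
from pandoraspec.config import validate_config

cfg = validate_config({
    "seed_data": {
        "user_id": 42,                            # plain parameter name
        "GET /users/{user_id}": {"user_id": 42},  # endpoint-style key (METHOD /path)
    }
})
print(cfg.seed_data)

try:
    validate_config({"seed_data": "not-a-dict"})  # wrong type for seed_data
except ValidationError:
    # Each offending field was logged via the package logger before the error was re-raised.
    pass
```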
pandoraspec/constants.py
ADDED
@@ -0,0 +1,17 @@
+# Resilience/Stress Testing
+FLOOD_REQUEST_COUNT = 20
+
+# Security Hygiene
+SENSITIVE_PATH_KEYWORDS = ["key", "token"]
+
+LATENCY_THRESHOLD_WARN = 1.0 # Seconds
+RECOVERY_WAIT_TIME = 2.0 # Seconds
+
+SECURITY_SCAN_LIMIT = 3 # Max endpoints to probe per security check
+
+# HTTP Status Codes
+HTTP_200_OK = 200
+HTTP_401_UNAUTHORIZED = 401
+HTTP_403_FORBIDDEN = 403
+HTTP_429_TOO_MANY_REQUESTS = 429
+HTTP_500_INTERNAL_SERVER_ERROR = 500
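The new `modules/resilience.py` and `modules/security.py` are not expanded in this diff, so the following is only a guess at how constants such as `FLOOD_REQUEST_COUNT` and the status-code names might be consumed by a flood test; the `flood_endpoint` helper is hypothetical.

```python
# Hypothetical flood-test sketch built on pandoraspec.constants;
# the real resilience module's implementation is not shown in this diff.
import time
import requests

from pandoraspec.constants import (
    FLOOD_REQUEST_COUNT,
    HTTP_429_TOO_MANY_REQUESTS,
    HTTP_500_INTERNAL_SERVER_ERROR,
    RECOVERY_WAIT_TIME,
)

def flood_endpoint(url: str) -> dict:
    """Fire FLOOD_REQUEST_COUNT requests and summarise how the API coped."""
    statuses = [requests.get(url, timeout=5).status_code for _ in range(FLOOD_REQUEST_COUNT)]
    time.sleep(RECOVERY_WAIT_TIME)  # give the API a moment to recover before follow-up probes
    return {
        "rate_limited": HTTP_429_TOO_MANY_REQUESTS in statuses,
        "server_errors": statuses.count(HTTP_500_INTERNAL_SERVER_ERROR),
    }
```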
pandoraspec/core.py
CHANGED
@@ -1,47 +1,35 @@
 import schemathesis
-from typing import
-import requests
-from schemathesis import checks
-from schemathesis.specs.openapi import checks as oai_checks
-from schemathesis.checks import CheckContext, ChecksConfig
-import html
+from typing import Any
 import os
+from .seed import SeedManager
+from .utils.logger import logger
+from .utils.url import derive_base_url_from_target
+from .modules.drift import run_drift_check
+from .modules.resilience import run_resilience_tests
+from .modules.security import run_security_hygiene

 class AuditEngine:
-    def __init__(self,
-        self.
+    def __init__(self, target: str, api_key: str = None, seed_data: dict[str, Any] = None, base_url: str = None):
+        self.target = target
         self.api_key = api_key
-
-
-
-
-        working_schema_url = schema_url
-        if "localhost" in schema_url or "127.0.0.1" in schema_url:
-            # Try host.docker.internal first (standard for Docker Desktop)
-            # We DON'T change self.schema_url so the report still shows what the user entered.
-            try:
-                print(f"DEBUG: Attempting to resolve localhost URL using host.docker.internal")
-                test_url = schema_url.replace("localhost", "host.docker.internal").replace("127.0.0.1", "host.docker.internal")
-                requests.head(test_url, timeout=2) # Quick check
-                working_schema_url = test_url
-                print(f"DEBUG: Successfully resolved to {working_schema_url}")
-            except Exception:
-                print(f"DEBUG: Failed to reach host.docker.internal, trying original")
-                pass
+        self.seed_data = seed_data or {}
+        self.base_url = base_url
+        self.dynamic_cache = {}
+        self.schema = None

         try:
-            if os.path.exists(
-
-            self.schema = schemathesis.openapi.from_path(
+            if os.path.exists(target) and os.path.isfile(target):
+                logger.debug(f"Loading schema from local file: {target}")
+                self.schema = schemathesis.openapi.from_path(target)
             else:
-                self.schema = schemathesis.openapi.from_url(
+                self.schema = schemathesis.openapi.from_url(target)

-            #
-            if base_url:
-
-
+            # If base_url was manually provided, we skip dynamic resolution
+            if self.base_url:
+                logger.debug(f"Using manual override base_url: {self.base_url}")
+                resolved_url = self.base_url
             else:
-                #
+                # Priority 1: Extract from the 'servers' field in the spec
                 resolved_url = None
                 if hasattr(self.schema, "raw_schema"):
                     servers = self.schema.raw_schema.get("servers", [])
@@ -49,306 +37,41 @@ class AuditEngine:
                     spec_server_url = servers[0].get("url")
                     if spec_server_url:
                         resolved_url = spec_server_url
-
-
-                # 3. Priority 2: Use whatever schemathesis resolved automatically (fallback)
-                if not resolved_url:
-                    resolved_url = getattr(self.schema, "base_url", None)
-                    print(f"DEBUG: Falling back to Schemathesis resolved base_url: {resolved_url}")
-
-                if not resolved_url and self.schema_url:
-                    # Fallback: Derive from schema_url (e.g., remove swagger.json)
-                    try:
-                        from urllib.parse import urlparse, urlunparse
-                        parsed = urlparse(self.schema_url)
-                        path_parts = parsed.path.split('/')
-                        # Simple heuristic: remove the last segment (e.g. swagger.json) to get base
-                        if '.' in path_parts[-1]:
-                            path_parts.pop()
-                        new_path = '/'.join(path_parts)
-                        resolved_url = urlunparse(parsed._replace(path=new_path))
-                        print(f"DEBUG: Derived base_url from schema_url: {resolved_url}")
-                    except Exception as e:
-                        print(f"DEBUG: Failed to derive base_url from schema_url: {e}")
-
-            print(f"DEBUG: Final resolved base_url for engine: {resolved_url}")
-
-            # Fix base_url if it's localhost as well
-            if resolved_url and ("localhost" in resolved_url or "127.0.0.1" in resolved_url):
-                print(f"DEBUG: Adjusting base_url '{resolved_url}' for Docker environment")
-                resolved_url = resolved_url.replace("localhost", "host.docker.internal").replace("127.0.0.1", "host.docker.internal")
-
-            self.base_url = resolved_url
-            if resolved_url:
-                try:
-                    self.schema.base_url = resolved_url
-                except Exception:
-                    pass
-
-        except Exception as e:
-            if isinstance(e, AttributeError) and "base_url" in str(e):
-                self.base_url = None
-            else:
-                raise ValueError(f"Failed to load OpenAPI schema from {schema_url}. Error: {str(e)}")
-
-    def run_drift_check(self) -> List[Dict]:
-        """
-        Module A: The 'Docs vs. Code' Drift Check (The Integrity Test)
-        Uses schemathesis to verify if the API implementation matches the spec.
-        """
-        results = []
-        # Mapping check names to actual functions
-        check_map = {
-            "not_a_server_error": checks.not_a_server_error,
-            "status_code_conformance": oai_checks.status_code_conformance,
-            "response_schema_conformance": oai_checks.response_schema_conformance
-        }
-        check_names = list(check_map.keys())
-
-        # Schemathesis 4.x checks require a context object
-        checks_config = ChecksConfig()
-        check_ctx = CheckContext(
-            override=None,
-            auth=None,
-            headers=None,
-            config=checks_config,
-            transport_kwargs=None,
-        )
-
-        for op in self.schema.get_all_operations():
-            # Handle Result type (Ok/Err) wrapping if present
-            operation = op.ok() if hasattr(op, "ok") else op
-
-            operation_path = f"{operation.method.upper()} {operation.path}"
-            print(f"AUDIT LOG: Testing endpoint {operation_path}")
+                        logger.debug(f"Found server URL in specification: {resolved_url}")

-
-
-
-
-            except (AttributeError, Exception):
-                try:
-                    cases = list(operation.make_case())
-                    case = cases[0] if cases else None
-                except (AttributeError, Exception):
-                    case = None
-
-            if not case:
-                continue
-
-            # Prepare headers
-            headers = {}
-            if self.api_key:
-                auth_header = self.api_key if self.api_key.lower().startswith("bearer ") else f"Bearer {self.api_key}"
-                headers["Authorization"] = auth_header
-
-            # Call the API
-            target_url = f"{self.base_url.rstrip('/')}/{operation.path.lstrip('/')}"
-            print(f"AUDIT LOG: Calling {operation.method.upper()} {target_url}")
-
-            response = case.call(base_url=self.base_url, headers=headers)
-            print(f"AUDIT LOG: Response Status Code: {response.status_code}")
-
-            # --- FIXED VALIDATION LOGIC ---
-            # We manually call the check function to ensure arguments are passed correctly.
-            for check_name in check_names:
-                check_func = check_map[check_name]
-                try:
-                    # Direct call: check_func(ctx, response, case)
-                    check_func(check_ctx, response, case)
-
-                    # If we get here, the check passed
-                    results.append({
-                        "module": "A",
-                        "endpoint": f"{operation.method.upper()} {operation.path}",
-                        "issue": f"{check_name} - Passed",
-                        "status": "PASS",
-                        "severity": "INFO",
-                        "details": f"Status: {response.status_code}"
-                    })
-
-                except AssertionError as e:
-                    # This catches actual drift (e.g., Schema validation failed)
-                    # Capture and format detailed error info
-                    validation_errors = []
-
-                    # Safely get causes if they exist and are iterable
-                    causes = getattr(e, "causes", None)
-                    if causes:
-                        for cause in causes:
-                            if hasattr(cause, "message"):
-                                validation_errors.append(cause.message)
-                            else:
-                                validation_errors.append(str(cause))
-
-                    if not validation_errors:
-                        validation_errors.append(str(e) or "Validation failed")
-
-                    err_msg = "<br>".join(validation_errors)
-                    safe_err = html.escape(err_msg)
-
-                    # Add helpful context (Status & Body Preview)
-                    context_msg = f"Status: {response.status_code}"
-                    try:
-                        if response.content:
-                            preview = response.text[:500]
-                            safe_preview = html.escape(preview)
-                            context_msg += f"<br>Response: {safe_preview}"
-                    except Exception:
-                        pass
-
-                    full_details = f"<strong>Error:</strong> {safe_err}<br><br><strong>Context:</strong><br>{context_msg}"
+                # Priority 2: Use whatever schemathesis resolved automatically (fallback)
+                if not resolved_url:
+                    resolved_url = getattr(self.schema, "base_url", None)
+                    logger.debug(f"Falling back to Schemathesis resolved base_url: {resolved_url}")

-
-
-
-
-
-
-                        "details": full_details,
-                        "severity": "HIGH"
-                    })
-                except Exception as e:
-                    # This catches unexpected coding errors
-                    print(f"AUDIT LOG: Error executing check {check_name}: {str(e)}")
-                    results.append({
-                        "module": "A",
-                        "endpoint": f"{operation.method.upper()} {operation.path}",
-                        "issue": f"Check Execution Error ({check_name})",
-                        "status": "FAIL",
-                        "details": str(e),
-                        "severity": "HIGH"
-                    })
-
-        except Exception as e:
-            print(f"AUDIT LOG: Critical Error during endpoint test: {str(e)}")
-            continue
-
-        return results
+                if not resolved_url:
+                    # Fallback: Derive from target URL
+                    derived = derive_base_url_from_target(self.target)
+                    if derived:
+                        resolved_url = derived
+                        logger.debug(f"Derived base_url from schema_url: {resolved_url}")

-
-
-
-        Checks for Rate Limiting and Timeout gracefully handling.
-        """
-        results = []
-        ops = list(self.schema.get_all_operations())
-        if not ops:
-            return []
-
-        operation = ops[0].ok() if hasattr(ops[0], "ok") else ops[0]
-
-        # Simulate flooding
-        responses = []
-        for _ in range(50):
-            try:
-                case = operation.as_strategy().example()
-            except (AttributeError, Exception):
+            logger.debug(f"Final resolved base_url for engine: {resolved_url}")
+            self.base_url = resolved_url
+            if resolved_url:
                 try:
-
-
-
-
-
-
-
-                if
-
-
-
-            responses.append(case.call(base_url=self.base_url, headers=headers))
-
-        has_429 = any(r.status_code == 429 for r in responses)
-        has_500 = any(r.status_code == 500 for r in responses)
-
-        if not has_429 and has_500:
-            results.append({
-                "module": "B",
-                "issue": "Poor Resilience: 500 Error during flood",
-                "status": "FAIL",
-                "details": "The API returned 500 Internal Server Error instead of 429 Too Many Requests when flooded.",
-                "severity": "CRITICAL"
-            })
-        elif not has_429:
-            results.append({
-                "module": "B",
-                "issue": "No Rate Limiting Enforced",
-                "status": "FAIL",
-                "details": "The API did not return 429 Too Many Requests during high volume testing.",
-                "severity": "MEDIUM"
-            })
-        else:
-            results.append({
-                "module": "B",
-                "issue": "Rate Limiting Functional",
-                "status": "PASS",
-                "details": "The API correctly returned 429 Too Many Requests when flooded.",
-                "severity": "INFO"
-            })
-
-        if not has_500:
-            results.append({
-                "module": "B",
-                "issue": "Stress Handling",
-                "status": "PASS",
-                "details": "No 500 Internal Server Errors were observed during stress testing.",
-                "severity": "INFO"
-            })
-
-        return results
+                    self.schema.base_url = resolved_url
+                except Exception:
+                    pass
+        except Exception as e:
+            # Handle invalid URL or schema loading error gracefully
+            logger.error(f"Error loading schema: {e}")
+            if target and (target.startswith("http") or os.path.exists(target)):
+                pass # Allow to continue if it's just a warning, but schemathesis might fail later
+            else:
+                raise ValueError(f"Failed to load OpenAPI schema from {target}. Error: {str(e)}")

-
-
-        Module C: Security Hygiene Check
-        Checks for TLS and Auth leakage in URL.
-        """
-        results = []
-        print(f"AUDIT LOG: Checking Security Hygiene for base URL: {self.base_url}")
-        if self.base_url and not self.base_url.startswith("https"):
-            results.append({
-                "module": "C",
-                "issue": "Insecure Connection (No TLS)",
-                "status": "FAIL",
-                "details": "The API base URL does not use HTTPS.",
-                "severity": "CRITICAL"
-            })
-        else:
-            results.append({
-                "module": "C",
-                "issue": "Secure Connection (TLS)",
-                "status": "PASS",
-                "details": "The API uses HTTPS.",
-                "severity": "INFO"
-            })
-
-        auth_leakage_found = False
-        for op in self.schema.get_all_operations():
-            operation = op.ok() if hasattr(op, "ok") else op
-            endpoint = operation.path
-            if "key" in endpoint.lower() or "token" in endpoint.lower():
-                auth_leakage_found = True
-                results.append({
-                    "module": "C",
-                    "issue": "Auth Leakage Risk",
-                    "status": "FAIL",
-                    "details": f"Endpoint '{endpoint}' indicates auth tokens might be passed in the URL.",
-                    "severity": "HIGH"
-                })
-
-        if not auth_leakage_found:
-            results.append({
-                "module": "C",
-                "issue": "No Auth Leakage in URLs",
-                "status": "PASS",
-                "details": "No endpoints found with 'key' or 'token' in the path, suggesting safe header-based auth.",
-                "severity": "INFO"
-            })
-
-        return results
+            # Initialize Seed Manager
+            self.seed_manager = SeedManager(self.seed_data, self.base_url, self.api_key)

-    def run_full_audit(self) ->
+    def run_full_audit(self) -> dict:
         return {
-            "drift_check": self.
-            "resilience": self.
-            "security": self.
+            "drift_check": run_drift_check(self.schema, self.base_url, self.api_key, self.seed_manager),
+            "resilience": run_resilience_tests(self.schema, self.base_url, self.api_key, self.seed_manager),
+            "security": run_security_hygiene(self.schema, self.base_url, self.api_key)
         }
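Taken together, `AuditEngine` now receives its inputs explicitly and delegates each module to the new `pandoraspec.modules` functions. Below is a minimal sketch of driving it directly, using only the constructor signature and the `run_full_audit()` keys visible above; the spec URL is a placeholder, and it assumes each module still returns a list of finding dicts, as the 0.1.1 code did.

```python
# Illustrative driver for the refactored engine; the spec URL is a placeholder
# and the per-module result shape (list of finding dicts) is assumed from 0.1.1.
from pandoraspec.core import AuditEngine

engine = AuditEngine(
    target="https://api.example.com/openapi.json",  # URL or local path to the OpenAPI spec
    api_key=None,    # optional bearer token
    seed_data={},    # typically the validated PandoraConfig.seed_data
    base_url=None,   # optional override; otherwise resolved from the spec's servers field
)

results = engine.run_full_audit()
for module in ("drift_check", "resilience", "security"):
    print(module, len(results[module]), "findings")
```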
Files without changes (renamed only): WHEEL, top_level.txt