pandoraspec 0.1.1__py3-none-any.whl → 0.2.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pandoraspec/cli.py +28 -20
- pandoraspec/config.py +23 -0
- pandoraspec/constants.py +17 -0
- pandoraspec/core.py +52 -329
- pandoraspec/modules/__init__.py +0 -0
- pandoraspec/modules/drift.py +180 -0
- pandoraspec/modules/resilience.py +174 -0
- pandoraspec/modules/security.py +234 -0
- pandoraspec/orchestrator.py +69 -0
- pandoraspec/reporting/__init__.py +0 -0
- pandoraspec/reporting/generator.py +111 -0
- pandoraspec/{reporting.py → reporting/templates.py} +10 -88
- pandoraspec/seed.py +181 -0
- pandoraspec/utils/__init__.py +0 -0
- pandoraspec/utils/logger.py +21 -0
- pandoraspec/utils/parsing.py +35 -0
- pandoraspec/utils/url.py +23 -0
- pandoraspec-0.2.7.dist-info/METADATA +200 -0
- pandoraspec-0.2.7.dist-info/RECORD +23 -0
- pandoraspec-0.2.7.dist-info/entry_points.txt +2 -0
- pandoraspec-0.1.1.dist-info/METADATA +0 -72
- pandoraspec-0.1.1.dist-info/RECORD +0 -9
- pandoraspec-0.1.1.dist-info/entry_points.txt +0 -2
- {pandoraspec-0.1.1.dist-info → pandoraspec-0.2.7.dist-info}/WHEEL +0 -0
- {pandoraspec-0.1.1.dist-info → pandoraspec-0.2.7.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,111 @@
|
|
|
1
|
+
import os
|
|
2
|
+
from datetime import datetime
|
|
3
|
+
from weasyprint import HTML
|
|
4
|
+
from .templates import get_report_template
|
|
5
|
+
|
|
6
|
+
# Default output directory for generated reports (relative to the CWD).
REPORTS_DIR = "reports"

# exist_ok=True avoids the check-then-create race of an explicit
# os.path.exists() guard and is a no-op when the directory already exists.
os.makedirs(REPORTS_DIR, exist_ok=True)
|
|
10
|
+
|
|
11
|
+
def generate_report(vendor_name: str, audit_results: dict, output_path: str = None) -> str:
    """
    Module D: The Compliance Report (The Deliverable).

    Renders the audit results into a branded PDF report via WeasyPrint.

    Args:
        vendor_name: Display name of the audited vendor; also used in the
            default output filename.
        audit_results: Dict with "drift_check", "resilience" and "security"
            keys, each a list of finding dicts carrying at least a "status".
        output_path: Explicit path for the PDF. When omitted, a timestamped
            file is created under REPORTS_DIR.

    Returns:
        The path of the written PDF file.
    """
    # Only non-PASS findings count against the score.
    drift_issues = [r for r in audit_results["drift_check"] if r.get("status") != "PASS"]
    resilience_issues = [r for r in audit_results["resilience"] if r.get("status") != "PASS"]
    security_issues = [r for r in audit_results["security"] if r.get("status") != "PASS"]

    # Per-finding penalties are weighted by module (security weighs most).
    drift_score = max(0, 100 - len(drift_issues) * 10)
    resilience_score = max(0, 100 - len(resilience_issues) * 15)
    security_score = max(0, 100 - len(security_issues) * 20)

    total_score = (drift_score + resilience_score + security_score) / 3
    is_compliant = total_score >= 80  # 80% is the pass/fail threshold

    def render_findings_table(module_name, findings):
        # Renders one module's findings as an HTML table (or a success note).
        if not findings:
            return f"<p class='no-issues'>✅ No issues found in {module_name}.</p>"

        rows = ""
        for f in findings:
            endpoint = f.get('endpoint', 'Global')
            status = f.get('status', 'FAIL')

            if status == "PASS":
                severity_class = "pass"
                severity_text = "PASS"
            else:
                # Severity drives the CSS badge class; default to LOW.
                severity_class = f.get('severity', 'LOW').lower()
                severity_text = f.get('severity')

            rows += f"<tr><td><span class='badge badge-{severity_class}'>{severity_text}</span></td><td><code>{endpoint}</code></td><td><strong>{f.get('issue')}</strong></td><td>{f.get('details')}</td></tr>"

        return f"<table><thead><tr><th style='width: 10%'>Status</th><th style='width: 25%'>Endpoint</th><th style='width: 25%'>Issue</th><th>Technical Details</th></tr></thead><tbody>{rows}</tbody></table>"

    # NOTE: the template receives everything it needs as keyword arguments;
    # a previously-built (and unused) `context` dict has been removed.
    html_content = get_report_template(
        vendor_name=vendor_name,
        total_score=total_score,
        is_compliant=is_compliant,
        audit_results=audit_results,
        render_findings_table_func=render_findings_table
    )

    if output_path:
        filepath = output_path
    else:
        filename = f"{vendor_name.replace(' ', '_')}_{datetime.now().strftime('%Y%m%d%H%M%S')}.pdf"
        filepath = os.path.join(REPORTS_DIR, filename)

    HTML(string=html_content).write_pdf(filepath)
    return filepath
|
|
75
|
+
|
|
76
|
+
def generate_json_report(vendor_name: str, audit_results: dict, output_path: str = None) -> str:
    """
    Generates a machine-readable JSON report for CI/CD pipelines.

    Args:
        vendor_name: Display name of the audited vendor; also used in the
            default output filename.
        audit_results: Dict with "drift_check", "resilience" and "security"
            lists of finding dicts, each carrying at least a "status".
        output_path: Explicit destination path. When omitted, a timestamped
            file is written under REPORTS_DIR.

    Returns:
        The path of the written JSON file.
    """
    import json

    # Score each module: start at 100 and subtract a weighted penalty per
    # non-PASS finding (security failures weigh the most).
    penalties = {"drift_check": 10, "resilience": 15, "security": 20}
    scores = {}
    for module, penalty in penalties.items():
        failures = [entry for entry in audit_results[module] if entry.get("status") != "PASS"]
        scores[module] = max(0, 100 - penalty * len(failures))

    total_score = sum(scores.values()) / 3
    is_compliant = total_score >= 80  # 80% pass threshold

    report_data = {
        "vendor_name": vendor_name,
        "date": datetime.now().isoformat(),
        "score": round(total_score, 2),
        "is_compliant": is_compliant,
        "results": audit_results
    }

    if output_path:
        filepath = output_path
    else:
        stamp = datetime.now().strftime('%Y%m%d%H%M%S')
        filepath = os.path.join(REPORTS_DIR, f"{vendor_name.replace(' ', '_')}_{stamp}.json")

    with open(filepath, "w") as f:
        json.dump(report_data, f, indent=2)

    return filepath
|
|
@@ -1,85 +1,10 @@
|
|
|
1
|
-
import os
|
|
2
1
|
from datetime import datetime
|
|
3
|
-
from jinja2 import Environment, FileSystemLoader
|
|
4
|
-
from weasyprint import HTML
|
|
5
2
|
|
|
6
|
-
|
|
7
|
-
REPORTS_DIR = "reports"
|
|
8
|
-
|
|
9
|
-
if not os.path.exists(REPORTS_DIR):
|
|
10
|
-
os.makedirs(REPORTS_DIR)
|
|
11
|
-
|
|
12
|
-
def generate_report(vendor_name: str, audit_results: dict) -> str:
|
|
3
|
+
def get_report_template(vendor_name: str, total_score: float, is_compliant: bool, audit_results: dict, render_findings_table_func) -> str:
|
|
13
4
|
"""
|
|
14
|
-
|
|
15
|
-
Generates a branded PDF report.
|
|
5
|
+
Returns the HTML content for the DORA Compliance Report.
|
|
16
6
|
"""
|
|
17
|
-
|
|
18
|
-
# Filter out PASS results for scoring
|
|
19
|
-
drift_issues = [r for r in audit_results["drift_check"] if r.get("status") != "PASS"]
|
|
20
|
-
resilience_issues = [r for r in audit_results["resilience"] if r.get("status") != "PASS"]
|
|
21
|
-
security_issues = [r for r in audit_results["security"] if r.get("status") != "PASS"]
|
|
22
|
-
|
|
23
|
-
drift_score = max(0, 100 - len(drift_issues) * 10)
|
|
24
|
-
resilience_score = max(0, 100 - len(resilience_issues) * 15)
|
|
25
|
-
security_score = max(0, 100 - len(security_issues) * 20)
|
|
26
|
-
|
|
27
|
-
total_score = (drift_score + resilience_score + security_score) / 3
|
|
28
|
-
|
|
29
|
-
# Pass/Fail based on score
|
|
30
|
-
is_compliant = total_score >= 80
|
|
31
|
-
|
|
32
|
-
context = {
|
|
33
|
-
"vendor_name": vendor_name,
|
|
34
|
-
"date": datetime.now().strftime("%Y-%m-%d"),
|
|
35
|
-
"score": round(total_score),
|
|
36
|
-
"is_compliant": is_compliant,
|
|
37
|
-
"results": audit_results
|
|
38
|
-
}
|
|
39
|
-
|
|
40
|
-
# Helper to render findings tables
|
|
41
|
-
def render_findings_table(module_name, findings):
|
|
42
|
-
if not findings:
|
|
43
|
-
return f"<p class='no-issues'>✅ No issues found in {module_name}.</p>"
|
|
44
|
-
|
|
45
|
-
rows = ""
|
|
46
|
-
for f in findings:
|
|
47
|
-
endpoint = f.get('endpoint', 'Global')
|
|
48
|
-
status = f.get('status', 'FAIL')
|
|
49
|
-
|
|
50
|
-
if status == "PASS":
|
|
51
|
-
severity_class = "pass"
|
|
52
|
-
severity_text = "PASS"
|
|
53
|
-
else:
|
|
54
|
-
severity_class = f.get('severity', 'LOW').lower()
|
|
55
|
-
severity_text = f.get('severity')
|
|
56
|
-
|
|
57
|
-
rows += f"""
|
|
58
|
-
<tr>
|
|
59
|
-
<td><span class="badge badge-{severity_class}">{severity_text}</span></td>
|
|
60
|
-
<td><code>{endpoint}</code></td>
|
|
61
|
-
<td><strong>{f.get('issue')}</strong></td>
|
|
62
|
-
<td>{f.get('details')}</td>
|
|
63
|
-
</tr>
|
|
64
|
-
"""
|
|
65
|
-
|
|
66
|
-
return f"""
|
|
67
|
-
<table>
|
|
68
|
-
<thead>
|
|
69
|
-
<tr>
|
|
70
|
-
<th style="width: 10%">Status</th>
|
|
71
|
-
<th style="width: 25%">Endpoint</th>
|
|
72
|
-
<th style="width: 25%">Issue</th>
|
|
73
|
-
<th>Technical Details</th>
|
|
74
|
-
</tr>
|
|
75
|
-
</thead>
|
|
76
|
-
<tbody>
|
|
77
|
-
{rows}
|
|
78
|
-
</tbody>
|
|
79
|
-
</table>
|
|
80
|
-
"""
|
|
81
|
-
|
|
82
|
-
html_content = f"""
|
|
7
|
+
return f"""
|
|
83
8
|
<html>
|
|
84
9
|
<head>
|
|
85
10
|
<style>
|
|
@@ -132,6 +57,10 @@ def generate_report(vendor_name: str, audit_results: dict) -> str:
|
|
|
132
57
|
font-weight: 700;
|
|
133
58
|
color: white;
|
|
134
59
|
text-transform: uppercase;
|
|
60
|
+
font-size: 10px;
|
|
61
|
+
font-weight: 700;
|
|
62
|
+
color: white;
|
|
63
|
+
text-transform: uppercase;
|
|
135
64
|
}}
|
|
136
65
|
.badge-critical {{ background: #ef4444; }}
|
|
137
66
|
.badge-high {{ background: #f97316; }}
|
|
@@ -173,7 +102,7 @@ def generate_report(vendor_name: str, audit_results: dict) -> str:
|
|
|
173
102
|
This check verifies if the actual API implementation adheres to the provided OpenAPI specification.
|
|
174
103
|
Discrepancies here indicate "Schema Drift," which violates DORA requirements for accurate ICT documentation.
|
|
175
104
|
</p>
|
|
176
|
-
{
|
|
105
|
+
{render_findings_table_func("Module A", audit_results['drift_check'])}
|
|
177
106
|
</div>
|
|
178
107
|
|
|
179
108
|
<div class="section">
|
|
@@ -182,7 +111,7 @@ def generate_report(vendor_name: str, audit_results: dict) -> str:
|
|
|
182
111
|
Assesses high-load behavior and error handling (DORA Art. 24 & 25).
|
|
183
112
|
Checks if the system gracefully handles request flooding with appropriate 429 status codes.
|
|
184
113
|
</p>
|
|
185
|
-
{
|
|
114
|
+
{render_findings_table_func("Module B", audit_results['resilience'])}
|
|
186
115
|
</div>
|
|
187
116
|
|
|
188
117
|
<div class="section">
|
|
@@ -190,7 +119,7 @@ def generate_report(vendor_name: str, audit_results: dict) -> str:
|
|
|
190
119
|
<p style="font-size: 13px; color: #64748b; margin-bottom: 15px;">
|
|
191
120
|
Evaluates baseline security controls including TLS encryption and sensitive information leakage in URLs.
|
|
192
121
|
</p>
|
|
193
|
-
{
|
|
122
|
+
{render_findings_table_func("Module C", audit_results['security'])}
|
|
194
123
|
</div>
|
|
195
124
|
|
|
196
125
|
<footer>
|
|
@@ -200,10 +129,3 @@ def generate_report(vendor_name: str, audit_results: dict) -> str:
|
|
|
200
129
|
</body>
|
|
201
130
|
</html>
|
|
202
131
|
"""
|
|
203
|
-
|
|
204
|
-
filename = f"{vendor_name.replace(' ', '_')}_{datetime.now().strftime('%Y%m%d%H%M%S')}.pdf"
|
|
205
|
-
filepath = os.path.join(REPORTS_DIR, filename)
|
|
206
|
-
|
|
207
|
-
HTML(string=html_content).write_pdf(filepath)
|
|
208
|
-
|
|
209
|
-
return filepath
|
pandoraspec/seed.py
ADDED
|
@@ -0,0 +1,181 @@
|
|
|
1
|
+
import re
|
|
2
|
+
import requests
|
|
3
|
+
from typing import Any, Optional
|
|
4
|
+
from .utils.parsing import extract_json_value, extract_regex_value
|
|
5
|
+
from .utils.logger import logger
|
|
6
|
+
|
|
7
|
+
class SeedManager:
    """Resolves seed values and injects them into generated API test cases.

    Seed data is either a flat mapping, or hierarchical with 'general',
    'verbs' and 'endpoints' sections merged in increasing precedence.
    A value of the form ``{"from_endpoint": "GET /path", "extract": ...,
    "regex": ...}`` is resolved dynamically by calling that upstream
    endpoint (recursively resolving its own seeds first).
    """

    def __init__(self, seed_data: dict[str, Any], base_url: Optional[str] = None, api_key: Optional[str] = None):
        self.seed_data = seed_data
        self.base_url = base_url   # required to resolve dynamic (from_endpoint) seeds
        self.api_key = api_key     # sent as a Bearer token on upstream requests
        self.dynamic_cache = {}    # endpoint_def -> resolved value, avoids repeat calls
        self._resolving_stack = set()  # To detect recursion cycles

    def _get_seed_config(self, method: str, path: str) -> dict[str, Any]:
        """Merges seed data for a specific endpoint (General < Verb < Endpoint)"""
        if not self.seed_data:
            return {}

        is_hierarchical = any(k in self.seed_data for k in ['general', 'verbs', 'endpoints'])

        if is_hierarchical:
            # 1. General
            merged_data = self.seed_data.get('general', {}).copy()
            # 2. Verb
            verb_data = self.seed_data.get('verbs', {}).get(method.upper(), {})
            merged_data.update(verb_data)
            # 3. Endpoint
            endpoint_data = self.seed_data.get('endpoints', {}).get(path, {}).get(method.upper(), {})
            merged_data.update(endpoint_data)
        else:
            # Flat (legacy) format: the whole mapping applies everywhere.
            merged_data = self.seed_data.copy()

        return merged_data

    def _resolve_dynamic_value(self, config_value: Any) -> Any:
        """Resolves dynamic seed values with recursion support.

        Returns ``config_value`` unchanged unless it is a ``from_endpoint``
        directive; otherwise calls the upstream endpoint and extracts a
        value. Returns None when resolution fails for any reason.
        """
        if not isinstance(config_value, dict) or "from_endpoint" not in config_value:
            return config_value

        endpoint_def = config_value["from_endpoint"]

        # Check cache first
        if endpoint_def in self.dynamic_cache:
            return self.dynamic_cache[endpoint_def]

        # Cycle detection
        if endpoint_def in self._resolving_stack:
            logger.warning(f"Circular dependency detected for {endpoint_def}. Breaking cycle.")
            return None

        self._resolving_stack.add(endpoint_def)

        try:
            try:
                method, path = endpoint_def.split(" ", 1)
            except ValueError:
                logger.warning(f"Invalid endpoint definition '{endpoint_def}'. Expected 'METHOD /path'")
                return None

            if not self.base_url:
                logger.warning("Cannot resolve dynamic seed, base_url is not set.")
                return None

            # Recursive Step: Resolve dependencies BEFORE making the request
            # We get the seed config for the *upstream* endpoint we are about to call
            upstream_seed_config = self._get_seed_config(method, path)
            resolved_upstream_params = {}

            for k, v in upstream_seed_config.items():
                resolved_val = self._resolve_dynamic_value(v)
                if resolved_val is not None:
                    resolved_upstream_params[k] = resolved_val

            # URL Parameter Injection
            # Iterate through resolved params to inject into path (e.g. /users/{id})
            # Also fall back to general seeds if not explicitly resolved above (legacy behavior)
            general_seeds = self.seed_data.get('general', {}) if self.seed_data else {}

            def replace_param(match):
                param_name = match.group(1)
                # specific resolved param > general seed
                if param_name in resolved_upstream_params:
                    return str(resolved_upstream_params[param_name])
                if param_name in general_seeds:
                    return str(general_seeds[param_name])
                logger.warning(f"Missing seed value for {{{param_name}}} in dynamic endpoint {endpoint_def}")
                return match.group(0)  # leave the placeholder untouched

            url_path = re.sub(r"\{([a-zA-Z0-9_]+)\}", replace_param, path)
            url = f"{self.base_url.rstrip('/')}/{url_path.lstrip('/')}"

            # Prepare Request
            headers = {}
            if self.api_key:
                auth_header = self.api_key if self.api_key.lower().startswith("bearer ") else f"Bearer {self.api_key}"
                headers["Authorization"] = auth_header

            # Query Params from unused resolved seeds
            query_params = {}
            for k, v in resolved_upstream_params.items():
                # If it wasn't used in the path, put it in query params
                if f"{{{k}}}" not in path:
                    query_params[k] = v

            logger.debug(f"AUDIT LOG: Resolving dynamic seed from {method} {url_path}")
            # FIX: a timeout guards the audit run against an unresponsive
            # upstream; a Timeout propagates to the except below and is
            # treated as a failed resolution (None).
            response = requests.request(method, url, headers=headers, params=query_params, timeout=10)

            if response.status_code >= 400:
                logger.warning(f"Dynamic seed request failed with {response.status_code}")
                return None

            result = None
            extract_key = config_value.get("extract")
            regex_pattern = config_value.get("regex")

            # JSON Extraction
            if extract_key:
                try:
                    json_data = response.json()
                    result = extract_json_value(json_data, extract_key)
                except Exception:
                    logger.warning("Failed to parse JSON for seed extraction")
            else:
                result = response.text

            # Regex Extraction
            if regex_pattern and result is not None:
                result = extract_regex_value(str(result), regex_pattern)

            self.dynamic_cache[endpoint_def] = result
            return result

        except Exception as e:
            logger.error(f"Failed to resolve dynamic seed: {e}")
            return None
        finally:
            # Always unwind the cycle-detection stack, success or failure.
            self._resolving_stack.discard(endpoint_def)

    def apply_seed_data(self, case):
        """Helper to inject seed data into test cases with hierarchy: General < Verbs < Endpoints

        Returns the set of parameter/header keys that were actually seeded.
        """
        if not self.seed_data:
            return set()

        # `case` is expected to be a Schemathesis-style case object;
        # fall back to the general config when it has no operation attached.
        if hasattr(case, 'operation'):
            method = case.operation.method.upper()
            path = case.operation.path
            merged_data = self._get_seed_config(method, path)
        else:
            merged_data = self._get_seed_config("", "")

        # Resolve dynamic values for the final merged dataset
        resolved_data = {}
        for k, v in merged_data.items():
            resolved_val = self._resolve_dynamic_value(v)
            if resolved_val is not None:
                resolved_data[k] = resolved_val

        seeded_keys = set()
        # Inject into Path Parameters (e.g., /users/{userId})
        if hasattr(case, 'path_parameters') and case.path_parameters:
            for key in case.path_parameters:
                if key in resolved_data:
                    case.path_parameters[key] = resolved_data[key]
                    seeded_keys.add(key)

        # Inject into Query Parameters (e.g., ?status=active)
        if hasattr(case, 'query') and case.query:
            for key in case.query:
                if key in resolved_data:
                    case.query[key] = resolved_data[key]
                    seeded_keys.add(key)

        # Inject into Headers (e.g., X-Tenant-ID) — headers must be strings.
        if hasattr(case, 'headers') and case.headers:
            for key in case.headers:
                if key in resolved_data:
                    case.headers[key] = str(resolved_data[key])
                    seeded_keys.add(key)

        return seeded_keys
|
|
File without changes
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
from rich.logging import RichHandler
|
|
3
|
+
|
|
4
|
+
def setup_logger(name: str = "pandoraspec", level: int = logging.INFO) -> logging.Logger:
    """Return the application logger, attaching a Rich handler on first use.

    Subsequent calls with the same name return the already-configured
    logger without adding duplicate handlers.
    """
    app_logger = logging.getLogger(name)

    if not app_logger.handlers:
        app_logger.setLevel(level)
        rich_handler = RichHandler(rich_tracebacks=True, markup=True, show_time=False)
        rich_handler.setFormatter(logging.Formatter("%(message)s"))
        app_logger.addHandler(rich_handler)

    return app_logger

# Module-wide singleton used throughout the package.
logger = setup_logger()
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
from typing import Any, Optional
|
|
2
|
+
import re
|
|
3
|
+
|
|
4
|
+
def extract_json_value(data: Any, path: str) -> Any:
    """
    Extracts a value from a nested dictionary or list using dot notation.
    Supports list indices, including negative ones
    (e.g. 'items.0.id', 'items.-1.id').

    Returns None as soon as any path segment cannot be resolved.
    """
    val = data
    for key in path.split('.'):
        if isinstance(val, dict):
            # Missing keys yield None; the next iteration then bails out.
            val = val.get(key)
        elif isinstance(val, list):
            # int() accepts negative indices, generalizing the previous
            # digits-only check; a non-numeric segment raises ValueError.
            try:
                val = val[int(key)]
            except (ValueError, IndexError):
                return None
        else:
            return None
    return val
|
|
24
|
+
|
|
25
|
+
def extract_regex_value(text: str, pattern: str) -> Optional[str]:
    """
    Extracts a value from text using a regex pattern.
    Returns the first group if present, otherwise the whole match.
    Returns None when text or pattern is empty, or nothing matches.
    """
    if not text or not pattern:
        return None

    found = re.search(pattern, str(text))
    if found is None:
        return None

    # Prefer the first capture group when the pattern defines any.
    return found.group(1) if found.groups() else found.group(0)
|
pandoraspec/utils/url.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
from typing import Optional
|
|
2
|
+
from urllib.parse import urlparse, urlunparse
|
|
3
|
+
|
|
4
|
+
def derive_base_url_from_target(target_url: str) -> Optional[str]:
    """
    Derives a base URL from a target spec URL by stripping the filename.
    e.g., https://api.com/v1/swagger.json -> https://api.com/v1

    Any query string or fragment on the spec URL is dropped as well — they
    belong to the spec download, not to the API base URL.

    Returns None for non-HTTP(S) or unparsable inputs.
    """
    try:
        if not target_url or not target_url.startswith("http"):
            return None

        parsed = urlparse(target_url)
        path_parts = parsed.path.split('/')

        # Simple heuristic: remove the last segment if it looks like a file (has dot)
        if '.' in path_parts[-1]:
            path_parts.pop()

        new_path = '/'.join(path_parts)
        # FIX: clear query/fragment so ".../swagger.json?v=2" does not
        # yield a base URL ending in "?v=2".
        return urlunparse(parsed._replace(path=new_path, query="", fragment=""))
    except Exception:
        return None
|