gitflow-analytics 1.3.6__py3-none-any.whl → 3.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gitflow_analytics/_version.py +1 -1
- gitflow_analytics/classification/batch_classifier.py +156 -4
- gitflow_analytics/cli.py +897 -179
- gitflow_analytics/config/loader.py +40 -1
- gitflow_analytics/config/schema.py +4 -0
- gitflow_analytics/core/cache.py +20 -0
- gitflow_analytics/core/data_fetcher.py +1254 -228
- gitflow_analytics/core/git_auth.py +169 -0
- gitflow_analytics/core/git_timeout_wrapper.py +347 -0
- gitflow_analytics/core/metrics_storage.py +12 -3
- gitflow_analytics/core/progress.py +219 -18
- gitflow_analytics/core/subprocess_git.py +145 -0
- gitflow_analytics/extractors/ml_tickets.py +3 -2
- gitflow_analytics/extractors/tickets.py +93 -8
- gitflow_analytics/integrations/jira_integration.py +1 -1
- gitflow_analytics/integrations/orchestrator.py +47 -29
- gitflow_analytics/metrics/branch_health.py +3 -2
- gitflow_analytics/models/database.py +72 -1
- gitflow_analytics/pm_framework/adapters/jira_adapter.py +12 -5
- gitflow_analytics/pm_framework/orchestrator.py +8 -3
- gitflow_analytics/qualitative/classifiers/llm/openai_client.py +24 -4
- gitflow_analytics/qualitative/classifiers/llm_commit_classifier.py +3 -1
- gitflow_analytics/qualitative/core/llm_fallback.py +34 -2
- gitflow_analytics/reports/narrative_writer.py +118 -74
- gitflow_analytics/security/__init__.py +11 -0
- gitflow_analytics/security/config.py +189 -0
- gitflow_analytics/security/extractors/__init__.py +7 -0
- gitflow_analytics/security/extractors/dependency_checker.py +379 -0
- gitflow_analytics/security/extractors/secret_detector.py +197 -0
- gitflow_analytics/security/extractors/vulnerability_scanner.py +333 -0
- gitflow_analytics/security/llm_analyzer.py +347 -0
- gitflow_analytics/security/reports/__init__.py +5 -0
- gitflow_analytics/security/reports/security_report.py +358 -0
- gitflow_analytics/security/security_analyzer.py +414 -0
- gitflow_analytics/tui/app.py +3 -1
- gitflow_analytics/tui/progress_adapter.py +313 -0
- gitflow_analytics/tui/screens/analysis_progress_screen.py +407 -46
- gitflow_analytics/tui/screens/results_screen.py +219 -206
- gitflow_analytics/ui/__init__.py +21 -0
- gitflow_analytics/ui/progress_display.py +1477 -0
- gitflow_analytics/verify_activity.py +697 -0
- {gitflow_analytics-1.3.6.dist-info → gitflow_analytics-3.3.0.dist-info}/METADATA +2 -1
- {gitflow_analytics-1.3.6.dist-info → gitflow_analytics-3.3.0.dist-info}/RECORD +47 -31
- gitflow_analytics/cli_rich.py +0 -503
- {gitflow_analytics-1.3.6.dist-info → gitflow_analytics-3.3.0.dist-info}/WHEEL +0 -0
- {gitflow_analytics-1.3.6.dist-info → gitflow_analytics-3.3.0.dist-info}/entry_points.txt +0 -0
- {gitflow_analytics-1.3.6.dist-info → gitflow_analytics-3.3.0.dist-info}/licenses/LICENSE +0 -0
- {gitflow_analytics-1.3.6.dist-info → gitflow_analytics-3.3.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,358 @@
|
|
|
1
|
+
"""Generate security analysis reports."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import csv
|
|
5
|
+
from typing import List, Dict, Any, Optional
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from datetime import datetime
|
|
8
|
+
from ..security_analyzer import SecurityAnalysis
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class SecurityReportGenerator:
    """Generate various format reports for security findings.

    Renders a list of per-commit security analyses plus precomputed summary
    statistics into Markdown, JSON, and CSV reports, and — when at least one
    finding exists — a SARIF 2.1.0 report suitable for the GitHub Security tab.
    All files are written under ``output_dir`` with a shared timestamp suffix.
    """

    def __init__(self, output_dir: Optional[Path] = None):
        """Initialize report generator.

        Args:
            output_dir: Directory for report output. Defaults to ``reports``;
                created (including parents) if it does not exist.
        """
        self.output_dir = output_dir or Path("reports")
        self.output_dir.mkdir(parents=True, exist_ok=True)

    def generate_reports(self, analyses: List["SecurityAnalysis"], summary: Dict[str, Any]) -> Dict[str, Path]:
        """Generate all report formats.

        Args:
            analyses: List of security analyses
            summary: Summary statistics

        Returns:
            Dictionary of report type ("markdown", "json", "csv", and
            optionally "sarif") to the generated file path.
        """
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        reports = {}

        # Generate Markdown report
        md_path = self.output_dir / f"security_report_{timestamp}.md"
        self._generate_markdown_report(analyses, summary, md_path)
        reports["markdown"] = md_path

        # Generate JSON report
        json_path = self.output_dir / f"security_findings_{timestamp}.json"
        self._generate_json_report(analyses, summary, json_path)
        reports["json"] = json_path

        # Generate CSV report
        csv_path = self.output_dir / f"security_issues_{timestamp}.csv"
        self._generate_csv_report(analyses, csv_path)
        reports["csv"] = csv_path

        # Generate SARIF report only when there is something to report
        if any(a.total_findings > 0 for a in analyses):
            sarif_path = self.output_dir / f"security_sarif_{timestamp}.json"
            self._generate_sarif_report(analyses, sarif_path)
            reports["sarif"] = sarif_path

        return reports

    def _generate_markdown_report(self, analyses: List["SecurityAnalysis"], summary: Dict, path: Path) -> None:
        """Generate comprehensive Markdown security report.

        The report embeds emoji section markers, so the file is written with
        an explicit UTF-8 encoding; relying on the platform default (e.g.
        cp1252 on Windows) would raise UnicodeEncodeError.
        """
        with open(path, 'w', encoding='utf-8') as f:
            # Header
            f.write("# 🔒 Security Analysis Report\n\n")
            f.write(f"**Generated**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n")

            # Executive Summary
            f.write("## 📊 Executive Summary\n\n")
            f.write(f"- **Commits Analyzed**: {summary['total_commits']}\n")
            f.write(f"- **Commits with Issues**: {summary['commits_with_issues']}\n")
            f.write(f"- **Total Findings**: {summary['total_findings']}\n")
            f.write(f"- **Risk Level**: **{summary['risk_level']}** (Score: {summary['average_risk_score']})\n\n")

            # Risk Assessment
            self._write_risk_assessment(f, summary)

            # Severity Distribution (only non-zero buckets are listed)
            f.write("## 🎯 Severity Distribution\n\n")
            severity = summary['severity_distribution']
            if severity['critical'] > 0:
                f.write(f"- 🔴 **Critical**: {severity['critical']}\n")
            if severity['high'] > 0:
                f.write(f"- 🟠 **High**: {severity['high']}\n")
            if severity['medium'] > 0:
                f.write(f"- 🟡 **Medium**: {severity['medium']}\n")
            if severity['low'] > 0:
                f.write(f"- 🟢 **Low**: {severity['low']}\n")
            f.write("\n")

            # Top Issues
            if summary['top_issues']:
                f.write("## 🔝 Top Security Issues\n\n")
                f.write("| Issue Type | Severity | Occurrences | Affected Files |\n")
                f.write("|------------|----------|-------------|----------------|\n")
                for issue in summary['top_issues']:
                    f.write(f"| {issue['type']} | {issue['severity'].upper()} | "
                            f"{issue['occurrences']} | {issue['affected_files']} |\n")
                f.write("\n")

            # Detailed Findings by Category
            self._write_detailed_findings(f, analyses)

            # LLM Insights
            if 'llm_insights' in summary and summary['llm_insights']:
                f.write("## 🤖 AI Security Insights\n\n")
                f.write(summary['llm_insights'])
                f.write("\n\n")

            # Recommendations
            f.write("## 💡 Recommendations\n\n")
            for rec in summary['recommendations']:
                f.write(f"- {rec}\n")
            f.write("\n")

            # Appendix - All Findings
            f.write("## 📋 Detailed Findings\n\n")
            self._write_all_findings(f, analyses)

    def _write_risk_assessment(self, f, summary: Dict) -> None:
        """Write risk assessment section, including an ASCII risk-score bar."""
        risk_level = summary['risk_level']
        score = summary['average_risk_score']

        f.write("## ⚠️ Risk Assessment\n\n")

        if risk_level == "CRITICAL":
            f.write("### 🚨 CRITICAL RISK DETECTED\n\n")
            f.write("Immediate action required. Critical security vulnerabilities have been identified "
                    "that could lead to severe security breaches.\n\n")
        elif risk_level == "HIGH":
            f.write("### 🔴 High Risk\n\n")
            f.write("Significant security issues detected that should be addressed urgently.\n\n")
        elif risk_level == "MEDIUM":
            f.write("### 🟡 Medium Risk\n\n")
            f.write("Moderate security concerns identified that should be addressed in the near term.\n\n")
        else:
            f.write("### 🟢 Low Risk\n\n")
            f.write("Minor security issues detected. Continue with regular security practices.\n\n")

        # Risk score visualization: a 50-char bar filled proportionally to
        # the 0-100 score.
        f.write("**Risk Score Breakdown**:\n")
        f.write("```\n")
        bar_length = 50
        filled = int(score / 100 * bar_length)
        bar = "█" * filled + "░" * (bar_length - filled)
        f.write(f"[{bar}] {score:.1f}/100\n")
        f.write("```\n\n")

    def _write_detailed_findings(self, f, analyses: List["SecurityAnalysis"]) -> None:
        """Write detailed findings by category (secrets, vulns, dependencies)."""
        # Aggregate findings across all analyzed commits
        all_secrets = []
        all_vulnerabilities = []
        all_dependencies = []
        all_llm = []

        for analysis in analyses:
            all_secrets.extend(analysis.secrets)
            all_vulnerabilities.extend(analysis.vulnerabilities)
            all_dependencies.extend(analysis.dependency_issues)
            all_llm.extend(analysis.llm_findings)

        # Secrets Section
        if all_secrets:
            f.write("## 🔑 Exposed Secrets\n\n")
            f.write(f"**Total**: {len(all_secrets)} potential secrets detected\n\n")

            # Group by secret type
            by_type = {}
            for secret in all_secrets:
                secret_type = secret.get('secret_type', 'unknown')
                if secret_type not in by_type:
                    by_type[secret_type] = []
                by_type[secret_type].append(secret)

            for secret_type, secrets in sorted(by_type.items()):
                f.write(f"### {secret_type.replace('_', ' ').title()}\n")
                for s in secrets[:5]:  # Show first 5 of each type
                    f.write(f"- **File**: `{s.get('file', 'unknown')}`\n")
                    f.write(f"  - Line: {s.get('line', 'N/A')}\n")
                    f.write(f"  - Pattern: `{s.get('match', 'N/A')}`\n")
                if len(secrets) > 5:
                    f.write(f"  - *... and {len(secrets) - 5} more*\n")
                f.write("\n")

        # Vulnerabilities Section
        if all_vulnerabilities:
            f.write("## 🛡️ Code Vulnerabilities\n\n")
            f.write(f"**Total**: {len(all_vulnerabilities)} vulnerabilities detected\n\n")

            # Group by vulnerability type
            by_type = {}
            for vuln in all_vulnerabilities:
                vuln_type = vuln.get('vulnerability_type', 'unknown')
                if vuln_type not in by_type:
                    by_type[vuln_type] = []
                by_type[vuln_type].append(vuln)

            for vuln_type, vulns in sorted(by_type.items()):
                f.write(f"### {vuln_type.replace('_', ' ').title()}\n")
                for v in vulns[:5]:
                    f.write(f"- **File**: `{v.get('file', 'unknown')}:{v.get('line', 'N/A')}`\n")
                    f.write(f"  - Tool: {v.get('tool', 'N/A')}\n")
                    f.write(f"  - Message: {v.get('message', 'N/A')}\n")
                if len(vulns) > 5:
                    f.write(f"  - *... and {len(vulns) - 5} more*\n")
                f.write("\n")

        # Dependencies Section (capped at 10 entries)
        if all_dependencies:
            f.write("## 📦 Vulnerable Dependencies\n\n")
            f.write(f"**Total**: {len(all_dependencies)} vulnerable dependencies\n\n")

            for dep in all_dependencies[:10]:
                f.write(f"- **{dep.get('package', 'unknown')}** @ {dep.get('version', 'unknown')}\n")
                f.write(f"  - File: `{dep.get('file', 'unknown')}`\n")
                if dep.get('cve'):
                    f.write(f"  - CVE: {dep['cve']}\n")
                f.write(f"  - Message: {dep.get('message', 'N/A')}\n")
            if len(all_dependencies) > 10:
                f.write(f"\n*... and {len(all_dependencies) - 10} more vulnerable dependencies*\n")
            f.write("\n")

    def _write_all_findings(self, f, analyses: List["SecurityAnalysis"]) -> None:
        """Write all findings in detail, one section per commit with findings."""
        for analysis in analyses:
            if analysis.total_findings == 0:
                continue

            f.write(f"### Commit: `{analysis.commit_hash[:8]}`\n")
            f.write(f"**Time**: {analysis.timestamp.strftime('%Y-%m-%d %H:%M:%S')}\n")
            f.write(f"**Files Changed**: {len(analysis.files_changed)}\n")
            f.write(f"**Risk Score**: {analysis.risk_score:.1f}\n\n")

            if analysis.secrets:
                f.write("**Secrets**:\n")
                for s in analysis.secrets:
                    f.write(f"- {s.get('secret_type', 'unknown')}: {s.get('file', 'N/A')}\n")

            if analysis.vulnerabilities:
                f.write("**Vulnerabilities**:\n")
                for v in analysis.vulnerabilities:
                    f.write(f"- {v.get('vulnerability_type', 'unknown')}: {v.get('file', 'N/A')}\n")

            f.write("\n---\n\n")

    def _generate_json_report(self, analyses: List["SecurityAnalysis"], summary: Dict, path: Path) -> None:
        """Generate JSON report with all findings.

        Finding payloads may contain non-ASCII text, so the file is written
        as UTF-8 explicitly.
        """
        report = {
            "metadata": {
                "generated": datetime.now().isoformat(),
                "version": "1.0.0"
            },
            "summary": summary,
            "analyses": []
        }

        for analysis in analyses:
            report["analyses"].append({
                "commit_hash": analysis.commit_hash,
                "timestamp": analysis.timestamp.isoformat(),
                "files_changed": analysis.files_changed,
                "risk_score": analysis.risk_score,
                "findings": {
                    "secrets": analysis.secrets,
                    "vulnerabilities": analysis.vulnerabilities,
                    "dependency_issues": analysis.dependency_issues,
                    "llm_findings": analysis.llm_findings
                },
                "metrics": {
                    "total": analysis.total_findings,
                    "critical": analysis.critical_count,
                    "high": analysis.high_count,
                    "medium": analysis.medium_count,
                    "low": analysis.low_count
                }
            })

        with open(path, 'w', encoding='utf-8') as f:
            json.dump(report, f, indent=2)

    def _generate_csv_report(self, analyses: List["SecurityAnalysis"], path: Path) -> None:
        """Generate CSV report of all findings (one row per finding)."""
        # newline='' is required by the csv module; explicit UTF-8 keeps
        # non-ASCII finding text from crashing on narrow default encodings.
        with open(path, 'w', newline='', encoding='utf-8') as f:
            writer = csv.DictWriter(f, fieldnames=[
                'commit_hash', 'timestamp', 'type', 'severity',
                'category', 'file', 'line', 'message', 'tool', 'confidence'
            ])
            writer.writeheader()

            for analysis in analyses:
                # Write all findings from every category for this commit
                for finding in (analysis.secrets + analysis.vulnerabilities +
                                analysis.dependency_issues + analysis.llm_findings):
                    writer.writerow({
                        'commit_hash': analysis.commit_hash[:8],
                        'timestamp': analysis.timestamp.isoformat(),
                        'type': finding.get('type', 'unknown'),
                        'severity': finding.get('severity', 'medium'),
                        'category': finding.get('vulnerability_type',
                                                finding.get('secret_type', 'unknown')),
                        'file': finding.get('file', ''),
                        'line': finding.get('line', ''),
                        'message': finding.get('message', ''),
                        'tool': finding.get('tool', finding.get('source', '')),
                        'confidence': finding.get('confidence', '')
                    })

    def _generate_sarif_report(self, analyses: List["SecurityAnalysis"], path: Path) -> None:
        """Generate SARIF format report for GitHub Security tab integration.

        Only secrets and code vulnerabilities are emitted (dependency and
        LLM findings have no file location suitable for SARIF regions).
        """
        sarif = {
            "$schema": "https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json",
            "version": "2.1.0",
            "runs": [{
                "tool": {
                    "driver": {
                        "name": "GitFlow Analytics Security",
                        "version": "1.0.0",
                        "informationUri": "https://github.com/yourusername/gitflow-analytics"
                    }
                },
                "results": []
            }]
        }

        for analysis in analyses:
            for finding in (analysis.secrets + analysis.vulnerabilities):
                # SARIF requires region.startLine to be an integer >= 1;
                # findings may carry missing or non-numeric line info.
                raw_line = finding.get('line', 1)
                try:
                    start_line = max(1, int(raw_line))
                except (TypeError, ValueError):
                    start_line = 1
                result = {
                    "ruleId": finding.get('vulnerability_type',
                                          finding.get('secret_type', 'unknown')),
                    "level": self._severity_to_sarif_level(finding.get('severity', 'medium')),
                    "message": {
                        "text": finding.get('message', 'Security issue detected')
                    },
                    "locations": [{
                        "physicalLocation": {
                            "artifactLocation": {
                                "uri": finding.get('file', 'unknown')
                            },
                            "region": {
                                "startLine": start_line
                            }
                        }
                    }]
                }
                sarif["runs"][0]["results"].append(result)

        with open(path, 'w', encoding='utf-8') as f:
            json.dump(sarif, f, indent=2)

    def _severity_to_sarif_level(self, severity: str) -> str:
        """Convert severity to SARIF level (unknown severities -> warning)."""
        mapping = {
            "critical": "error",
            "high": "error",
            "medium": "warning",
            "low": "note"
        }
        return mapping.get(severity.lower(), "warning")