devsecops-radar 0.2.5__tar.gz → 0.2.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56) hide show
  1. {devsecops_radar-0.2.5/devsecops_radar.egg-info → devsecops_radar-0.2.6}/PKG-INFO +2 -1
  2. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar/cli/scanner.py +11 -2
  3. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar/core/analyzer.py +37 -14
  4. devsecops_radar-0.2.6/devsecops_radar/core/attack_simulation.py +22 -0
  5. devsecops_radar-0.2.6/devsecops_radar/core/database.py +104 -0
  6. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar/core/models.py +42 -15
  7. devsecops_radar-0.2.6/devsecops_radar/core/rag.py +21 -0
  8. devsecops_radar-0.2.6/devsecops_radar/core/reporting.py +71 -0
  9. devsecops_radar-0.2.6/devsecops_radar/core/sbom.py +37 -0
  10. devsecops_radar-0.2.6/devsecops_radar/core/valuation.py +20 -0
  11. devsecops_radar-0.2.6/devsecops_radar/scanners/adapter.py +15 -0
  12. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar/web/dashboard/routes.py +15 -5
  13. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6/devsecops_radar.egg-info}/PKG-INFO +2 -1
  14. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar.egg-info/SOURCES.txt +3 -0
  15. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar.egg-info/requires.txt +1 -0
  16. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/pyproject.toml +2 -1
  17. devsecops_radar-0.2.5/devsecops_radar/core/database.py +0 -92
  18. devsecops_radar-0.2.5/devsecops_radar/core/reporting.py +0 -55
  19. devsecops_radar-0.2.5/devsecops_radar/core/sbom.py +0 -27
  20. devsecops_radar-0.2.5/devsecops_radar/core/valuation.py +0 -10
  21. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/LICENSE +0 -0
  22. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/MANIFEST.in +0 -0
  23. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/README.md +0 -0
  24. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar/__init__.py +0 -0
  25. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar/cli/__init__.py +0 -0
  26. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar/core/__init__.py +0 -0
  27. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar/core/parser.py +0 -0
  28. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar/core/remediation.py +0 -0
  29. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar/core/rule_fusion.py +0 -0
  30. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar/plugins/__init__.py +0 -0
  31. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar/scanners/base.py +0 -0
  32. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar/scanners/poutine.py +0 -0
  33. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar/scanners/semgrep.py +0 -0
  34. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar/scanners/trivy.py +0 -0
  35. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar/scanners/zizmor.py +0 -0
  36. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar/web/__init__.py +0 -0
  37. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar/web/app.py +0 -0
  38. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar/web/attack_paths/__init__.py +0 -0
  39. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar/web/attack_paths/routes.py +0 -0
  40. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar/web/dashboard/__init__.py +0 -0
  41. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar/web/sentry/routes.py +0 -0
  42. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar/web/static/css/bootstrap.min.css +0 -0
  43. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar/web/static/css/style.css +0 -0
  44. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar/web/static/js/bootstrap.bundle.min.js +0 -0
  45. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar/web/static/js/chart.umd.min.js +0 -0
  46. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar/web/static/js/dashboard.js +0 -0
  47. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar/web/summary/__init__.py +0 -0
  48. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar/web/templates/index.html +0 -0
  49. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar/web/topology/__init__.py +0 -0
  50. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar/web/topology/routes.py +0 -0
  51. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar.egg-info/dependency_links.txt +0 -0
  52. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar.egg-info/entry_points.txt +0 -0
  53. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/devsecops_radar.egg-info/top_level.txt +0 -0
  54. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/setup.cfg +0 -0
  55. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/tests/test_cli.py +0 -0
  56. {devsecops_radar-0.2.5 → devsecops_radar-0.2.6}/tests/test_scanners.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: devsecops-radar
3
- Version: 0.2.5
3
+ Version: 0.2.6
4
4
  Summary: Unified CI/CD Security Dashboard — Pipeline Sentinel
5
5
  Author-email: Mehrdoost <70381337+Mehrdoost@users.noreply.github.com>
6
6
  License-Expression: MIT
@@ -21,6 +21,7 @@ Requires-Dist: loguru>=0.7
21
21
  Requires-Dist: reportlab>=4.0
22
22
  Requires-Dist: litellm>=1.50
23
23
  Requires-Dist: sqlalchemy>=2.0
24
+ Requires-Dist: pydantic>=2.0
24
25
  Dynamic: license-file
25
26
 
26
27
  <!-- markdownlint-disable MD033 MD041 -->
@@ -4,11 +4,13 @@ import os
4
4
  import sys
5
5
  from importlib.metadata import entry_points
6
6
  from loguru import logger
7
+ from devsecops_radar.scanners.adapter import ScannerAdapter
7
8
  from devsecops_radar.core.analyzer import get_analyzer
8
9
  from devsecops_radar.core.database import save_scan
9
10
  from devsecops_radar.core.rule_fusion import RuleFusion
10
11
  from devsecops_radar.core.remediation import auto_fix, generate_pr
11
12
  from devsecops_radar.core.reporting import generate_pdf_report
13
+ from devsecops_radar.core.valuation import compute_dynamic_risk_score
12
14
 
13
15
  def discover_plugins():
14
16
  plugins = {}
@@ -47,13 +49,15 @@ def run_scans(args, plugins):
47
49
  if target:
48
50
  scanner = plugins.get(name)
49
51
  if scanner:
52
+ adapter = ScannerAdapter(scanner)
50
53
  try:
51
54
  if os.path.isfile(target):
52
55
  logger.info(f"Parsing {name} JSON file: {target}")
53
- all_findings.extend(scanner.parse(target))
56
+ validated = adapter.parse(target)
54
57
  else:
55
58
  logger.info(f"Running {name} on: {target}")
56
- all_findings.extend(scanner.run(target))
59
+ validated = adapter.run(target)
60
+ all_findings.extend([v.dict() for v in validated])
57
61
  except Exception as e:
58
62
  logger.error(f"{name} failed: {e}")
59
63
  return all_findings
@@ -121,6 +125,11 @@ def main():
121
125
 
122
126
  ai_summary = run_analysis(args, findings, topology)
123
127
 
128
+ # Compute dynamic risk scores
129
+ if findings and topology:
130
+ for f in findings:
131
+ f['dynamic_risk_score'] = compute_dynamic_risk_score(f, topology)
132
+
124
133
  if args.fix and ai_summary:
125
134
  fixed = auto_fix(findings, ai_summary)
126
135
  if fixed:
@@ -2,28 +2,50 @@ import json
2
2
  import os
3
3
  import re
4
4
  import requests
5
+ from requests.adapters import HTTPAdapter
6
+ from urllib3.util.retry import Retry
5
7
  from typing import List, Dict, Any, Optional
6
8
 
9
# --- Retry logic for LLM calls ---
def _session_with_retries(total=3, backoff_factor=0.5, status_forcelist=(429, 500, 502, 503, 504)):
    """Build a requests.Session that retries POSTs on transient HTTP errors.

    Args:
        total: maximum number of retries per request.
        backoff_factor: exponential backoff base (0.5 -> 0.5s, 1s, 2s, ...).
        status_forcelist: HTTP status codes that trigger a retry. A tuple,
            not a list: a mutable default argument would be one shared object
            across all calls (classic Python pitfall).

    Returns:
        A configured requests.Session with the retry adapter mounted for
        both http:// and https:// URLs.
    """
    session = requests.Session()
    retries = Retry(
        total=total,
        backoff_factor=backoff_factor,
        status_forcelist=status_forcelist,
        # POST is not retried by urllib3 by default (non-idempotent); opt in
        # explicitly because the LLM generation calls are safe to repeat.
        allowed_methods=["POST"],
    )
    adapter = HTTPAdapter(max_retries=retries)
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    return session
22
+
23
+ # Default maximum findings sent to LLM (configurable via env)
24
+ MAX_ANALYZER_FINDINGS = int(os.environ.get("ANALYZER_MAX_FINDINGS", "100"))
25
+
7
26
  FEW_SHOT_EXAMPLE = {
8
- "executive_summary": "The pipeline shows a critical vulnerability in the web server...",
9
- "risk_score": 85,
27
+ "executive_summary": "A leaked CI/CD credential combined with an unpatched container image creates a critical supply chain attack path. Immediate action is required.",
28
+ "risk_score": 92,
10
29
  "attack_paths": [
11
30
  {
12
- "name": "Example Attack Path",
13
- "description": "...",
14
- "involved_findings": ["CVE-2026-1234"],
15
- "mitre_tactics": ["TA0001"],
16
- "mitre_techniques": ["T1190"]
31
+ "name": "Supply Chain Compromise via Credential Leak",
32
+ "description": "An exposed GitHub Actions secret (ID: SECRET-001) allows an attacker to push malicious images to the container registry. Combined with a known RCE vulnerability in the web server (CVE-2026-1234), this chain grants full control over the production environment.",
33
+ "involved_findings": ["SECRET-001", "CVE-2026-1234"],
34
+ "mitre_tactics": ["TA0001", "TA0042"],
35
+ "mitre_techniques": ["T1078", "T1578"],
36
+ "potential_impact": "Full compromise of production services",
37
+ "difficulty": "medium"
17
38
  }
18
39
  ],
19
40
  "top_remediations": [
20
41
  {
21
42
  "priority": 1,
22
- "finding_id": "CVE-2026-1234",
23
- "action": "Upgrade package X to version Y",
24
- "fix_diff": "--- a/requirements.txt\n+++ b/requirements.txt\n-package==1.0\n+package==1.1"
43
+ "finding_id": "SECRET-001",
44
+ "action": "Rotate the exposed secret and remove it from the workflow log. Use GitHub's masked variables.",
45
+ "fix_diff": "--- a/.github/workflows/deploy.yml\n+++ b/.github/workflows/deploy.yml\n- run: echo ${{ secrets.DEPLOY_KEY }}\n+ run: echo '**redacted**'"
25
46
  }
26
- ]
47
+ ],
48
+ "false_positives_likely": []
27
49
  }
28
50
 
29
51
  class BaseAnalyzer:
@@ -42,7 +64,7 @@ def extract_json(text: str) -> Dict[str, Any]:
42
64
  pass
43
65
  return {"executive_summary": text, "attack_paths": [], "top_remediations": []}
44
66
 
45
- def select_findings_for_llm(findings: List[Dict], max_items: int = 100) -> List[Dict]:
67
+ def select_findings_for_llm(findings: List[Dict], max_items: int = MAX_ANALYZER_FINDINGS) -> List[Dict]:
46
68
  if len(findings) <= max_items:
47
69
  return findings
48
70
  critical_high = [f for f in findings if f.get('severity') in ('CRITICAL', 'HIGH')]
@@ -57,6 +79,7 @@ class OllamaAnalyzer(BaseAnalyzer):
57
79
  def __init__(self, model: str = None, endpoint: str = None):
58
80
  self.model = model or os.environ.get("PIPELINE_LLM_MODEL", "llama3.2:latest")
59
81
  self.endpoint = endpoint or os.environ.get("OPENAI_API_BASE", "http://localhost:11434/api/generate")
82
+ self.session = _session_with_retries()
60
83
 
61
84
  def analyze(self, findings: List[Dict[str, Any]], topology: Dict[str, Any] = None) -> Dict[str, Any]:
62
85
  if not findings:
@@ -71,7 +94,7 @@ class OllamaAnalyzer(BaseAnalyzer):
71
94
  Example output structure:
72
95
  {json.dumps(FEW_SHOT_EXAMPLE, indent=2)}
73
96
 
74
- IMPORTANT: Each remediation must reference the exact 'id' of the finding.
97
+ IMPORTANT: Each remediation must reference the exact 'id' of the finding. Identify multi-step attack chains.
75
98
 
76
99
  Findings:
77
100
  {json.dumps(selected, indent=2)}
@@ -80,7 +103,7 @@ Findings:
80
103
  Respond ONLY with valid JSON in the same format as the example."""
81
104
 
82
105
  try:
83
- resp = requests.post(
106
+ resp = self.session.post(
84
107
  self.endpoint,
85
108
  json={"model": self.model, "prompt": prompt, "stream": False, "format": "json"},
86
109
  timeout=180
@@ -0,0 +1,22 @@
1
+ import subprocess
2
+ import tempfile
3
+ import os
4
+
5
def simulate_attack(finding: dict) -> str:
    """Write a benign proof-of-concept shell script for *finding*.

    Returns the path of the generated ``poc.sh`` inside a fresh temporary
    directory, made executable by the owner only.
    """
    import shlex  # local: stdlib quoting helper not imported at module top

    # Sanitize values before embedding them in shell source: a quote or shell
    # metacharacter in the finding id/title must not break out of the echo
    # command or corrupt the script. shlex.quote handles the echo argument;
    # newlines are stripped from the id so the header stays one comment line.
    comment_id = str(finding.get('id')).replace('\n', ' ')
    message = shlex.quote(f"Simulating {finding.get('title')}")
    script = f"#!/bin/bash\n# PoC for {comment_id}\necho {message}"
    tmpdir = tempfile.mkdtemp()
    script_path = os.path.join(tmpdir, "poc.sh")
    with open(script_path, 'w') as fh:
        fh.write(script)
    os.chmod(script_path, 0o700)  # owner-only: PoC should not be world-readable
    return script_path
13
+
14
def run_sandboxed_poc(script_path: str) -> str:
    """Execute a PoC script inside a throwaway Alpine container.

    Returns the container's stdout, or an error description string when the
    sandbox could not run (docker missing, timeout, etc.).
    """
    command = [
        'docker', 'run', '--rm',
        '-v', f'{script_path}:/poc.sh:ro',  # read-only mount of the PoC
        'alpine', 'sh', '/poc.sh',
    ]
    try:
        completed = subprocess.run(command, capture_output=True, text=True, timeout=30)
    except Exception as exc:
        return f"Sandbox execution failed: {exc}"
    return completed.stdout
@@ -0,0 +1,104 @@
1
+ from contextlib import contextmanager
2
+ from devsecops_radar.core.models import (
3
+ init_db, SessionLocal, Scan, Finding
4
+ )
5
+ from typing import List, Dict, Any, Optional
6
+
7
@contextmanager
def get_session():
    """Yield a SQLAlchemy session; commit on success, rollback on error.

    The session is always closed, even when the caller's block raises.
    """
    session = SessionLocal()
    try:
        yield session
        session.commit()
    except BaseException:
        # Explicit form of the bare 'except:' it replaces: also roll back on
        # KeyboardInterrupt/SystemExit so no transaction is left open.
        session.rollback()
        raise
    finally:
        session.close()
18
+
19
def save_scan(findings: List[Dict[str, Any]]):
    """Persist a list of raw finding dicts as a new scan.

    Thin wrapper around models.save_scan_to_db; the import is deferred to
    call time — presumably to avoid an import cycle between database and
    models at module load (TODO confirm).
    """
    from devsecops_radar.core.models import save_scan_to_db
    save_scan_to_db(findings)
22
+
23
def get_all_scans() -> List[Dict[str, Any]]:
    """Return summary rows (totals and severity counts) for every stored scan, oldest first."""
    init_db()
    summaries: List[Dict[str, Any]] = []
    with get_session() as session:
        for scan in session.query(Scan).order_by(Scan.timestamp.asc()).all():
            rows = session.query(Finding).filter(Finding.scan_id == scan.id).all()
            # Tally findings per (upper-cased) severity; unknown severities
            # still count toward the total but are not broken out.
            tally: Dict[str, int] = {}
            for row in rows:
                severity = row.severity.upper() if row.severity else "UNKNOWN"
                tally[severity] = tally.get(severity, 0) + 1
            summaries.append({
                "id": scan.id,
                "timestamp": scan.timestamp.isoformat(),
                "total": len(rows),
                "critical": tally.get("CRITICAL", 0),
                "high": tally.get("HIGH", 0),
                "medium": tally.get("MEDIUM", 0),
                "low": tally.get("LOW", 0),
            })
    return summaries
43
+
44
def get_scan_by_id(scan_id: int) -> Optional[Dict[str, Any]]:
    """Return one scan with its full findings list, or None if the id is unknown."""
    # Ensure tables exist before querying, consistent with get_all_scans();
    # without this a fresh install raises OperationalError ("no such table")
    # instead of returning None.
    init_db()
    with get_session() as session:
        scan = session.query(Scan).filter(Scan.id == scan_id).first()
        if not scan:
            return None
        findings = session.query(Finding).filter(Finding.scan_id == scan_id).all()
        findings_list = [
            {
                "tool": f.tool,
                "id": f.id,
                "severity": f.severity,
                "target": f.target,
                "title": f.title,
                "description": f.description,
                "line": f.line,
            }
            for f in findings
        ]
        return {
            "id": scan.id,
            "timestamp": scan.timestamp.isoformat(),
            "findings": findings_list,
            "total": len(findings_list),
        }
67
+
68
def compare_scans(scan_id_1: int, scan_id_2: int) -> Dict[str, Any]:
    """Diff two stored scans: which findings were added, removed, or kept."""
    first = get_scan_by_id(scan_id_1)
    second = get_scan_by_id(scan_id_2)
    if first is None or second is None:
        return {"error": "One or both scans not found"}

    first_ids = {f.get("id") for f in first["findings"]}
    second_ids = {f.get("id") for f in second["findings"]}
    added_findings = [f for f in second["findings"] if f.get("id") not in first_ids]
    removed_findings = [f for f in first["findings"] if f.get("id") not in second_ids]

    def _header(scan: Dict[str, Any]) -> Dict[str, Any]:
        # Compact summary for each side of the comparison.
        return {"id": scan["id"], "timestamp": scan["timestamp"], "total": scan["total"]}

    return {
        "scan1": _header(first),
        "scan2": _header(second),
        "added": len(added_findings),
        "removed": len(removed_findings),
        "unchanged": len(first["findings"]) - len(removed_findings),
        "added_findings": added_findings,
        "removed_findings": removed_findings,
    }
86
+
87
def get_findings_paginated(page: int = 1, per_page: int = 50) -> Dict[str, Any]:
    """Return one page of findings, newest first.

    Args:
        page: 1-based page number; values < 1 are clamped to 1 (page=0 would
            otherwise produce a negative OFFSET and a database error).
        per_page: page size; clamped to at least 1.

    Returns:
        Dict with 'items', 'total', 'page' and 'per_page' (the clamped values).
    """
    init_db()  # consistent with get_all_scans(); no-op on an existing DB
    page = max(1, page)
    per_page = max(1, per_page)
    with get_session() as session:
        total = session.query(Finding).count()
        rows = (
            session.query(Finding)
            .order_by(Finding.id.desc())
            .offset((page - 1) * per_page)
            .limit(per_page)
            .all()
        )
        items = [
            {
                "tool": f.tool,
                "id": f.id,
                "severity": f.severity,
                "target": f.target,
                "title": f.title,
                "description": f.description,
                "line": f.line,
            }
            for f in rows
        ]
        return {"items": items, "total": total, "page": page, "per_page": per_page}
@@ -1,10 +1,30 @@
1
1
  from sqlalchemy import create_engine, Column, Integer, String, DateTime, JSON, ForeignKey
2
2
  from sqlalchemy.orm import declarative_base, sessionmaker, relationship
3
+ from pydantic import BaseModel, Field, validator
4
+ from typing import List, Optional
3
5
  import datetime
4
6
  import os
5
7
 
6
8
  Base = declarative_base()
7
9
 
10
class FindingSchema(BaseModel):
    """Canonical, validated form of one scanner finding.

    Raw scanner dicts are coerced through this model before being written to
    the database (save_scan_to_db) or returned by the ScannerAdapter, so
    malformed records fail fast with a validation error.
    """
    tool: str                    # scanner that produced the finding (e.g. 'trivy')
    id: str                      # scanner-assigned identifier (CVE id, rule id, ...)
    severity: str                # normalized to upper-case by the validator below
    target: str                  # file/image/resource the finding applies to
    title: str
    description: Optional[str] = ""
    line: Optional[int] = None   # source line, when the scanner reports one

    # NOTE(review): 'validator' is the pydantic v1 API and is deprecated under
    # the declared pydantic>=2.0 dependency — consider 'field_validator'.
    @validator('severity')
    def severity_upper(cls, v):
        # Upper-case once so severity comparisons elsewhere can rely on it.
        return v.upper()
22
+
23
class ScanMetadata(BaseModel):
    """A batch of validated findings plus optional persistence metadata."""
    findings: List[FindingSchema]
    scan_id: Optional[int] = None    # database id, once the scan has been stored
    timestamp: Optional[str] = None  # presumably ISO-8601 when set — confirm with callers
27
+
8
28
  class Scan(Base):
9
29
  __tablename__ = 'scans'
10
30
  id = Column(Integer, primary_key=True)
@@ -32,20 +52,27 @@ def init_db():
32
52
  Base.metadata.create_all(engine)
33
53
 
34
54
def save_scan_to_db(findings: list):
    """Validate raw finding dicts and persist them as one Scan with child Findings.

    Raises a pydantic ValidationError (before touching the DB) when any dict
    does not match FindingSchema; re-raises DB errors after rolling back.
    """
    # Validate with Pydantic before storing
    validated = [FindingSchema(**f) for f in findings]
    init_db()
    session = SessionLocal()
    try:
        scan = Scan()
        session.add(scan)
        # Flush so the database assigns scan.id before it is referenced;
        # without this every Finding row is written with scan_id=None and
        # can never be associated back to its scan.
        session.flush()
        for f in validated:
            session.add(Finding(
                scan_id=scan.id,
                tool=f.tool,
                severity=f.severity,
                target=f.target,
                title=f.title,
                description=f.description,
                line=f.line,
            ))
        session.commit()
    except Exception:
        session.rollback()
        raise
    finally:
        session.close()
@@ -0,0 +1,21 @@
1
+ from devsecops_radar.core.models import SessionLocal, Finding
2
+ from typing import List, Dict, Any
3
+
4
def rag_search(query: str, limit: int = 5) -> List[Dict[str, Any]]:
    """Case-insensitive substring search over finding titles and descriptions.

    Returns at most *limit* of the newest matching findings as plain dicts.
    """
    session = SessionLocal()
    try:
        # NOTE(review): '%' or '_' in the query act as LIKE wildcards here —
        # harmless for a search box, but confirm that is intended.
        results = session.query(Finding).filter(
            Finding.title.ilike(f'%{query}%') | Finding.description.ilike(f'%{query}%')
        ).order_by(Finding.id.desc()).limit(limit).all()
        return [
            {
                "tool": f.tool,
                "id": f.id,
                "severity": f.severity,
                "target": f.target,
                "title": f.title,
                "description": f.description,
                "line": f.line,
            }
            for f in results
        ]
    finally:
        # Close even when the query raises: the original only closed on the
        # happy path, leaking a session on every failed request.
        session.close()
@@ -0,0 +1,71 @@
1
+ import re
2
+ import datetime
3
+ from reportlab.platypus import SimpleDocTemplate, Table, TableStyle, Paragraph
4
+ from reportlab.lib.pagesizes import A4
5
+ from reportlab.lib.styles import getSampleStyleSheet
6
+ from reportlab.lib import colors
7
+ from typing import List, Dict, Any
8
+
9
+ def redact_sensitive(text: str, patterns: List[str] = None) -> str:
10
+ if patterns is None:
11
+ patterns = [
12
+ r'(?i)(password|secret|token|key)\s*[:=]\s*\S+',
13
+ r'ghp_[a-zA-Z0-9]{36}',
14
+ r'eyJ[a-zA-Z0-9\-_]+\.[a-zA-Z0-9\-_]+\.[a-zA-Z0-9\-_]+'
15
+ ]
16
+ for pat in patterns:
17
+ text = re.sub(pat, '***REDACTED***', text)
18
+ return text
19
+
20
+ def generate_pdf_report(findings: List[Dict[str, Any]], ai_summary: Dict[str, Any], output_file: str = "report.pdf", redact: bool = True):
21
+ try:
22
+ from reportlab.platypus import SimpleDocTemplate, Table, TableStyle, Paragraph
23
+ except ImportError:
24
+ print("[ERROR] reportlab not installed.")
25
+ return
26
+
27
+ doc = SimpleDocTemplate(output_file, pagesize=A4)
28
+ elements = []
29
+ styles = getSampleStyleSheet()
30
+ title = "Pipeline Sentinel Security Report"
31
+ if redact:
32
+ title += " (Sensitive Data Redacted)"
33
+ elements.append(Paragraph(title, styles['Title']))
34
+ elements.append(Paragraph(f"Generated: {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}", styles['Normal']))
35
+
36
+ if ai_summary.get("executive_summary"):
37
+ summary = ai_summary['executive_summary']
38
+ if redact:
39
+ summary = redact_sensitive(summary)
40
+ elements.append(Paragraph("Executive Summary", styles['Heading2']))
41
+ elements.append(Paragraph(summary, styles['Normal']))
42
+ if ai_summary.get("risk_score"):
43
+ elements.append(Paragraph(f"Risk Score: {ai_summary['risk_score']}/100", styles['Normal']))
44
+
45
+ if findings:
46
+ elements.append(Paragraph("Findings", styles['Heading2']))
47
+ table_data = [["Tool", "ID", "Severity", "Target", "Title"]]
48
+ for f in findings[:50]:
49
+ title = f.get('title', '')
50
+ if redact:
51
+ title = redact_sensitive(title)
52
+ table_data.append([
53
+ f.get('tool',''),
54
+ f.get('id',''),
55
+ f.get('severity',''),
56
+ redact_sensitive(f.get('target','')) if redact else f.get('target',''),
57
+ title[:80]
58
+ ])
59
+ t = Table(table_data)
60
+ t.setStyle(TableStyle([
61
+ ('BACKGROUND', (0,0), (-1,0), colors.grey),
62
+ ('TEXTCOLOR',(0,0),(-1,0), colors.whitesmoke),
63
+ ('ALIGN',(0,0),(-1,-1),'CENTER'),
64
+ ('FONTNAME', (0,0), (-1,0), 'Helvetica-Bold'),
65
+ ('BOTTOMPADDING', (0,0), (-1,0), 12),
66
+ ('GRID', (0,0), (-1,-1), 1, colors.black)
67
+ ]))
68
+ elements.append(t)
69
+
70
+ doc.build(elements)
71
+ print(f"[REPORT] PDF saved to {output_file}")
@@ -0,0 +1,37 @@
1
+ import subprocess
2
+ import json
3
+ import os
4
+ from typing import List, Dict, Any, Optional
5
+
6
def generate_sbom(target_dir: str, output_file: str = "sbom.json") -> Optional[Dict]:
    """Run syft to produce a CycloneDX SBOM for *target_dir*.

    Returns the parsed SBOM dict, or None when syft is unavailable, exits
    non-zero, or the output file cannot be read.
    """
    command = ['syft', 'scan', target_dir, '-o', 'cyclonedx-json', '--output', output_file]
    try:
        subprocess.run(command, check=True)
        with open(output_file) as fh:
            return json.load(fh)
    except Exception as e:
        print(f"SBOM generation failed: {e}")
        return None
14
+
15
def detect_dependency_confusion(manifest_path: str, internal_prefixes: List[str] = None) -> List[Dict]:
    """Flag dependencies whose names use internal prefixes (dependency-confusion risk).

    Supports package.json (dependency sections only) and requirements.txt.
    Best-effort: unreadable or malformed manifests yield an empty list.
    """
    if not internal_prefixes:
        internal_prefixes = ['mycompany-', 'internal-']
    findings: List[Dict] = []

    def _flag(name: str, version: str) -> None:
        # Record the package when its name looks like an internal one.
        if any(name.startswith(prefix) for prefix in internal_prefixes):
            findings.append({"package": name, "version": version, "risk": "Potential dependency confusion"})

    try:
        with open(manifest_path) as fh:
            content = fh.read()
        if manifest_path.endswith('package.json'):
            # Parse as JSON and inspect only real dependency sections. The old
            # regex matched *every* string key/value pair, so unrelated fields
            # (e.g. the package's own "name") were falsely flagged.
            manifest = json.loads(content)
            for section in ('dependencies', 'devDependencies', 'peerDependencies', 'optionalDependencies'):
                for name, version in manifest.get(section, {}).items():
                    _flag(name, version)
        elif manifest_path.endswith('requirements.txt'):
            import re
            for raw_line in content.splitlines():
                line = raw_line.strip()
                if not line or line.startswith('#'):
                    continue
                # Strip any version specifier (==, >=, <=, ~=, !=, <, >),
                # extras ([...]) and environment markers (;), not just '=='.
                pkg = re.split(r'[=<>!~;\[ ]', line, maxsplit=1)[0].strip()
                _flag(pkg, line)
    except Exception:
        # Deliberate best-effort: scanning must not fail on a bad manifest.
        pass
    return findings
@@ -0,0 +1,20 @@
1
+ from typing import Dict, Any, Optional
2
+
3
def compute_dynamic_risk_score(finding: Dict[str, Any], topology: Optional[Dict[str, Any]] = None) -> float:
    """Score a finding on a 0-10 scale from severity, exposure and exploitability.

    The base weight comes from severity (now case-insensitive). The first
    topology server whose name appears in the finding's target raises the
    score (x2.5 when exposed, a further x1.5 when it holds sensitive data);
    a known exploit doubles it. Capped at 10.0, rounded to one decimal.
    """
    severity_weights = {'CRITICAL': 10.0, 'HIGH': 7.0, 'MEDIUM': 4.0, 'LOW': 1.0}
    # Normalize case so 'High'/'high' score the same as 'HIGH'.
    severity = str(finding.get('severity', 'LOW')).upper()
    base = severity_weights.get(severity, 1.0)

    exposure_mult = 1.0
    if topology:
        target = finding.get('target', '')
        for server in topology.get('servers', []):
            name = server.get('name')
            # Skip servers without a usable name: None would raise TypeError
            # on the 'in' test, and '' would spuriously match every target.
            if not name or name not in target:
                continue
            if server.get('exposed', False):
                exposure_mult = 2.5
            if server.get('data_classification') == 'sensitive':
                exposure_mult *= 1.5
            break

    likelihood_mult = 2.0 if finding.get('exploit_available', False) else 1.0
    return round(min(10.0, base * exposure_mult * likelihood_mult), 1)
@@ -0,0 +1,15 @@
1
+ from typing import List
2
+ from devsecops_radar.plugins import ScannerPlugin
3
+ from devsecops_radar.core.models import FindingSchema
4
+
5
class ScannerAdapter:
    """Wraps a raw scanner plugin and validates its output into FindingSchema objects."""

    def __init__(self, scanner: ScannerPlugin):
        self.scanner = scanner

    @staticmethod
    def _validate(raw_findings) -> List[FindingSchema]:
        # Coerce each raw dict into the validated schema; a malformed
        # finding raises a validation error instead of propagating silently.
        return [FindingSchema(**entry) for entry in raw_findings]

    def parse(self, file_path: str) -> List[FindingSchema]:
        """Parse a previously saved scanner JSON file and validate its findings."""
        return self._validate(self.scanner.parse(file_path))

    def run(self, target: str) -> List[FindingSchema]:
        """Run the scanner against *target* and validate the findings it emits."""
        return self._validate(self.scanner.run(target))
@@ -1,7 +1,8 @@
1
- from flask import Blueprint, jsonify, render_template_string
1
+ from flask import Blueprint, jsonify, render_template_string, request
2
2
  import json
3
3
  import os
4
- from devsecops_radar.core.database import get_all_scans
4
+ from devsecops_radar.core.database import get_all_scans, get_findings_paginated
5
+ from devsecops_radar.core.rag import rag_search
5
6
 
6
7
  dashboard_bp = Blueprint('dashboard', __name__)
7
8
 
@@ -408,7 +409,7 @@ DASHBOARD_HTML = r"""
408
409
  </script>
409
410
  </body>
410
411
  </html>
411
- """
412
+ """
412
413
 
413
414
  def load_findings():
414
415
  if not os.path.exists(FINDINGS_FILE):
@@ -427,8 +428,17 @@ def index():
427
428
 
428
429
@dashboard_bp.route('/api/findings')
def api_findings():
    """Paginated findings API: ``?page=N&per_page=M`` (both optional)."""
    page = request.args.get('page', 1, type=int)
    per_page = request.args.get('per_page', 50, type=int)
    # Clamp client-supplied values: a non-positive page would produce a
    # negative OFFSET, and an unbounded per_page lets a client dump the
    # whole table in one request.
    page = max(1, page)
    per_page = max(1, min(per_page, 500))
    return jsonify(get_findings_paginated(page, per_page))
431
434
 
432
435
  @dashboard_bp.route('/api/history')
433
436
  def api_history():
434
- return jsonify(get_all_scans())
437
+ return jsonify(get_all_scans())
438
+
439
@dashboard_bp.route('/api/rag')
def api_rag():
    """Keyword search over stored findings via ``?q=<term>``; empty query -> []."""
    query = request.args.get('q', '')
    if query:
        return jsonify(rag_search(query))
    return jsonify([])
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: devsecops-radar
3
- Version: 0.2.5
3
+ Version: 0.2.6
4
4
  Summary: Unified CI/CD Security Dashboard — Pipeline Sentinel
5
5
  Author-email: Mehrdoost <70381337+Mehrdoost@users.noreply.github.com>
6
6
  License-Expression: MIT
@@ -21,6 +21,7 @@ Requires-Dist: loguru>=0.7
21
21
  Requires-Dist: reportlab>=4.0
22
22
  Requires-Dist: litellm>=1.50
23
23
  Requires-Dist: sqlalchemy>=2.0
24
+ Requires-Dist: pydantic>=2.0
24
25
  Dynamic: license-file
25
26
 
26
27
  <!-- markdownlint-disable MD033 MD041 -->
@@ -13,15 +13,18 @@ devsecops_radar/cli/__init__.py
13
13
  devsecops_radar/cli/scanner.py
14
14
  devsecops_radar/core/__init__.py
15
15
  devsecops_radar/core/analyzer.py
16
+ devsecops_radar/core/attack_simulation.py
16
17
  devsecops_radar/core/database.py
17
18
  devsecops_radar/core/models.py
18
19
  devsecops_radar/core/parser.py
20
+ devsecops_radar/core/rag.py
19
21
  devsecops_radar/core/remediation.py
20
22
  devsecops_radar/core/reporting.py
21
23
  devsecops_radar/core/rule_fusion.py
22
24
  devsecops_radar/core/sbom.py
23
25
  devsecops_radar/core/valuation.py
24
26
  devsecops_radar/plugins/__init__.py
27
+ devsecops_radar/scanners/adapter.py
25
28
  devsecops_radar/scanners/base.py
26
29
  devsecops_radar/scanners/poutine.py
27
30
  devsecops_radar/scanners/semgrep.py
@@ -6,3 +6,4 @@ loguru>=0.7
6
6
  reportlab>=4.0
7
7
  litellm>=1.50
8
8
  sqlalchemy>=2.0
9
+ pydantic>=2.0
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "devsecops-radar"
7
- version = "0.2.5"
7
+ version = "0.2.6"
8
8
  description = "Unified CI/CD Security Dashboard — Pipeline Sentinel"
9
9
  readme = "README.md"
10
10
  license = "MIT"
@@ -27,6 +27,7 @@ dependencies = [
27
27
  "reportlab>=4.0",
28
28
  "litellm>=1.50",
29
29
  "sqlalchemy>=2.0",
30
+ "pydantic>=2.0",
30
31
  ]
31
32
 
32
33
  [project.urls]
@@ -1,92 +0,0 @@
1
- from devsecops_radar.core.models import (
2
- init_db, SessionLocal, Scan, Finding
3
- )
4
- from typing import List, Dict, Any, Optional
5
-
6
- def save_scan(findings: List[Dict[str, Any]]):
7
- from devsecops_radar.core.models import save_scan_to_db
8
- save_scan_to_db(findings)
9
-
10
- def get_all_scans() -> List[Dict[str, Any]]:
11
- init_db()
12
- session = SessionLocal()
13
- scans = []
14
- for scan in session.query(Scan).order_by(Scan.timestamp.asc()).all():
15
- findings = session.query(Finding).filter(Finding.scan_id == scan.id).all()
16
- counts = {"CRITICAL": 0, "HIGH": 0, "MEDIUM": 0, "LOW": 0}
17
- for f in findings:
18
- sev = f.severity.upper() if f.severity else "UNKNOWN"
19
- counts[sev] = counts.get(sev, 0) + 1
20
- scans.append({
21
- "id": scan.id,
22
- "timestamp": scan.timestamp.isoformat(),
23
- "total": len(findings),
24
- "critical": counts["CRITICAL"],
25
- "high": counts["HIGH"],
26
- "medium": counts["MEDIUM"],
27
- "low": counts["LOW"],
28
- })
29
- session.close()
30
- return scans
31
-
32
- def get_scan_by_id(scan_id: int) -> Optional[Dict[str, Any]]:
33
- session = SessionLocal()
34
- scan = session.query(Scan).filter(Scan.id == scan_id).first()
35
- if not scan:
36
- session.close()
37
- return None
38
- findings = session.query(Finding).filter(Finding.scan_id == scan_id).all()
39
- findings_list = []
40
- for f in findings:
41
- findings_list.append({
42
- "tool": f.tool,
43
- "id": f.id,
44
- "severity": f.severity,
45
- "target": f.target,
46
- "title": f.title,
47
- "description": f.description,
48
- "line": f.line
49
- })
50
- session.close()
51
- return {
52
- "id": scan.id,
53
- "timestamp": scan.timestamp.isoformat(),
54
- "findings": findings_list,
55
- "total": len(findings_list)
56
- }
57
-
58
- def compare_scans(scan_id_1: int, scan_id_2: int) -> Dict[str, Any]:
59
- scan1 = get_scan_by_id(scan_id_1)
60
- scan2 = get_scan_by_id(scan_id_2)
61
- if not scan1 or not scan2:
62
- return {"error": "One or both scans not found"}
63
- ids1 = {f.get("id") for f in scan1["findings"]}
64
- ids2 = {f.get("id") for f in scan2["findings"]}
65
- added = [f for f in scan2["findings"] if f.get("id") not in ids1]
66
- removed = [f for f in scan1["findings"] if f.get("id") not in ids2]
67
- return {
68
- "scan1": {"id": scan1["id"], "timestamp": scan1["timestamp"], "total": scan1["total"]},
69
- "scan2": {"id": scan2["id"], "timestamp": scan2["timestamp"], "total": scan2["total"]},
70
- "added": len(added),
71
- "removed": len(removed),
72
- "unchanged": len(scan1["findings"]) - len(removed),
73
- "added_findings": added,
74
- "removed_findings": removed,
75
- }
76
-
77
- def get_findings_by_severity(severity: str, limit: int = 100) -> List[Dict[str, Any]]:
78
- session = SessionLocal()
79
- findings = session.query(Finding).filter(Finding.severity == severity.upper()).limit(limit).all()
80
- result = []
81
- for f in findings:
82
- result.append({
83
- "tool": f.tool,
84
- "id": f.id,
85
- "severity": f.severity,
86
- "target": f.target,
87
- "title": f.title,
88
- "description": f.description,
89
- "line": f.line
90
- })
91
- session.close()
92
- return result
@@ -1,55 +0,0 @@
1
- import json
2
- import os
3
- from datetime import datetime
4
- from typing import List, Dict, Any
5
-
6
- def generate_pdf_report(findings: List[Dict[str, Any]], ai_summary: Dict[str, Any], output_file: str = "report.pdf"):
7
- try:
8
- from reportlab.lib.pagesizes import A4
9
- from reportlab.platypus import SimpleDocTemplate, Table, TableStyle, Paragraph
10
- from reportlab.lib.styles import getSampleStyleSheet
11
- from reportlab.lib import colors
12
- except ImportError:
13
- print("[ERROR] reportlab not installed. Install with 'pip install reportlab'")
14
- return
15
-
16
- doc = SimpleDocTemplate(output_file, pagesize=A4)
17
- elements = []
18
- styles = getSampleStyleSheet()
19
-
20
- # Title
21
- elements.append(Paragraph("Pipeline Sentinel Security Report", styles['Title']))
22
- elements.append(Paragraph(f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}", styles['Normal']))
23
-
24
- # Executive Summary
25
- if ai_summary.get("executive_summary"):
26
- elements.append(Paragraph("Executive Summary", styles['Heading2']))
27
- elements.append(Paragraph(ai_summary['executive_summary'], styles['Normal']))
28
- if ai_summary.get("risk_score"):
29
- elements.append(Paragraph(f"Risk Score: {ai_summary['risk_score']}/100", styles['Normal']))
30
-
31
- # Findings Table
32
- if findings:
33
- elements.append(Paragraph("Findings", styles['Heading2']))
34
- table_data = [["Tool", "ID", "Severity", "Target", "Title"]]
35
- for f in findings[:50]: # limit rows
36
- table_data.append([
37
- f.get('tool',''),
38
- f.get('id',''),
39
- f.get('severity',''),
40
- f.get('target',''),
41
- f.get('title','')[:80]
42
- ])
43
- t = Table(table_data)
44
- t.setStyle(TableStyle([
45
- ('BACKGROUND', (0,0), (-1,0), colors.grey),
46
- ('TEXTCOLOR',(0,0),(-1,0), colors.whitesmoke),
47
- ('ALIGN',(0,0),(-1,-1),'CENTER'),
48
- ('FONTNAME', (0,0), (-1,0), 'Helvetica-Bold'),
49
- ('BOTTOMPADDING', (0,0), (-1,0), 12),
50
- ('GRID', (0,0), (-1,-1), 1, colors.black)
51
- ]))
52
- elements.append(t)
53
-
54
- doc.build(elements)
55
- print(f"[REPORT] PDF saved to {output_file}")
@@ -1,27 +0,0 @@
1
- import subprocess
2
- import json
3
- import os
4
-
5
- def generate_sbom(target_dir: str, output_file: str = "sbom.json"):
6
- try:
7
- subprocess.run(['syft', 'scan', target_dir, '-o', 'cyclonedx-json', '--output', output_file], check=True)
8
- with open(output_file) as f:
9
- sbom = json.load(f)
10
- return sbom
11
- except Exception as e:
12
- print(f"SBOM generation failed: {e}")
13
- return None
14
-
15
- def sbom_health(sbom: dict) -> dict:
16
- components = sbom.get('components', [])
17
- total = len(components)
18
- outdated = 0
19
- for comp in components:
20
- if comp.get('version', '').endswith('-SNAPSHOT'):
21
- outdated += 1
22
- return {
23
- "total_components": total,
24
- "outdated": outdated,
25
- "healthy": total - outdated,
26
- "health_percent": round((total - outdated) / total * 100, 1) if total else 0
27
- }
@@ -1,10 +0,0 @@
1
- def compute_fix_value(finding: dict, topology: dict = None) -> float:
2
- weights = {'CRITICAL': 100, 'HIGH': 70, 'MEDIUM': 40, 'LOW': 10}
3
- score = weights.get(finding.get('severity', 'LOW'), 10)
4
- if topology:
5
- target = finding.get('target', '')
6
- for server in topology.get('servers', []):
7
- if target in server.get('name', ''):
8
- score *= (1 + server.get('importance', 0.5))
9
- break
10
- return round(score, 2)
File without changes